4 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Freescale Semiconductor, Inc nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include <rte_cryptodev.h>
39 #include <rte_malloc.h>
40 #include <rte_memcpy.h>
41 #include <rte_string_fns.h>
42 #include <rte_cycles.h>
43 #include <rte_kvargs.h>
45 #include <rte_cryptodev_pmd.h>
46 #include <rte_common.h>
47 #include <rte_fslmc.h>
48 #include <fslmc_vfio.h>
49 #include <dpaa2_hw_pvt.h>
50 #include <dpaa2_hw_dpio.h>
51 #include <dpaa2_hw_mempool.h>
52 #include <fsl_dpseci.h>
53 #include <fsl_mc_sys.h>
55 #include "dpaa2_sec_priv.h"
56 #include "dpaa2_sec_logs.h"
58 /* RTA header files */
59 #include <hw/desc/ipsec.h>
60 #include <hw/desc/algo.h>
62 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
63 * a pointer to the shared descriptor
65 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
66 #define FSL_VENDOR_ID 0x1957
67 #define FSL_DEVICE_ID 0x410
68 #define FSL_SUBSYSTEM_SEC 1
69 #define FSL_MC_DPSECI_DEVID 3
72 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
73 #define FLE_POOL_NUM_BUFS 32000
74 #define FLE_POOL_BUF_SIZE 256
75 #define FLE_POOL_CACHE_SIZE 512
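/*
 * The FLE pool provides the per-operation scratch buffers from which the
 * build_*_fd() helpers carve their frame-list entries (FLEs) and
 * scatter-gather entries (SGEs). One FLE_POOL_BUF_SIZE-byte buffer is taken
 * from the pool per crypto op and returned in sec_fd_to_mbuf().
 */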
77 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
79 static uint8_t cryptodev_driver_id;
82 build_authenc_gcm_fd(dpaa2_sec_session *sess,
83 struct rte_crypto_op *op,
84 struct qbman_fd *fd, uint16_t bpid)
86 struct rte_crypto_sym_op *sym_op = op->sym;
87 struct ctxt_priv *priv = sess->ctxt;
88 struct qbman_fle *fle, *sge;
89 struct sec_flow_context *flc;
90 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
91 int icv_len = sess->digest_length, retval;
93 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
96 PMD_INIT_FUNC_TRACE();
98 /* TODO: we are using the first FLE entry to store the mbuf and session ctxt.
99 * Currently we do not know which FLE has the mbuf stored,
100 * so while retrieving we can go back one FLE from the FD address
101 * to get the mbuf address from the previous FLE.
102 * A better approach would be to use the inline mbuf.
104 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
106 RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
109 memset(fle, 0, FLE_POOL_BUF_SIZE);
110 DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
111 DPAA2_FLE_SAVE_CTXT(fle, priv);
114 if (likely(bpid < MAX_BPID)) {
115 DPAA2_SET_FD_BPID(fd, bpid);
116 DPAA2_SET_FLE_BPID(fle, bpid);
117 DPAA2_SET_FLE_BPID(fle + 1, bpid);
118 DPAA2_SET_FLE_BPID(sge, bpid);
119 DPAA2_SET_FLE_BPID(sge + 1, bpid);
120 DPAA2_SET_FLE_BPID(sge + 2, bpid);
121 DPAA2_SET_FLE_BPID(sge + 3, bpid);
123 DPAA2_SET_FD_IVP(fd);
124 DPAA2_SET_FLE_IVP(fle);
125 DPAA2_SET_FLE_IVP((fle + 1));
126 DPAA2_SET_FLE_IVP(sge);
127 DPAA2_SET_FLE_IVP((sge + 1));
128 DPAA2_SET_FLE_IVP((sge + 2));
129 DPAA2_SET_FLE_IVP((sge + 3));
132 /* Save the shared descriptor */
133 flc = &priv->flc_desc[0].flc;
134 /* Configure FD as a FRAME LIST */
135 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
136 DPAA2_SET_FD_COMPOUND_FMT(fd);
137 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
139 PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
140 "iv-len=%d data_off: 0x%x\n",
141 sym_op->aead.data.offset,
142 sym_op->aead.data.length,
143 sym_op->aead.digest.length,
145 sym_op->m_src->data_off);
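/*
 * Compound frame layout for GCM (as built below): the output FLE points to
 * an SG list covering the AAD + payload region of m_src (plus the ICV buffer
 * when encrypting); the input FLE points to an SG list of IV, AAD and
 * payload (plus a copy of the received ICV when decrypting).
 */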
147 /* Configure Output FLE with Scatter/Gather Entry */
148 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
150 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
151 fle->length = (sess->dir == DIR_ENC) ?
152 (sym_op->aead.data.length + icv_len + auth_only_len) :
153 sym_op->aead.data.length + auth_only_len;
155 DPAA2_SET_FLE_SG_EXT(fle);
157 /* Configure Output SGE for Encap/Decap */
158 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
159 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
160 sym_op->m_src->data_off - auth_only_len);
161 sge->length = sym_op->aead.data.length + auth_only_len;
163 if (sess->dir == DIR_ENC) {
165 DPAA2_SET_FLE_ADDR(sge,
166 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
167 sge->length = sess->digest_length;
168 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
169 sess->iv.length + auth_only_len));
171 DPAA2_SET_FLE_FIN(sge);
176 /* Configure Input FLE with Scatter/Gather Entry */
177 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
178 DPAA2_SET_FLE_SG_EXT(fle);
179 DPAA2_SET_FLE_FIN(fle);
180 fle->length = (sess->dir == DIR_ENC) ?
181 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
182 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
183 sess->digest_length);
185 /* Configure Input SGE for Encap/Decap */
186 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
187 sge->length = sess->iv.length;
190 DPAA2_SET_FLE_ADDR(sge,
191 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
192 sge->length = auth_only_len;
193 DPAA2_SET_FLE_BPID(sge, bpid);
197 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
198 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
199 sym_op->m_src->data_off);
200 sge->length = sym_op->aead.data.length;
201 if (sess->dir == DIR_DEC) {
203 old_icv = (uint8_t *)(sge + 1);
204 memcpy(old_icv, sym_op->aead.digest.data,
205 sess->digest_length);
206 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
207 sge->length = sess->digest_length;
208 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
209 sess->digest_length +
213 DPAA2_SET_FLE_FIN(sge);
216 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
217 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
224 build_authenc_fd(dpaa2_sec_session *sess,
225 struct rte_crypto_op *op,
226 struct qbman_fd *fd, uint16_t bpid)
228 struct rte_crypto_sym_op *sym_op = op->sym;
229 struct ctxt_priv *priv = sess->ctxt;
230 struct qbman_fle *fle, *sge;
231 struct sec_flow_context *flc;
232 uint32_t auth_only_len = sym_op->auth.data.length -
233 sym_op->cipher.data.length;
234 int icv_len = sess->digest_length, retval;
236 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
239 PMD_INIT_FUNC_TRACE();
241 /* We are using the first FLE entry to store the mbuf.
242 * Currently we do not know which FLE has the mbuf stored,
243 * so while retrieving we can go back one FLE from the FD address
244 * to get the mbuf address from the previous FLE.
245 * A better approach would be to use the inline mbuf.
247 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
249 RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
252 memset(fle, 0, FLE_POOL_BUF_SIZE);
253 DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
254 DPAA2_FLE_SAVE_CTXT(fle, priv);
257 if (likely(bpid < MAX_BPID)) {
258 DPAA2_SET_FD_BPID(fd, bpid);
259 DPAA2_SET_FLE_BPID(fle, bpid);
260 DPAA2_SET_FLE_BPID(fle + 1, bpid);
261 DPAA2_SET_FLE_BPID(sge, bpid);
262 DPAA2_SET_FLE_BPID(sge + 1, bpid);
263 DPAA2_SET_FLE_BPID(sge + 2, bpid);
264 DPAA2_SET_FLE_BPID(sge + 3, bpid);
266 DPAA2_SET_FD_IVP(fd);
267 DPAA2_SET_FLE_IVP(fle);
268 DPAA2_SET_FLE_IVP((fle + 1));
269 DPAA2_SET_FLE_IVP(sge);
270 DPAA2_SET_FLE_IVP((sge + 1));
271 DPAA2_SET_FLE_IVP((sge + 2));
272 DPAA2_SET_FLE_IVP((sge + 3));
275 /* Save the shared descriptor */
276 flc = &priv->flc_desc[0].flc;
277 /* Configure FD as a FRAME LIST */
278 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
279 DPAA2_SET_FD_COMPOUND_FMT(fd);
280 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
282 PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
283 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
284 sym_op->auth.data.offset,
285 sym_op->auth.data.length,
287 sym_op->cipher.data.offset,
288 sym_op->cipher.data.length,
290 sym_op->m_src->data_off);
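/*
 * Compound frame layout for cipher + auth chaining: the output FLE is an SG
 * list over the cipher region of m_src (plus the digest buffer when
 * generating); the input FLE is an SG list of IV and the auth region of
 * m_src (plus a copy of the received digest when verifying).
 * auth_only_len is the part of the auth range that is not ciphered.
 */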
292 /* Configure Output FLE with Scatter/Gather Entry */
293 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
295 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
296 fle->length = (sess->dir == DIR_ENC) ?
297 (sym_op->cipher.data.length + icv_len) :
298 sym_op->cipher.data.length;
300 DPAA2_SET_FLE_SG_EXT(fle);
302 /* Configure Output SGE for Encap/Decap */
303 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
304 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
305 sym_op->m_src->data_off);
306 sge->length = sym_op->cipher.data.length;
308 if (sess->dir == DIR_ENC) {
310 DPAA2_SET_FLE_ADDR(sge,
311 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
312 sge->length = sess->digest_length;
313 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
316 DPAA2_SET_FLE_FIN(sge);
321 /* Configure Input FLE with Scatter/Gather Entry */
322 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
323 DPAA2_SET_FLE_SG_EXT(fle);
324 DPAA2_SET_FLE_FIN(fle);
325 fle->length = (sess->dir == DIR_ENC) ?
326 (sym_op->auth.data.length + sess->iv.length) :
327 (sym_op->auth.data.length + sess->iv.length +
328 sess->digest_length);
330 /* Configure Input SGE for Encap/Decap */
331 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
332 sge->length = sess->iv.length;
335 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
336 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
337 sym_op->m_src->data_off);
338 sge->length = sym_op->auth.data.length;
339 if (sess->dir == DIR_DEC) {
341 old_icv = (uint8_t *)(sge + 1);
342 memcpy(old_icv, sym_op->auth.digest.data,
343 sess->digest_length);
344 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
345 sge->length = sess->digest_length;
346 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
347 sess->digest_length +
350 DPAA2_SET_FLE_FIN(sge);
352 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
353 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
359 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
360 struct qbman_fd *fd, uint16_t bpid)
362 struct rte_crypto_sym_op *sym_op = op->sym;
363 struct qbman_fle *fle, *sge;
364 struct sec_flow_context *flc;
365 struct ctxt_priv *priv = sess->ctxt;
369 PMD_INIT_FUNC_TRACE();
371 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
373 RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
376 memset(fle, 0, FLE_POOL_BUF_SIZE);
377 /* TODO: we are using the first FLE entry to store the mbuf.
378 * Currently we do not know which FLE has the mbuf stored,
379 * so while retrieving we can go back one FLE from the FD address
380 * to get the mbuf address from the previous FLE.
381 * A better approach would be to use the inline mbuf.
383 DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
384 DPAA2_FLE_SAVE_CTXT(fle, priv);
387 if (likely(bpid < MAX_BPID)) {
388 DPAA2_SET_FD_BPID(fd, bpid);
389 DPAA2_SET_FLE_BPID(fle, bpid);
390 DPAA2_SET_FLE_BPID(fle + 1, bpid);
392 DPAA2_SET_FD_IVP(fd);
393 DPAA2_SET_FLE_IVP(fle);
394 DPAA2_SET_FLE_IVP((fle + 1));
396 flc = &priv->flc_desc[DESC_INITFINAL].flc;
397 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
399 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
400 fle->length = sess->digest_length;
402 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
403 DPAA2_SET_FD_COMPOUND_FMT(fd);
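/*
 * Auth-only layout: the FD points at a two-entry frame list whose first
 * (output) FLE receives the computed digest and whose second (input) FLE
 * describes the data to authenticate; for verification the input side is an
 * SG list of the data followed by a copy of the received digest.
 */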
406 if (sess->dir == DIR_ENC) {
407 DPAA2_SET_FLE_ADDR(fle,
408 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
409 DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
410 sym_op->m_src->data_off);
411 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
412 fle->length = sym_op->auth.data.length;
415 DPAA2_SET_FLE_SG_EXT(fle);
416 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
418 if (likely(bpid < MAX_BPID)) {
419 DPAA2_SET_FLE_BPID(sge, bpid);
420 DPAA2_SET_FLE_BPID(sge + 1, bpid);
422 DPAA2_SET_FLE_IVP(sge);
423 DPAA2_SET_FLE_IVP((sge + 1));
425 DPAA2_SET_FLE_ADDR(sge,
426 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
427 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
428 sym_op->m_src->data_off);
430 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
431 sess->digest_length);
432 sge->length = sym_op->auth.data.length;
434 old_digest = (uint8_t *)(sge + 1);
435 rte_memcpy(old_digest, sym_op->auth.digest.data,
436 sess->digest_length);
437 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
438 sge->length = sess->digest_length;
439 fle->length = sym_op->auth.data.length +
441 DPAA2_SET_FLE_FIN(sge);
443 DPAA2_SET_FLE_FIN(fle);
449 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
450 struct qbman_fd *fd, uint16_t bpid)
452 struct rte_crypto_sym_op *sym_op = op->sym;
453 struct qbman_fle *fle, *sge;
455 struct sec_flow_context *flc;
456 struct ctxt_priv *priv = sess->ctxt;
457 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
460 PMD_INIT_FUNC_TRACE();
462 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
464 RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
467 memset(fle, 0, FLE_POOL_BUF_SIZE);
468 /* TODO: we are using the first FLE entry to store the mbuf.
469 * Currently we do not know which FLE has the mbuf stored,
470 * so while retrieving we can go back one FLE from the FD address
471 * to get the mbuf address from the previous FLE.
472 * A better approach would be to use the inline mbuf.
474 DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
475 DPAA2_FLE_SAVE_CTXT(fle, priv);
479 if (likely(bpid < MAX_BPID)) {
480 DPAA2_SET_FD_BPID(fd, bpid);
481 DPAA2_SET_FLE_BPID(fle, bpid);
482 DPAA2_SET_FLE_BPID(fle + 1, bpid);
483 DPAA2_SET_FLE_BPID(sge, bpid);
484 DPAA2_SET_FLE_BPID(sge + 1, bpid);
486 DPAA2_SET_FD_IVP(fd);
487 DPAA2_SET_FLE_IVP(fle);
488 DPAA2_SET_FLE_IVP((fle + 1));
489 DPAA2_SET_FLE_IVP(sge);
490 DPAA2_SET_FLE_IVP((sge + 1));
493 flc = &priv->flc_desc[0].flc;
494 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
495 DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
497 DPAA2_SET_FD_COMPOUND_FMT(fd);
498 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
500 PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
501 sym_op->cipher.data.offset,
502 sym_op->cipher.data.length,
504 sym_op->m_src->data_off);
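/*
 * Cipher-only layout: the output FLE points directly at the cipher region of
 * m_src (the operation runs in place), while the input FLE is an SG list of
 * the per-op IV followed by the source data.
 */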
506 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
507 DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
508 sym_op->m_src->data_off);
510 fle->length = sym_op->cipher.data.length + sess->iv.length;
512 PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
513 flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
517 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
518 fle->length = sym_op->cipher.data.length + sess->iv.length;
520 DPAA2_SET_FLE_SG_EXT(fle);
522 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
523 sge->length = sess->iv.length;
526 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
527 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
528 sym_op->m_src->data_off);
530 sge->length = sym_op->cipher.data.length;
531 DPAA2_SET_FLE_FIN(sge);
532 DPAA2_SET_FLE_FIN(fle);
534 PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
535 (void *)DPAA2_GET_FD_ADDR(fd),
536 DPAA2_GET_FD_BPID(fd),
537 rte_dpaa2_bpid_info[bpid].meta_data_size,
538 DPAA2_GET_FD_OFFSET(fd),
539 DPAA2_GET_FD_LEN(fd));
545 build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
546 struct qbman_fd *fd, uint16_t bpid)
550 PMD_INIT_FUNC_TRACE();
552 * Segmented buffer is not supported.
554 if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
555 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
558 switch (sess->ctxt_type) {
559 case DPAA2_SEC_CIPHER:
560 ret = build_cipher_fd(sess, op, fd, bpid);
563 ret = build_auth_fd(sess, op, fd, bpid);
566 ret = build_authenc_gcm_fd(sess, op, fd, bpid);
568 case DPAA2_SEC_CIPHER_HASH:
569 ret = build_authenc_fd(sess, op, fd, bpid);
571 case DPAA2_SEC_HASH_CIPHER:
573 RTE_LOG(ERR, PMD, "error: Unsupported session\n");
579 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
582 /* Function to transmit the frames to the given device and VQ */
585 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
586 uint32_t frames_to_send;
587 struct qbman_eq_desc eqdesc;
588 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
589 struct qbman_swp *swp;
591 /*todo - need to support multiple buffer pools */
593 struct rte_mempool *mb_pool;
594 dpaa2_sec_session *sess;
596 if (unlikely(nb_ops == 0))
599 if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
600 RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
603 /*Prepare enqueue descriptor*/
604 qbman_eq_desc_clear(&eqdesc);
605 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
606 qbman_eq_desc_set_response(&eqdesc, 0, 0);
607 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
609 if (!DPAA2_PER_LCORE_SEC_DPIO) {
610 ret = dpaa2_affine_qbman_swp_sec();
612 RTE_LOG(ERR, PMD, "Failure in affining portal\n");
616 swp = DPAA2_PER_LCORE_SEC_PORTAL;
619 frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
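/*
 * Build and enqueue up to MAX_TX_RING_SLOTS frame descriptors per inner
 * loop; (nb_ops >> 3) is non-zero once at least 8 ops remain, which assumes
 * MAX_TX_RING_SLOTS is 8 here.
 */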
621 for (loop = 0; loop < frames_to_send; loop++) {
622 /*Clear the unused FD fields before sending*/
623 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
624 sess = (dpaa2_sec_session *)
625 get_session_private_data(
626 (*ops)->sym->session,
627 cryptodev_driver_id);
628 mb_pool = (*ops)->sym->m_src->pool;
629 bpid = mempool_to_bpid(mb_pool);
630 ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
632 PMD_DRV_LOG(ERR, "error: Improper packet"
633 " contents for crypto operation\n");
639 while (loop < frames_to_send) {
640 loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
642 frames_to_send - loop);
645 num_tx += frames_to_send;
646 nb_ops -= frames_to_send;
649 dpaa2_qp->tx_vq.tx_pkts += num_tx;
650 dpaa2_qp->tx_vq.err_pkts += nb_ops;
654 static inline struct rte_crypto_op *
655 sec_fd_to_mbuf(const struct qbman_fd *fd)
657 struct qbman_fle *fle;
658 struct rte_crypto_op *op;
659 struct ctxt_priv *priv;
661 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
663 PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
664 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
666 /* We are using the first FLE entry to store the mbuf.
667 * Currently we do not know which FLE has the mbuf stored,
668 * so while retrieving we can go back one FLE from the FD address
669 * to get the mbuf address from the previous FLE.
670 * A better approach would be to use the inline mbuf.
673 if (unlikely(DPAA2_GET_FD_IVP(fd))) {
674 /* TODO complete it. */
675 RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
678 op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
679 DPAA2_GET_FLE_ADDR((fle - 1)));
682 rte_prefetch0(op->sym->m_src);
684 PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
685 (void *)op->sym->m_src, op->sym->m_src->buf_addr);
687 PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
688 (void *)DPAA2_GET_FD_ADDR(fd),
689 DPAA2_GET_FD_BPID(fd),
690 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
691 DPAA2_GET_FD_OFFSET(fd),
692 DPAA2_GET_FD_LEN(fd));
694 /* free the fle memory */
695 priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
696 rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
702 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
705 /* Function is responsible for receiving frames for a given device and VQ */
706 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
707 struct qbman_result *dq_storage;
708 uint32_t fqid = dpaa2_qp->rx_vq.fqid;
710 uint8_t is_last = 0, status;
711 struct qbman_swp *swp;
712 const struct qbman_fd *fd;
713 struct qbman_pull_desc pulldesc;
715 if (!DPAA2_PER_LCORE_SEC_DPIO) {
716 ret = dpaa2_affine_qbman_swp_sec();
718 RTE_LOG(ERR, PMD, "Failure in affining portal\n");
722 swp = DPAA2_PER_LCORE_SEC_PORTAL;
723 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
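/*
 * Use the queue pair's pre-allocated dq_storage for the volatile dequeue;
 * at most DPAA2_DQRR_RING_SIZE frames are pulled per call.
 */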
725 qbman_pull_desc_clear(&pulldesc);
726 qbman_pull_desc_set_numframes(&pulldesc,
727 (nb_ops > DPAA2_DQRR_RING_SIZE) ?
728 DPAA2_DQRR_RING_SIZE : nb_ops);
729 qbman_pull_desc_set_fq(&pulldesc, fqid);
730 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
731 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
734 /*Issue a volatile dequeue command. */
736 if (qbman_swp_pull(swp, &pulldesc)) {
737 RTE_LOG(WARNING, PMD,
738 "SEC VDQ command is not issued : QBMAN busy\n");
739 /* Portal was busy, try again */
745 /* Receive the packets until the Last Dequeue entry is found with
746 * respect to the above issued PULL command.
749 /* Check if the previously issued command is completed.
750 * Also seems like the SWP is shared between the Ethernet Driver
751 * and the SEC driver.
753 while (!qbman_check_command_complete(dq_storage))
756 /* Loop until the dq_storage is updated with
759 while (!qbman_check_new_result(dq_storage))
761 /* Check whether the last pull command has expired and
762 * set the condition for loop termination.
764 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
766 /* Check for valid frame. */
767 status = (uint8_t)qbman_result_DQ_flags(dq_storage);
769 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
770 PMD_RX_LOG(DEBUG, "No frame is delivered");
775 fd = qbman_result_DQ_fd(dq_storage);
776 ops[num_rx] = sec_fd_to_mbuf(fd);
778 if (unlikely(fd->simple.frc)) {
779 /* TODO Parse SEC errors */
780 RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
782 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
784 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
789 } /* End of Packet Rx loop */
791 dpaa2_qp->rx_vq.rx_pkts += num_rx;
793 PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
794 /* Return the total number of received packets to the DPAA2 app */
798 /** Release queue pair */
800 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
802 struct dpaa2_sec_qp *qp =
803 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
805 PMD_INIT_FUNC_TRACE();
807 if (qp->rx_vq.q_storage) {
808 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
809 rte_free(qp->rx_vq.q_storage);
813 dev->data->queue_pairs[queue_pair_id] = NULL;
818 /** Setup a queue pair */
820 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
821 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
822 __rte_unused int socket_id,
823 __rte_unused struct rte_mempool *session_pool)
825 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
826 struct dpaa2_sec_qp *qp;
827 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
828 struct dpseci_rx_queue_cfg cfg;
831 PMD_INIT_FUNC_TRACE();
833 /* If qp is already in use free ring memory and qp metadata. */
834 if (dev->data->queue_pairs[qp_id] != NULL) {
835 PMD_DRV_LOG(INFO, "QP already setup");
839 PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
840 dev, qp_id, qp_conf);
842 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
844 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
845 RTE_CACHE_LINE_SIZE);
847 RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
853 qp->rx_vq.q_storage = rte_malloc("sec dq storage",
854 sizeof(struct queue_storage_info_t),
855 RTE_CACHE_LINE_SIZE);
856 if (!qp->rx_vq.q_storage) {
857 RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
860 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
862 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
863 RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
867 dev->data->queue_pairs[qp_id] = qp;
869 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
870 cfg.user_ctx = (uint64_t)(&qp->rx_vq);
871 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
876 /** Start queue pair */
878 dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
879 __rte_unused uint16_t queue_pair_id)
881 PMD_INIT_FUNC_TRACE();
886 /** Stop queue pair */
888 dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
889 __rte_unused uint16_t queue_pair_id)
891 PMD_INIT_FUNC_TRACE();
896 /** Return the number of allocated queue pairs */
898 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
900 PMD_INIT_FUNC_TRACE();
902 return dev->data->nb_queue_pairs;
905 /** Returns the size of the dpaa2_sec session structure */
907 dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
909 PMD_INIT_FUNC_TRACE();
911 return sizeof(dpaa2_sec_session);
915 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
916 struct rte_crypto_sym_xform *xform,
917 dpaa2_sec_session *session)
919 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
920 struct alginfo cipherdata;
922 struct ctxt_priv *priv;
923 struct sec_flow_context *flc;
925 PMD_INIT_FUNC_TRACE();
927 /* For SEC CIPHER only one descriptor is required. */
928 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
929 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
930 RTE_CACHE_LINE_SIZE);
932 RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
936 priv->fle_pool = dev_priv->fle_pool;
938 flc = &priv->flc_desc[0].flc;
940 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
941 RTE_CACHE_LINE_SIZE);
942 if (session->cipher_key.data == NULL) {
943 RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
947 session->cipher_key.length = xform->cipher.key.length;
949 memcpy(session->cipher_key.data, xform->cipher.key.data,
950 xform->cipher.key.length);
951 cipherdata.key = (uint64_t)session->cipher_key.data;
952 cipherdata.keylen = session->cipher_key.length;
953 cipherdata.key_enc_flags = 0;
954 cipherdata.key_type = RTA_DATA_IMM;
956 /* Set IV parameters */
957 session->iv.offset = xform->cipher.iv.offset;
958 session->iv.length = xform->cipher.iv.length;
960 switch (xform->cipher.algo) {
961 case RTE_CRYPTO_CIPHER_AES_CBC:
962 cipherdata.algtype = OP_ALG_ALGSEL_AES;
963 cipherdata.algmode = OP_ALG_AAI_CBC;
964 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
966 case RTE_CRYPTO_CIPHER_3DES_CBC:
967 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
968 cipherdata.algmode = OP_ALG_AAI_CBC;
969 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
971 case RTE_CRYPTO_CIPHER_AES_CTR:
972 cipherdata.algtype = OP_ALG_ALGSEL_AES;
973 cipherdata.algmode = OP_ALG_AAI_CTR;
974 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
976 case RTE_CRYPTO_CIPHER_3DES_CTR:
977 case RTE_CRYPTO_CIPHER_AES_ECB:
978 case RTE_CRYPTO_CIPHER_3DES_ECB:
979 case RTE_CRYPTO_CIPHER_AES_XTS:
980 case RTE_CRYPTO_CIPHER_AES_F8:
981 case RTE_CRYPTO_CIPHER_ARC4:
982 case RTE_CRYPTO_CIPHER_KASUMI_F8:
983 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
984 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
985 case RTE_CRYPTO_CIPHER_NULL:
986 RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
990 RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
994 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
997 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
998 &cipherdata, NULL, session->iv.length,
1001 RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
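/*
 * cnstr_shdsc_blkcipher() (RTA) has written the shared descriptor into
 * flc_desc[0].desc. The FLC fields below record its length (word1_sdl) and
 * store the address of queue pair 0's rx queue in the rflc words
 * (presumably so the response path can recover the queue context).
 */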
1006 flc->mode_bits = 0x8000;
1008 flc->word1_sdl = (uint8_t)bufsize;
1009 flc->word2_rflc_31_0 = lower_32_bits(
1010 (uint64_t)&(((struct dpaa2_sec_qp *)
1011 dev->data->queue_pairs[0])->rx_vq));
1012 flc->word3_rflc_63_32 = upper_32_bits(
1013 (uint64_t)&(((struct dpaa2_sec_qp *)
1014 dev->data->queue_pairs[0])->rx_vq));
1015 session->ctxt = priv;
1017 for (i = 0; i < bufsize; i++)
1018 PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
1019 i, priv->flc_desc[0].desc[i]);
1024 rte_free(session->cipher_key.data);
1030 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1031 struct rte_crypto_sym_xform *xform,
1032 dpaa2_sec_session *session)
1034 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1035 struct alginfo authdata;
1036 unsigned int bufsize, i;
1037 struct ctxt_priv *priv;
1038 struct sec_flow_context *flc;
1040 PMD_INIT_FUNC_TRACE();
1042 /* For SEC AUTH three descriptors are required for various stages */
1043 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1044 sizeof(struct ctxt_priv) + 3 *
1045 sizeof(struct sec_flc_desc),
1046 RTE_CACHE_LINE_SIZE);
1048 RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
1052 priv->fle_pool = dev_priv->fle_pool;
1053 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1055 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1056 RTE_CACHE_LINE_SIZE);
1057 if (session->auth_key.data == NULL) {
1058 RTE_LOG(ERR, PMD, "No Memory for auth key\n");
1062 session->auth_key.length = xform->auth.key.length;
1064 memcpy(session->auth_key.data, xform->auth.key.data,
1065 xform->auth.key.length);
1066 authdata.key = (uint64_t)session->auth_key.data;
1067 authdata.keylen = session->auth_key.length;
1068 authdata.key_enc_flags = 0;
1069 authdata.key_type = RTA_DATA_IMM;
1071 session->digest_length = xform->auth.digest_length;
1073 switch (xform->auth.algo) {
1074 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1075 authdata.algtype = OP_ALG_ALGSEL_SHA1;
1076 authdata.algmode = OP_ALG_AAI_HMAC;
1077 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1079 case RTE_CRYPTO_AUTH_MD5_HMAC:
1080 authdata.algtype = OP_ALG_ALGSEL_MD5;
1081 authdata.algmode = OP_ALG_AAI_HMAC;
1082 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1084 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1085 authdata.algtype = OP_ALG_ALGSEL_SHA256;
1086 authdata.algmode = OP_ALG_AAI_HMAC;
1087 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1089 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1090 authdata.algtype = OP_ALG_ALGSEL_SHA384;
1091 authdata.algmode = OP_ALG_AAI_HMAC;
1092 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1094 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1095 authdata.algtype = OP_ALG_ALGSEL_SHA512;
1096 authdata.algmode = OP_ALG_AAI_HMAC;
1097 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1099 case RTE_CRYPTO_AUTH_SHA224_HMAC:
1100 authdata.algtype = OP_ALG_ALGSEL_SHA224;
1101 authdata.algmode = OP_ALG_AAI_HMAC;
1102 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1104 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1105 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1106 case RTE_CRYPTO_AUTH_NULL:
1107 case RTE_CRYPTO_AUTH_SHA1:
1108 case RTE_CRYPTO_AUTH_SHA256:
1109 case RTE_CRYPTO_AUTH_SHA512:
1110 case RTE_CRYPTO_AUTH_SHA224:
1111 case RTE_CRYPTO_AUTH_SHA384:
1112 case RTE_CRYPTO_AUTH_MD5:
1113 case RTE_CRYPTO_AUTH_AES_GMAC:
1114 case RTE_CRYPTO_AUTH_KASUMI_F9:
1115 case RTE_CRYPTO_AUTH_AES_CMAC:
1116 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1117 case RTE_CRYPTO_AUTH_ZUC_EIA3:
1118 RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
1122 RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
1126 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1129 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1130 1, 0, &authdata, !session->dir,
1131 session->digest_length);
1133 flc->word1_sdl = (uint8_t)bufsize;
1134 flc->word2_rflc_31_0 = lower_32_bits(
1135 (uint64_t)&(((struct dpaa2_sec_qp *)
1136 dev->data->queue_pairs[0])->rx_vq));
1137 flc->word3_rflc_63_32 = upper_32_bits(
1138 (uint64_t)&(((struct dpaa2_sec_qp *)
1139 dev->data->queue_pairs[0])->rx_vq));
1140 session->ctxt = priv;
1141 for (i = 0; i < bufsize; i++)
1142 PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
1143 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
1149 rte_free(session->auth_key.data);
1155 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
1156 struct rte_crypto_sym_xform *xform,
1157 dpaa2_sec_session *session)
1159 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1160 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1161 struct alginfo aeaddata;
1162 unsigned int bufsize, i;
1163 struct ctxt_priv *priv;
1164 struct sec_flow_context *flc;
1165 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1168 PMD_INIT_FUNC_TRACE();
1170 /* Set IV parameters */
1171 session->iv.offset = aead_xform->iv.offset;
1172 session->iv.length = aead_xform->iv.length;
1173 session->ctxt_type = DPAA2_SEC_AEAD;
1175 /* For SEC AEAD only one descriptor is required */
1176 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1177 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1178 RTE_CACHE_LINE_SIZE);
1180 RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
1184 priv->fle_pool = dev_priv->fle_pool;
1185 flc = &priv->flc_desc[0].flc;
1187 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
1188 RTE_CACHE_LINE_SIZE);
1189 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
1190 RTE_LOG(ERR, PMD, "No Memory for aead key\n");
1194 memcpy(session->aead_key.data, aead_xform->key.data,
1195 aead_xform->key.length);
1197 session->digest_length = aead_xform->digest_length;
1198 session->aead_key.length = aead_xform->key.length;
1199 ctxt->auth_only_len = aead_xform->aad_length;
1201 aeaddata.key = (uint64_t)session->aead_key.data;
1202 aeaddata.keylen = session->aead_key.length;
1203 aeaddata.key_enc_flags = 0;
1204 aeaddata.key_type = RTA_DATA_IMM;
1206 switch (aead_xform->algo) {
1207 case RTE_CRYPTO_AEAD_AES_GCM:
1208 aeaddata.algtype = OP_ALG_ALGSEL_AES;
1209 aeaddata.algmode = OP_ALG_AAI_GCM;
1210 session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
1212 case RTE_CRYPTO_AEAD_AES_CCM:
1213 RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
1217 RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
1221 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1224 priv->flc_desc[0].desc[0] = aeaddata.keylen;
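/*
 * desc[0] temporarily holds the key length; rta_inline_query() reports in
 * desc[1] whether the key can be inlined in the shared descriptor
 * (RTA_DATA_IMM) or must be referenced by pointer (RTA_DATA_PTR). The
 * scratch words are cleared again before the descriptor is built.
 */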
1225 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1227 (unsigned int *)priv->flc_desc[0].desc,
1228 &priv->flc_desc[0].desc[1], 1);
1231 PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
1234 if (priv->flc_desc[0].desc[1] & 1) {
1235 aeaddata.key_type = RTA_DATA_IMM;
1237 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
1238 aeaddata.key_type = RTA_DATA_PTR;
1240 priv->flc_desc[0].desc[0] = 0;
1241 priv->flc_desc[0].desc[1] = 0;
1243 if (session->dir == DIR_ENC)
1244 bufsize = cnstr_shdsc_gcm_encap(
1245 priv->flc_desc[0].desc, 1, 0,
1246 &aeaddata, session->iv.length,
1247 session->digest_length);
1249 bufsize = cnstr_shdsc_gcm_decap(
1250 priv->flc_desc[0].desc, 1, 0,
1251 &aeaddata, session->iv.length,
1252 session->digest_length);
1253 flc->word1_sdl = (uint8_t)bufsize;
1254 flc->word2_rflc_31_0 = lower_32_bits(
1255 (uint64_t)&(((struct dpaa2_sec_qp *)
1256 dev->data->queue_pairs[0])->rx_vq));
1257 flc->word3_rflc_63_32 = upper_32_bits(
1258 (uint64_t)&(((struct dpaa2_sec_qp *)
1259 dev->data->queue_pairs[0])->rx_vq));
1260 session->ctxt = priv;
1261 for (i = 0; i < bufsize; i++)
1262 PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
1263 i, priv->flc_desc[0].desc[i]);
1268 rte_free(session->aead_key.data);
1275 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
1276 struct rte_crypto_sym_xform *xform,
1277 dpaa2_sec_session *session)
1279 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1280 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1281 struct alginfo authdata, cipherdata;
1282 unsigned int bufsize, i;
1283 struct ctxt_priv *priv;
1284 struct sec_flow_context *flc;
1285 struct rte_crypto_cipher_xform *cipher_xform;
1286 struct rte_crypto_auth_xform *auth_xform;
1289 PMD_INIT_FUNC_TRACE();
1291 if (session->ext_params.aead_ctxt.auth_cipher_text) {
1292 cipher_xform = &xform->cipher;
1293 auth_xform = &xform->next->auth;
1294 session->ctxt_type =
1295 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1296 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
1298 cipher_xform = &xform->next->cipher;
1299 auth_xform = &xform->auth;
1300 session->ctxt_type =
1301 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1302 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
1305 /* Set IV parameters */
1306 session->iv.offset = cipher_xform->iv.offset;
1307 session->iv.length = cipher_xform->iv.length;
1309 /* For SEC cipher + auth chaining only one descriptor is required */
1310 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1311 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1312 RTE_CACHE_LINE_SIZE);
1314 RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
1318 priv->fle_pool = dev_priv->fle_pool;
1319 flc = &priv->flc_desc[0].flc;
1321 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
1322 RTE_CACHE_LINE_SIZE);
1323 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
1324 RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
1328 session->cipher_key.length = cipher_xform->key.length;
1329 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
1330 RTE_CACHE_LINE_SIZE);
1331 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
1332 RTE_LOG(ERR, PMD, "No Memory for auth key\n");
1333 rte_free(session->cipher_key.data);
1337 session->auth_key.length = auth_xform->key.length;
1338 memcpy(session->cipher_key.data, cipher_xform->key.data,
1339 cipher_xform->key.length);
1340 memcpy(session->auth_key.data, auth_xform->key.data,
1341 auth_xform->key.length);
1343 authdata.key = (uint64_t)session->auth_key.data;
1344 authdata.keylen = session->auth_key.length;
1345 authdata.key_enc_flags = 0;
1346 authdata.key_type = RTA_DATA_IMM;
1348 session->digest_length = auth_xform->digest_length;
1350 switch (auth_xform->algo) {
1351 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1352 authdata.algtype = OP_ALG_ALGSEL_SHA1;
1353 authdata.algmode = OP_ALG_AAI_HMAC;
1354 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1356 case RTE_CRYPTO_AUTH_MD5_HMAC:
1357 authdata.algtype = OP_ALG_ALGSEL_MD5;
1358 authdata.algmode = OP_ALG_AAI_HMAC;
1359 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1361 case RTE_CRYPTO_AUTH_SHA224_HMAC:
1362 authdata.algtype = OP_ALG_ALGSEL_SHA224;
1363 authdata.algmode = OP_ALG_AAI_HMAC;
1364 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1366 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1367 authdata.algtype = OP_ALG_ALGSEL_SHA256;
1368 authdata.algmode = OP_ALG_AAI_HMAC;
1369 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1371 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1372 authdata.algtype = OP_ALG_ALGSEL_SHA384;
1373 authdata.algmode = OP_ALG_AAI_HMAC;
1374 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1376 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1377 authdata.algtype = OP_ALG_ALGSEL_SHA512;
1378 authdata.algmode = OP_ALG_AAI_HMAC;
1379 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1381 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1382 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1383 case RTE_CRYPTO_AUTH_NULL:
1384 case RTE_CRYPTO_AUTH_SHA1:
1385 case RTE_CRYPTO_AUTH_SHA256:
1386 case RTE_CRYPTO_AUTH_SHA512:
1387 case RTE_CRYPTO_AUTH_SHA224:
1388 case RTE_CRYPTO_AUTH_SHA384:
1389 case RTE_CRYPTO_AUTH_MD5:
1390 case RTE_CRYPTO_AUTH_AES_GMAC:
1391 case RTE_CRYPTO_AUTH_KASUMI_F9:
1392 case RTE_CRYPTO_AUTH_AES_CMAC:
1393 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1394 case RTE_CRYPTO_AUTH_ZUC_EIA3:
1395 RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
1399 RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
1403 cipherdata.key = (uint64_t)session->cipher_key.data;
1404 cipherdata.keylen = session->cipher_key.length;
1405 cipherdata.key_enc_flags = 0;
1406 cipherdata.key_type = RTA_DATA_IMM;
1408 switch (cipher_xform->algo) {
1409 case RTE_CRYPTO_CIPHER_AES_CBC:
1410 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1411 cipherdata.algmode = OP_ALG_AAI_CBC;
1412 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1414 case RTE_CRYPTO_CIPHER_3DES_CBC:
1415 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1416 cipherdata.algmode = OP_ALG_AAI_CBC;
1417 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1419 case RTE_CRYPTO_CIPHER_AES_CTR:
1420 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1421 cipherdata.algmode = OP_ALG_AAI_CTR;
1422 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1424 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1425 case RTE_CRYPTO_CIPHER_NULL:
1426 case RTE_CRYPTO_CIPHER_3DES_ECB:
1427 case RTE_CRYPTO_CIPHER_AES_ECB:
1428 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1429 RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
1430 cipher_xform->algo);
1433 RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
1434 cipher_xform->algo);
1437 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1440 priv->flc_desc[0].desc[0] = cipherdata.keylen;
1441 priv->flc_desc[0].desc[1] = authdata.keylen;
1442 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1444 (unsigned int *)priv->flc_desc[0].desc,
1445 &priv->flc_desc[0].desc[2], 2);
1448 PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
1451 if (priv->flc_desc[0].desc[2] & 1) {
1452 cipherdata.key_type = RTA_DATA_IMM;
1454 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
1455 cipherdata.key_type = RTA_DATA_PTR;
1457 if (priv->flc_desc[0].desc[2] & (1 << 1)) {
1458 authdata.key_type = RTA_DATA_IMM;
1460 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
1461 authdata.key_type = RTA_DATA_PTR;
1463 priv->flc_desc[0].desc[0] = 0;
1464 priv->flc_desc[0].desc[1] = 0;
1465 priv->flc_desc[0].desc[2] = 0;
1467 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
1468 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
1469 0, &cipherdata, &authdata,
1471 ctxt->auth_only_len,
1472 session->digest_length,
1475 RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
1479 flc->word1_sdl = (uint8_t)bufsize;
1480 flc->word2_rflc_31_0 = lower_32_bits(
1481 (uint64_t)&(((struct dpaa2_sec_qp *)
1482 dev->data->queue_pairs[0])->rx_vq));
1483 flc->word3_rflc_63_32 = upper_32_bits(
1484 (uint64_t)&(((struct dpaa2_sec_qp *)
1485 dev->data->queue_pairs[0])->rx_vq));
1486 session->ctxt = priv;
1487 for (i = 0; i < bufsize; i++)
1488 PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
1489 i, priv->flc_desc[0].desc[i]);
1494 rte_free(session->cipher_key.data);
1495 rte_free(session->auth_key.data);
1501 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
1502 struct rte_crypto_sym_xform *xform, void *sess)
1504 dpaa2_sec_session *session = sess;
1506 PMD_INIT_FUNC_TRACE();
1508 if (unlikely(sess == NULL)) {
1509 RTE_LOG(ERR, PMD, "invalid session struct\n");
1513 /* Default IV length = 0 */
1514 session->iv.length = 0;
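/* Cipher Only */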
1517 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1518 session->ctxt_type = DPAA2_SEC_CIPHER;
1519 dpaa2_sec_cipher_init(dev, xform, session);
1521 /* Authentication Only */
1522 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1523 xform->next == NULL) {
1524 session->ctxt_type = DPAA2_SEC_AUTH;
1525 dpaa2_sec_auth_init(dev, xform, session);
1527 /* Cipher then Authenticate */
1528 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1529 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1530 session->ext_params.aead_ctxt.auth_cipher_text = true;
1531 dpaa2_sec_aead_chain_init(dev, xform, session);
1533 /* Authenticate then Cipher */
1534 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1535 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1536 session->ext_params.aead_ctxt.auth_cipher_text = false;
1537 dpaa2_sec_aead_chain_init(dev, xform, session);
1539 /* AEAD operation for AES-GCM kind of Algorithms */
1540 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1541 xform->next == NULL) {
1542 dpaa2_sec_aead_init(dev, xform, session);
1545 RTE_LOG(ERR, PMD, "Invalid crypto type\n");
1553 dpaa2_sec_session_configure(struct rte_cryptodev *dev,
1554 struct rte_crypto_sym_xform *xform,
1555 struct rte_cryptodev_sym_session *sess,
1556 struct rte_mempool *mempool)
1558 void *sess_private_data;
1561 if (rte_mempool_get(mempool, &sess_private_data)) {
1563 "Couldn't get object from session mempool");
1567 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
1569 PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
1570 "session parameters");
1572 /* Return session to mempool */
1573 rte_mempool_put(mempool, sess_private_data);
1577 set_session_private_data(sess, dev->driver_id,
1583 /** Clear the memory of session so it doesn't leave key material behind */
1585 dpaa2_sec_session_clear(struct rte_cryptodev *dev,
1586 struct rte_cryptodev_sym_session *sess)
1588 PMD_INIT_FUNC_TRACE();
1589 uint8_t index = dev->driver_id;
1590 void *sess_priv = get_session_private_data(sess, index);
1591 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
1595 rte_free(s->cipher_key.data);
1596 rte_free(s->auth_key.data);
1597 memset(sess, 0, sizeof(dpaa2_sec_session));
1598 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1599 set_session_private_data(sess, index, NULL);
1600 rte_mempool_put(sess_mp, sess_priv);
1605 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
1606 struct rte_cryptodev_config *config __rte_unused)
1608 PMD_INIT_FUNC_TRACE();
1614 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
1616 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1617 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1618 struct dpseci_attr attr;
1619 struct dpaa2_queue *dpaa2_q;
1620 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
1621 dev->data->queue_pairs;
1622 struct dpseci_rx_queue_attr rx_attr;
1623 struct dpseci_tx_queue_attr tx_attr;
1626 PMD_INIT_FUNC_TRACE();
1628 memset(&attr, 0, sizeof(struct dpseci_attr));
1630 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
1632 PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
1634 goto get_attr_failure;
1636 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
1639 "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
1640 goto get_attr_failure;
1642 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
1643 dpaa2_q = &qp[i]->rx_vq;
1644 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
1646 dpaa2_q->fqid = rx_attr.fqid;
1647 PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
1649 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
1650 dpaa2_q = &qp[i]->tx_vq;
1651 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
1653 dpaa2_q->fqid = tx_attr.fqid;
1654 PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
1659 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
1664 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
1666 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1667 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1670 PMD_INIT_FUNC_TRACE();
1672 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
1674 PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
1679 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
1681 PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n",
1688 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
1690 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1691 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1694 PMD_INIT_FUNC_TRACE();
1696 /* Function is reverse of dpaa2_sec_dev_init.
1697 * It does the following:
1698 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
1699 * 2. Close the DPSECI device
1700 * 3. Free the allocated resources.
1703 /*Close the device at underlying layer*/
1704 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
1706 PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
1707 " error code %d\n", ret);
1711 /* Free the allocated memory for crypto device private data and dpseci */
1719 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
1720 struct rte_cryptodev_info *info)
1722 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
1724 PMD_INIT_FUNC_TRACE();
1726 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
1727 info->feature_flags = dev->feature_flags;
1728 info->capabilities = dpaa2_sec_capabilities;
1729 info->sym.max_nb_sessions = internals->max_nb_sessions;
1730 info->driver_id = cryptodev_driver_id;
1735 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
1736 struct rte_cryptodev_stats *stats)
1738 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1739 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1740 struct dpseci_sec_counters counters = {0};
1741 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
1742 dev->data->queue_pairs;
1745 PMD_INIT_FUNC_TRACE();
1746 if (stats == NULL) {
1747 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
1750 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1751 if (qp[i] == NULL) {
1752 PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1756 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
1757 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
1758 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
1759 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
1762 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
1765 PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
1767 PMD_DRV_LOG(INFO, "dpseci hw stats:"
1768 "\n\tNumber of Requests Dequeued = %lu"
1769 "\n\tNumber of Outbound Encrypt Requests = %lu"
1770 "\n\tNumber of Inbound Decrypt Requests = %lu"
1771 "\n\tNumber of Outbound Bytes Encrypted = %lu"
1772 "\n\tNumber of Outbound Bytes Protected = %lu"
1773 "\n\tNumber of Inbound Bytes Decrypted = %lu"
1774 "\n\tNumber of Inbound Bytes Validated = %lu",
1775 counters.dequeued_requests,
1776 counters.ob_enc_requests,
1777 counters.ib_dec_requests,
1778 counters.ob_enc_bytes,
1779 counters.ob_prot_bytes,
1780 counters.ib_dec_bytes,
1781 counters.ib_valid_bytes);
1786 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
1789 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
1790 (dev->data->queue_pairs);
1792 PMD_INIT_FUNC_TRACE();
1794 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1795 if (qp[i] == NULL) {
1796 PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1799 qp[i]->tx_vq.rx_pkts = 0;
1800 qp[i]->tx_vq.tx_pkts = 0;
1801 qp[i]->tx_vq.err_pkts = 0;
1802 qp[i]->rx_vq.rx_pkts = 0;
1803 qp[i]->rx_vq.tx_pkts = 0;
1804 qp[i]->rx_vq.err_pkts = 0;
1808 static struct rte_cryptodev_ops crypto_ops = {
1809 .dev_configure = dpaa2_sec_dev_configure,
1810 .dev_start = dpaa2_sec_dev_start,
1811 .dev_stop = dpaa2_sec_dev_stop,
1812 .dev_close = dpaa2_sec_dev_close,
1813 .dev_infos_get = dpaa2_sec_dev_infos_get,
1814 .stats_get = dpaa2_sec_stats_get,
1815 .stats_reset = dpaa2_sec_stats_reset,
1816 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
1817 .queue_pair_release = dpaa2_sec_queue_pair_release,
1818 .queue_pair_start = dpaa2_sec_queue_pair_start,
1819 .queue_pair_stop = dpaa2_sec_queue_pair_stop,
1820 .queue_pair_count = dpaa2_sec_queue_pair_count,
1821 .session_get_size = dpaa2_sec_session_get_size,
1822 .session_configure = dpaa2_sec_session_configure,
1823 .session_clear = dpaa2_sec_session_clear,
1827 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
1829 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
1831 rte_mempool_free(internals->fle_pool);
1833 PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
1834 dev->data->name, rte_socket_id());
1840 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
1842 struct dpaa2_sec_dev_private *internals;
1843 struct rte_device *dev = cryptodev->device;
1844 struct rte_dpaa2_device *dpaa2_dev;
1845 struct fsl_mc_io *dpseci;
1847 struct dpseci_attr attr;
1851 PMD_INIT_FUNC_TRACE();
1852 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
1853 if (dpaa2_dev == NULL) {
1854 PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
1857 hw_id = dpaa2_dev->object_id;
1859 cryptodev->driver_id = cryptodev_driver_id;
1860 cryptodev->dev_ops = &crypto_ops;
1862 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
1863 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
1864 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
1865 RTE_CRYPTODEV_FF_HW_ACCELERATED |
1866 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
1868 internals = cryptodev->data->dev_private;
1869 internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
1872 * For secondary processes, we don't initialise any further as primary
1873 * has already done this work. Only check we don't need a different
1876 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1877 PMD_INIT_LOG(DEBUG, "Device already init by primary process");
1880 /*Open the rte device via MC and save the handle for further use*/
1881 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
1882 sizeof(struct fsl_mc_io), 0);
1885 "Error in allocating the memory for dpsec object");
1888 dpseci->regs = rte_mcp_ptr_list[0];
1890 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
1892 PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
1896 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
1899 "Cannot get dpsec device attributed: Error = %x",
1903 sprintf(cryptodev->data->name, "dpsec-%u", hw_id);
1905 internals->max_nb_queue_pairs = attr.num_tx_queues;
1906 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
1907 internals->hw = dpseci;
1908 internals->token = token;
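/*
 * Create the per-device FLE pool (sized by the FLE_POOL_* defines above)
 * from which the build_*_fd() routines draw their FLE/SGE scratch buffers.
 */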
1910 sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
1911 internals->fle_pool = rte_mempool_create((const char *)str,
1914 FLE_POOL_CACHE_SIZE, 0,
1915 NULL, NULL, NULL, NULL,
1917 if (!internals->fle_pool) {
1918 RTE_LOG(ERR, PMD, "%s create failed\n", str);
1922 PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
1926 PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
1928 /* dpaa2_sec_uninit(crypto_dev_name); */
1933 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
1934 struct rte_dpaa2_device *dpaa2_dev)
1936 struct rte_cryptodev *cryptodev;
1937 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1941 sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);
1943 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
1944 if (cryptodev == NULL)
1947 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1948 cryptodev->data->dev_private = rte_zmalloc_socket(
1949 "cryptodev private structure",
1950 sizeof(struct dpaa2_sec_dev_private),
1951 RTE_CACHE_LINE_SIZE,
1954 if (cryptodev->data->dev_private == NULL)
1955 rte_panic("Cannot allocate memzone for private "
1959 dpaa2_dev->cryptodev = cryptodev;
1960 cryptodev->device = &dpaa2_dev->device;
1961 cryptodev->device->driver = &dpaa2_drv->driver;
1963 /* init user callbacks */
1964 TAILQ_INIT(&(cryptodev->link_intr_cbs));
1966 /* Invoke PMD device initialization function */
1967 retval = dpaa2_sec_dev_init(cryptodev);
1971 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1972 rte_free(cryptodev->data->dev_private);
1974 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
1980 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
1982 struct rte_cryptodev *cryptodev;
1985 cryptodev = dpaa2_dev->cryptodev;
1986 if (cryptodev == NULL)
1989 ret = dpaa2_sec_uninit(cryptodev);
1993 /* free crypto device */
1994 rte_cryptodev_pmd_release_device(cryptodev);
1996 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1997 rte_free(cryptodev->data->dev_private);
1999 cryptodev->device = NULL;
2000 cryptodev->data = NULL;
2005 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
2006 .drv_type = DPAA2_CRYPTO,
2008 .name = "DPAA2 SEC PMD"
2010 .probe = cryptodev_dpaa2_sec_probe,
2011 .remove = cryptodev_dpaa2_sec_remove,
2014 static struct cryptodev_driver dpaa2_sec_crypto_drv;
2016 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
2017 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
2018 cryptodev_driver_id);