/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright (c) 2016 NXP. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>
/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

#define TDES_CBC_IV_LEN		8
#define AES_CBC_IV_LEN		16

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
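
/*
 * Build a compound frame descriptor (FD) for a chained cipher+auth
 * operation. The FD references two frame list entries (FLEs): an output
 * FLE describing where SEC writes the processed data (plus the digest on
 * encryption), and an input FLE describing the IV, the payload and, for
 * decryption, the received ICV. Both FLEs point into the scatter/gather
 * entry (SGE) table allocated below.
 */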
static int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				 sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						    sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use an inline mbuf.
	 */
	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
	if (!fle) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		   sym_op->auth.data.offset,
		   sym_op->auth.data.length,
		   sess->digest_length,
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		memset(sym_op->auth.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->digest_length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}
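
/*
 * Build a compound FD for an authentication-only operation. For digest
 * generation the input FLE points straight at the mbuf data; for
 * verification a small SG table is used so that the received digest can
 * be carried alongside the data to be checked.
 */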
static int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	uint32_t mem_len = (sess->dir == DIR_ENC) ?
			   (3 * sizeof(struct qbman_fle)) :
			   (5 * sizeof(struct qbman_fle) +
			    sess->digest_length);
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;

	PMD_INIT_FUNC_TRACE();

	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
	if (!fle) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
		return -1;
	}
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use an inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		memset(sym_op->auth.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
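
/*
 * Build a compound FD for a cipher-only operation. The output FLE points
 * directly at the destination area in the mbuf; the input FLE uses a
 * two-entry SG table carrying the IV followed by the payload.
 */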
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	uint32_t mem_len = (5 * sizeof(struct qbman_fle));
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						    sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* todo - we can use some mempool to avoid malloc here */
	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
	if (!fle) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use an inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x",
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[bpid].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}
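
/*
 * Dispatch FD construction according to the session context type that
 * was selected at session configuration time.
 */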
static inline int
build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;

	PMD_INIT_FUNC_TRACE();

	switch (sess->ctxt_type) {
	case DPAA2_SEC_CIPHER:
		ret = build_cipher_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AUTH:
		ret = build_auth_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_CIPHER_HASH:
		ret = build_authenc_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_HASH_CIPHER:
	default:
		RTE_LOG(ERR, PMD, "error: Unsupported session\n");
	}
	return ret;
}
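
/*
 * Enqueue up to nb_ops crypto operations on the TX virtual queue of the
 * given queue pair. Each op is converted to a compound FD and the FDs
 * are pushed to hardware in bursts through the software portal.
 */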
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* todo - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;
	dpaa2_sec_session *sess;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
		RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			sess = (dpaa2_sec_session *)
					(*ops)->sym->session->_private;
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(sess, *ops++, &fd_arr[loop], bpid);
			if (ret) {
				PMD_DRV_LOG(ERR, "error: Improper packet"
					    " contents for crypto operation\n");
				goto skip_tx;
			}
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_send_multiple(swp, &eqdesc,
							&fd_arr[loop],
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
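
/*
 * Convert a returned FD back to the originating crypto op. The op
 * pointer is recovered from the FLE stored just before the FD's frame
 * list (see the note in the build_*_fd routines above).
 */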
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
		   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use an inline mbuf.
	 */
	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO: handle non-inline buffers. */
		RTE_LOG(ERR, PMD, "error: non-inline buffer not supported\n");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));

	/* Prefetch the op */
	rte_prefetch0(op->sym->m_src);

	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
		   (void *)op->sym->m_src, op->sym->m_src->buf_addr);

	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	rte_free(fle - 1);

	return op;
}
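
/*
 * Dequeue up to nb_ops completed operations from the RX virtual queue
 * of the given queue pair using a volatile dequeue (pull) command on
 * the software portal.
 */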
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued."
				" QBMAN is busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(swp, dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * a new token by QBMAN.
		 */
		while (!qbman_result_has_new_result(swp, dq_storage))
			;
		/* Check whether the Last Pull command is expired and
		 * set the condition for loop termination.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				PMD_RX_LOG(DEBUG, "No frame is delivered");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO: parse SEC errors */
			RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
				fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}
/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		PMD_DRV_LOG(INFO, "QP already setup");
		return 0;
	}

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		    dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (uint64_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}
/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}
/** Return the size of the DPAA2 SEC session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static void
dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
			     void *sess __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
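
/*
 * Create a cipher-only session: copy the key, select the block-cipher
 * algorithm, and build the shared descriptor referenced by the flow
 * context that the FD construction paths above attach to each frame.
 */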
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		ctxt->iv.length = AES_CBC_IV_LEN;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		ctxt->iv.length = TDES_CBC_IV_LEN;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_GCM:
	case RTE_CRYPTO_CIPHER_AES_CCM:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
			xform->cipher.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, ctxt->iv.length,
					session->dir);
	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
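
/*
 * Create an authentication-only session: copy the key, select the HMAC
 * algorithm, and build the shared descriptor used by the auth FD path.
 */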
static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
	struct alginfo authdata;
	unsigned int bufsize;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for auth key");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_GCM:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CCM:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
			xform->auth.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   ctxt->trunc_len);

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
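
/*
 * Create a chained cipher+auth session. Key placement is decided at
 * runtime: rta_inline_query() reports whether each key fits inline in
 * the shared descriptor or must be referenced by pointer instead.
 */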
static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();

	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	ctxt->trunc_len = auth_xform->digest_length;
	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = auth_xform->digest_length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_GCM:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CCM:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
			auth_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		ctxt->iv.length = AES_CBC_IV_LEN;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		ctxt->iv.length = TDES_CBC_IV_LEN;
		break;
	case RTE_CRYPTO_CIPHER_AES_GCM:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_CTR:
	case RTE_CRYPTO_CIPHER_AES_CCM:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
			cipher_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);

	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;

	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      ctxt->trunc_len,
					      session->dir);
	} else {
		RTE_LOG(ERR, PMD, "Hash before cipher not supported");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
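
/*
 * Top-level session configuration entry point: inspect the xform chain
 * and route to the cipher-only, auth-only, or chained setup above.
 */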
static void *
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform, void *sess)
{
	dpaa2_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct");
		return NULL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		dpaa2_sec_aead_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		dpaa2_sec_aead_init(dev, xform, session);
	} else {
		RTE_LOG(ERR, PMD, "Invalid crypto type");
		return NULL;
	}

	return session;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa2_sec_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
{
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	if (s) {
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(sess, 0, sizeof(dpaa2_sec_session));
	}
}
static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
			     priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
		goto get_attr_failure;
	}
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}
static void
dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
			     priv->hw_id);
		return;
	}

	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "SEC Device cannot be reset: Error = %0x\n",
			     ret);
		return;
	}
}

static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Function is the reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach the DPSECI from attached resources, i.e. buffer pools,
	 *    dpbp_id.
	 * 2. Close the DPSECI device.
	 * 3. Free the allocated resources.
	 */

	/* Close the device at underlying layer */
	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
			     " error code %d\n", ret);
		return -1;
	}

	/* Free the allocated memory for private data and dpseci */
	priv->hw = NULL;
	rte_free(dpseci);

	return 0;
}
static void
dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa2_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
	}
}

static void
dpaa2_sec_stats_get(struct rte_cryptodev *dev,
		    struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
	} else {
		PMD_DRV_LOG(INFO, "dpseci hw stats:"
			    "\n\tNumber of Requests Dequeued = %lu"
			    "\n\tNumber of Outbound Encrypt Requests = %lu"
			    "\n\tNumber of Inbound Decrypt Requests = %lu"
			    "\n\tNumber of Outbound Bytes Encrypted = %lu"
			    "\n\tNumber of Outbound Bytes Protected = %lu"
			    "\n\tNumber of Inbound Bytes Decrypted = %lu"
			    "\n\tNumber of Inbound Bytes Validated = %lu",
			    counters.dequeued_requests,
			    counters.ob_enc_requests,
			    counters.ib_dec_requests,
			    counters.ob_enc_bytes,
			    counters.ob_prot_bytes,
			    counters.ib_dec_bytes,
			    counters.ib_valid_bytes);
	}
}
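
/* Reset the per-queue software counters maintained by this PMD. */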
static void
dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   (dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}
		qp[i]->tx_vq.rx_pkts = 0;
		qp[i]->tx_vq.tx_pkts = 0;
		qp[i]->tx_vq.err_pkts = 0;
		qp[i]->rx_vq.rx_pkts = 0;
		qp[i]->rx_vq.tx_pkts = 0;
		qp[i]->rx_vq.err_pkts = 0;
	}
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa2_sec_dev_configure,
	.dev_start	      = dpaa2_sec_dev_start,
	.dev_stop	      = dpaa2_sec_dev_stop,
	.dev_close	      = dpaa2_sec_dev_close,
	.dev_infos_get	      = dpaa2_sec_dev_infos_get,
	.stats_get	      = dpaa2_sec_stats_get,
	.stats_reset	      = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_start     = dpaa2_sec_queue_pair_start,
	.queue_pair_stop      = dpaa2_sec_queue_pair_stop,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.session_get_size     = dpaa2_sec_session_get_size,
	.session_initialize   = dpaa2_sec_session_initialize,
	.session_configure    = dpaa2_sec_session_configure,
	.session_clear	      = dpaa2_sec_session_clear,
};
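
/* Tear-down counterpart of dpaa2_sec_dev_init(). */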
static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}
	/* Open the rte device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		PMD_INIT_LOG(ERR,
			     "Error in allocating the memory for dpsec object");
		return -1;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
			     retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR,
			     "Cannot get dpsec device attributes: Error = %x",
			     retcode);
		goto init_error;
	}
	sprintf(cryptodev->data->name, "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -1;
}
static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}
static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	/* free crypto device */
	rte_cryptodev_pmd_release_device(cryptodev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->device = NULL;
	cryptodev->data = NULL;

	return 0;
}
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_type = DPAA2_MC_DPSECI_DEVID,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_sec_pmd, rte_dpaa2_sec_driver);