1 /* SPDX-License-Identifier: BSD-3-Clause
5 #include <cryptodev_pmd.h>
7 #include <fslmc_vfio.h>
8 #include <dpaa2_hw_pvt.h>
9 #include <dpaa2_hw_dpio.h>
11 #include "dpaa2_sec_priv.h"
12 #include "dpaa2_sec_logs.h"
/* Per-queue-pair driver context for the raw data-path API.  It lives in
 * rte_crypto_raw_dp_ctx::drv_ctx_data and is sized by
 * dpaa2_sec_get_dp_ctx_size().
 */
14 struct dpaa2_sec_raw_dp_ctx {
/* Session whose build_raw_dp_fd callback constructs frame descriptors. */
15 dpaa2_sec_session *session;
/* Bookkeeping counters for operations batched on this context. */
18 uint16_t cached_enqueue;
19 uint16_t cached_dequeue;
/* Build a QBMAN frame descriptor for a cipher+auth chained operation.
 * Installed as sess->build_raw_dp_fd for DPAA2_SEC_CIPHER_HASH sessions
 * by dpaa2_sec_configure_raw_dp_ctx().
 */
23 build_raw_dp_chain_fd(uint8_t *drv_ctx,
24 struct rte_crypto_sgl *sgl,
25 struct rte_crypto_va_iova_ptr *iv,
26 struct rte_crypto_va_iova_ptr *digest,
27 struct rte_crypto_va_iova_ptr *auth_iv,
28 union rte_crypto_sym_ofs ofs,
/* RTE_SET_USED silences -Wunused-parameter for inputs this builder
 * does not reference. */
32 RTE_SET_USED(drv_ctx);
36 RTE_SET_USED(auth_iv);
38 RTE_SET_USED(userdata);
/* Build a QBMAN frame descriptor for an AEAD operation.  Installed as
 * sess->build_raw_dp_fd for DPAA2_SEC_AEAD sessions by
 * dpaa2_sec_configure_raw_dp_ctx().
 */
45 build_raw_dp_aead_fd(uint8_t *drv_ctx,
46 struct rte_crypto_sgl *sgl,
47 struct rte_crypto_va_iova_ptr *iv,
48 struct rte_crypto_va_iova_ptr *digest,
49 struct rte_crypto_va_iova_ptr *auth_iv,
50 union rte_crypto_sym_ofs ofs,
/* RTE_SET_USED silences -Wunused-parameter for inputs this builder
 * does not reference. */
54 RTE_SET_USED(drv_ctx);
58 RTE_SET_USED(auth_iv);
60 RTE_SET_USED(userdata);
/* Build a QBMAN frame descriptor for an auth-only operation.  Installed
 * as sess->build_raw_dp_fd for DPAA2_SEC_AUTH sessions by
 * dpaa2_sec_configure_raw_dp_ctx().
 */
67 build_raw_dp_auth_fd(uint8_t *drv_ctx,
68 struct rte_crypto_sgl *sgl,
69 struct rte_crypto_va_iova_ptr *iv,
70 struct rte_crypto_va_iova_ptr *digest,
71 struct rte_crypto_va_iova_ptr *auth_iv,
72 union rte_crypto_sym_ofs ofs,
/* RTE_SET_USED silences -Wunused-parameter for inputs this builder
 * does not reference. */
76 RTE_SET_USED(drv_ctx);
80 RTE_SET_USED(auth_iv);
82 RTE_SET_USED(userdata);
/* Build a QBMAN frame descriptor for a protocol-offload (IPsec)
 * operation.  Installed as sess->build_raw_dp_fd for DPAA2_SEC_IPSEC
 * sessions by dpaa2_sec_configure_raw_dp_ctx().
 */
89 build_raw_dp_proto_fd(uint8_t *drv_ctx,
90 struct rte_crypto_sgl *sgl,
91 struct rte_crypto_va_iova_ptr *iv,
92 struct rte_crypto_va_iova_ptr *digest,
93 struct rte_crypto_va_iova_ptr *auth_iv,
94 union rte_crypto_sym_ofs ofs,
/* Protocol offload needs no per-op digest/auth-IV pointers; the
 * security association in the session carries them. */
98 RTE_SET_USED(drv_ctx);
101 RTE_SET_USED(digest);
102 RTE_SET_USED(auth_iv);
104 RTE_SET_USED(userdata);
/* Build a compound QBMAN frame descriptor for a protocol-offload (PDCP)
 * operation.  Installed as sess->build_raw_dp_fd for DPAA2_SEC_PDCP
 * sessions by dpaa2_sec_configure_raw_dp_ctx().
 */
111 build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
112 struct rte_crypto_sgl *sgl,
113 struct rte_crypto_va_iova_ptr *iv,
114 struct rte_crypto_va_iova_ptr *digest,
115 struct rte_crypto_va_iova_ptr *auth_iv,
116 union rte_crypto_sym_ofs ofs,
/* Protocol offload needs no per-op digest/auth-IV pointers; the
 * session configuration carries them. */
120 RTE_SET_USED(drv_ctx);
123 RTE_SET_USED(digest);
124 RTE_SET_USED(auth_iv);
126 RTE_SET_USED(userdata);
/* Build a compound frame descriptor for a cipher-only operation over a
 * scatter-gather list.  Installed as sess->build_raw_dp_fd for
 * DPAA2_SEC_CIPHER sessions by dpaa2_sec_configure_raw_dp_ctx().
 */
133 build_raw_dp_cipher_fd(uint8_t *drv_ctx,
134 struct rte_crypto_sgl *sgl,
135 struct rte_crypto_va_iova_ptr *iv,
136 struct rte_crypto_va_iova_ptr *digest,
137 struct rte_crypto_va_iova_ptr *auth_iv,
138 union rte_crypto_sym_ofs ofs,
/* Cipher-only operations have no digest or auth IV. */
142 RTE_SET_USED(digest);
143 RTE_SET_USED(auth_iv);
145 dpaa2_sec_session *sess =
146 ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
147 struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
148 int total_len = 0, data_len = 0, data_offset;
149 struct sec_flow_context *flc;
150 struct ctxt_priv *priv = sess->ctxt;
/* Total payload length across all SG segments. */
153 for (i = 0; i < sgl->num; i++)
154 total_len += sgl->vec[i].len;
/* Region to cipher = total minus the head/tail offsets requested. */
156 data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
157 data_offset = ofs.ofs.cipher.head;
/* SNOW3G/ZUC offsets are expressed in bits: they must be byte aligned
 * and are converted to bytes (>> 3) before building the FD. */
159 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
160 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
161 if ((data_len & 7) || (data_offset & 7)) {
162 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
166 data_len = data_len >> 3;
167 data_offset = data_offset >> 3;
170 /* first FLE entry used to store mbuf and session ctxt */
/* One allocation holds the FLE table plus input/output SGEs, sized for
 * two SGE lists over sgl->num segments. */
171 fle = (struct qbman_fle *)rte_malloc(NULL,
172 FLE_SG_MEM_SIZE(2*sgl->num),
173 RTE_CACHE_LINE_SIZE);
175 DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
178 memset(fle, 0, FLE_SG_MEM_SIZE(2*sgl->num));
179 /* first FLE entry used to store userdata and session ctxt */
/* Stash userdata and session priv in the base FLE so the dequeue path
 * (sec_fd_to_userdata) can recover and free them. */
180 DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
181 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
/* Flow context holding the SEC descriptor for this session. */
187 flc = &priv->flc_desc[0].flc;
190 "RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
/* Output FLE: SG list covering the ciphered payload only. */
196 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
197 op_fle->length = data_len;
198 DPAA2_SET_FLE_SG_EXT(op_fle);
/* First output SGE skips the cipher head offset inside segment 0. */
201 DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
202 DPAA2_SET_FLE_OFFSET(sge, data_offset);
203 sge->length = sgl->vec[0].len - data_offset;
/* Remaining segments are mapped whole. */
206 for (i = 1; i < sgl->num; i++) {
208 DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
209 DPAA2_SET_FLE_OFFSET(sge, 0);
210 sge->length = sgl->vec[i].len;
212 DPAA2_SET_FLE_FIN(sge);
215 "RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
216 flc, fle, fle->addr_hi, fle->addr_lo,
/* Input FLE: IV followed by the payload segments. */
221 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
222 ip_fle->length = sess->iv.length + data_len;
223 DPAA2_SET_FLE_SG_EXT(ip_fle);
/* IV occupies the first input SGE. */
226 DPAA2_SET_FLE_ADDR(sge, iv->iova);
227 DPAA2_SET_FLE_OFFSET(sge, 0);
228 sge->length = sess->iv.length;
/* Then the payload, mirroring the output SG layout. */
233 DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
234 DPAA2_SET_FLE_OFFSET(sge, data_offset);
235 sge->length = sgl->vec[0].len - data_offset;
238 for (i = 1; i < sgl->num; i++) {
240 DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
241 DPAA2_SET_FLE_OFFSET(sge, 0);
242 sge->length = sgl->vec[i].len;
244 DPAA2_SET_FLE_FIN(sge);
245 DPAA2_SET_FLE_FIN(ip_fle);
/* Point the FD at the FLE pair, mark it compound format and attach the
 * flow context so SEC knows which descriptor to run. */
248 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
249 DPAA2_SET_FD_LEN(fd, ip_fle->length);
250 DPAA2_SET_FD_COMPOUND_FMT(fd);
251 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
254 "RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
255 DPAA2_GET_FD_ADDR(fd),
256 DPAA2_GET_FD_OFFSET(fd),
257 DPAA2_GET_FD_LEN(fd));
/* Enqueue a burst of raw symmetric crypto operations on the queue
 * pair's TX frame queue.  FDs are built per-operation via the session's
 * build_raw_dp_fd callback, then pushed through the lcore's QBMAN
 * software portal.
 */
262 static __rte_always_inline uint32_t
263 dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
264 struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
265 void *user_data[], int *status)
267 RTE_SET_USED(user_data);
270 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
271 uint32_t frames_to_send, retry_count;
272 struct qbman_eq_desc eqdesc;
273 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
274 dpaa2_sec_session *sess =
275 ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
276 struct qbman_swp *swp;
278 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
/* Nothing to do for an empty vector. */
280 if (unlikely(vec->num == 0))
/* Only session-based operation is supported on this path. */
284 DPAA2_SEC_ERR("sessionless raw crypto not supported");
287 /*Prepare enqueue descriptor*/
288 qbman_eq_desc_clear(&eqdesc);
289 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
290 qbman_eq_desc_set_response(&eqdesc, 0, 0);
291 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
/* Lazily affine a QBMAN software portal to this lcore. */
293 if (!DPAA2_PER_LCORE_DPIO) {
294 ret = dpaa2_affine_qbman_swp();
297 "Failed to allocate IO portal, tid: %d\n",
302 swp = DPAA2_PER_LCORE_PORTAL;
/* Cap each batch at the enqueue command ring size. */
305 frames_to_send = (vec->num > dpaa2_eqcr_size) ?
306 dpaa2_eqcr_size : vec->num;
308 for (loop = 0; loop < frames_to_send; loop++) {
309 /*Clear the unused FD fields before sending*/
310 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
/* Builder selected per session type in
 * dpaa2_sec_configure_raw_dp_ctx(). */
311 ret = sess->build_raw_dp_fd(drv_ctx,
320 DPAA2_SEC_ERR("error: Improper packet contents"
321 " for crypto operation");
/* Push the batch; retry while the portal reports busy, up to
 * DPAA2_MAX_TX_RETRY_COUNT. */
329 while (loop < frames_to_send) {
330 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
333 frames_to_send - loop);
334 if (unlikely(ret < 0)) {
336 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
/* NOTE(review): error-path accounting — err_pkts is bumped by the
 * full request size here; confirm against the surrounding retry
 * logic. */
351 dpaa2_qp->tx_vq.tx_pkts += num_tx;
352 dpaa2_qp->tx_vq.err_pkts += vec->num;
/* Single-operation raw enqueue callback.  All parameters are currently
 * marked unused; the burst variant (dpaa2_sec_raw_enqueue_burst) is the
 * supported submission path.
 */
357 static __rte_always_inline int
358 dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
359 struct rte_crypto_vec *data_vec,
360 uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
361 struct rte_crypto_va_iova_ptr *iv,
362 struct rte_crypto_va_iova_ptr *digest,
363 struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
366 RTE_SET_USED(qp_data);
367 RTE_SET_USED(drv_ctx);
368 RTE_SET_USED(data_vec);
369 RTE_SET_USED(n_data_vecs);
372 RTE_SET_USED(digest);
373 RTE_SET_USED(aad_or_auth_iv);
374 RTE_SET_USED(user_data);
/* Recover the caller's userdata pointer from a completed frame
 * descriptor and release the FLE memory allocated at FD-build time.
 */
380 sec_fd_to_userdata(const struct qbman_fd *fd)
382 struct qbman_fle *fle;
/* FD address is the IOVA of an FLE inside the table allocated by the
 * build_raw_dp_*_fd routines. */
384 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
386 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
387 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
/* The preceding FLE slot carries the userdata pointer stored at build
 * time (see DPAA2_SET_FLE_ADDR(fle, userdata) in the builders). */
388 userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
389 /* free the fle memory */
390 rte_free((void *)(fle-1));
/* Dequeue completed crypto operations from the queue pair's RX frame
 * queue via a QBMAN volatile pull, report per-op status through
 * post_dequeue(), and return the number of packets received.
 */
395 static __rte_always_inline uint32_t
396 dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
397 rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
398 uint32_t max_nb_to_dequeue,
399 rte_cryptodev_raw_post_dequeue_t post_dequeue,
400 void **out_user_data, uint8_t is_user_data_array,
401 uint32_t *n_success, int *dequeue_status)
403 RTE_SET_USED(drv_ctx);
404 RTE_SET_USED(get_dequeue_count);
406 /* Function is responsible to receive frames for a given device and VQ*/
407 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
408 struct qbman_result *dq_storage;
409 uint32_t fqid = dpaa2_qp->rx_vq.fqid;
411 uint8_t is_last = 0, status;
412 struct qbman_swp *swp;
413 const struct qbman_fd *fd;
414 struct qbman_pull_desc pulldesc;
416 uint32_t nb_ops = max_nb_to_dequeue;
/* Lazily affine a QBMAN software portal to this lcore. */
418 if (!DPAA2_PER_LCORE_DPIO) {
419 ret = dpaa2_affine_qbman_swp();
422 "Failed to allocate IO portal, tid: %d\n",
427 swp = DPAA2_PER_LCORE_PORTAL;
428 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
/* Build the volatile pull: request up to the DQRR ring size frames
 * into the queue pair's dequeue storage. */
430 qbman_pull_desc_clear(&pulldesc);
431 qbman_pull_desc_set_numframes(&pulldesc,
432 (nb_ops > dpaa2_dqrr_size) ?
433 dpaa2_dqrr_size : nb_ops);
434 qbman_pull_desc_set_fq(&pulldesc, fqid);
435 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
436 (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
439 /*Issue a volatile dequeue command. */
441 if (qbman_swp_pull(swp, &pulldesc)) {
443 "SEC VDQ command is not issued : QBMAN busy");
444 /* Portal was busy, try again */
450 /* Receive the packets till Last Dequeue entry is found with
451 * respect to the above issues PULL command.
454 /* Check if the previous issued command is completed.
455 * Also seems like the SWP is shared between the Ethernet Driver
456 * and the SEC driver.
458 while (!qbman_check_command_complete(dq_storage))
461 /* Loop until the dq_storage is updated with
464 while (!qbman_check_new_result(dq_storage))
466 /* Check whether Last Pull command is Expired and
467 * setting Condition for Loop termination
469 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
471 /* Check for valid frame. */
472 status = (uint8_t)qbman_result_DQ_flags(dq_storage);
474 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
475 DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
/* Translate the FD back to the caller's userdata (frees the FLE
 * table) and record it per the caller's array mode. */
480 fd = qbman_result_DQ_fd(dq_storage);
481 user_data = sec_fd_to_userdata(fd);
482 if (is_user_data_array)
483 out_user_data[num_rx] = user_data;
485 out_user_data[0] = user_data;
/* A non-zero frame context means SEC flagged an error. */
486 if (unlikely(fd->simple.frc)) {
487 /* TODO Parse SEC errors */
488 DPAA2_SEC_ERR("SEC returned Error - %x",
490 status = RTE_CRYPTO_OP_STATUS_ERROR;
492 status = RTE_CRYPTO_OP_STATUS_SUCCESS;
494 post_dequeue(user_data, num_rx, status);
498 } /* End of Packet Rx loop */
500 dpaa2_qp->rx_vq.rx_pkts += num_rx;
504 DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
505 /*Return the total number of packets received to DPAA2 app*/
/* Single-operation raw dequeue callback.  All parameters are currently
 * marked unused; the burst variant (dpaa2_sec_raw_dequeue_burst) is the
 * supported completion path.
 */
509 static __rte_always_inline void *
510 dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
511 enum rte_crypto_op_status *op_status)
513 RTE_SET_USED(qp_data);
514 RTE_SET_USED(drv_ctx);
515 RTE_SET_USED(dequeue_status);
516 RTE_SET_USED(op_status);
/* Raw data-path enqueue_done callback; qp_data and drv_ctx are not
 * consulted here.
 */
521 static __rte_always_inline int
522 dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
524 RTE_SET_USED(qp_data);
525 RTE_SET_USED(drv_ctx);
/* Raw data-path dequeue_done callback; qp_data and drv_ctx are not
 * consulted here.
 */
531 static __rte_always_inline int
532 dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
534 RTE_SET_USED(qp_data);
535 RTE_SET_USED(drv_ctx);
/* Populate a raw data-path context for queue pair @qp_id: install the
 * enqueue/dequeue callbacks, resolve the session from its container
 * (security or symmetric) and select the FD builder matching the
 * session's context type.
 */
542 dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
543 struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
544 enum rte_crypto_op_sess_type sess_type,
545 union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
547 dpaa2_sec_session *sess;
548 struct dpaa2_sec_raw_dp_ctx *dp_ctx;
552 memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
553 raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
/* Extract the driver session from the appropriate container type. */
556 if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
557 sess = (dpaa2_sec_session *)get_sec_session_private_data(
558 session_ctx.sec_sess);
559 else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
560 sess = (dpaa2_sec_session *)get_sym_session_private_data(
561 session_ctx.crypto_sess, cryptodev_driver_id);
/* Wire the framework-facing callbacks defined above. */
564 raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
565 raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
566 raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
567 raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
568 raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
569 raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;
/* Bind the FD builder used by the enqueue path to the session's
 * operation type. */
571 if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
572 sess->build_raw_dp_fd = build_raw_dp_chain_fd;
573 else if (sess->ctxt_type == DPAA2_SEC_AEAD)
574 sess->build_raw_dp_fd = build_raw_dp_aead_fd;
575 else if (sess->ctxt_type == DPAA2_SEC_AUTH)
576 sess->build_raw_dp_fd = build_raw_dp_auth_fd;
577 else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
578 sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
579 else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
580 sess->build_raw_dp_fd = build_raw_dp_proto_fd;
581 else if (sess->ctxt_type == DPAA2_SEC_PDCP)
582 sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
/* Remember the session in the per-context driver data. */
585 dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
586 dp_ctx->session = sess;
/* Report the driver-context size the cryptodev framework must reserve
 * in rte_crypto_raw_dp_ctx::drv_ctx_data for this PMD.
 */
592 dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
594 return sizeof(struct dpaa2_sec_raw_dp_ctx);