/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_event_crypto_adapter.h>

#include "cn10k_cryptodev.h"
#include "cn10k_cryptodev_ops.h"
#include "cn10k_ipsec_la_ops.h"
#include "cn10k_ipsec.h"
#include "cnxk_ae.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_se.h"
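
/*
 * Build a temporary symmetric session for a sessionless crypto op. The
 * session object is taken from the queue pair's session mempool and is
 * released again in dequeue post-processing once the op completes.
 */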
static inline struct cnxk_se_sess *
cn10k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
{
	const int driver_id = cn10k_cryptodev_driver_id;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_cryptodev_sym_session *sess;
	struct cnxk_se_sess *priv;
	int ret;

	/* Create temporary session */
	sess = rte_cryptodev_sym_session_create(qp->sess_mp);
	if (sess == NULL)
		return NULL;

	ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
				    sess, qp->sess_mp_priv);
	if (ret)
		goto sess_put;

	priv = get_sym_session_private_data(sess, driver_id);

	sym_op->session = sess;

	return priv;

sess_put:
	rte_mempool_put(qp->sess_mp, sess);
	return NULL;
}
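
/*
 * Prepare a CPT instruction for an op on a security (inline IPsec) session.
 * Only in-place ops on contiguous mbufs are handled here; the SA direction
 * in word2 selects outbound vs. inbound processing.
 */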
static __rte_always_inline int __rte_hot
cpt_sec_inst_fill(struct rte_crypto_op *op, struct cn10k_sec_session *sess,
		  struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	union roc_ot_ipsec_sa_word2 *w2;
	struct cn10k_ipsec_sa *sa;
	int ret;

	if (unlikely(sym_op->m_dst && sym_op->m_dst != sym_op->m_src)) {
		plt_dp_err("Out of place is not supported");
		return -ENOTSUP;
	}

	if (unlikely(!rte_pktmbuf_is_contiguous(sym_op->m_src))) {
		plt_dp_err("Scatter Gather mode is not supported");
		return -ENOTSUP;
	}

	sa = &sess->sa;
	w2 = (union roc_ot_ipsec_sa_word2 *)&sa->in_sa.w2;

	if (w2->s.dir == ROC_IE_SA_DIR_OUTBOUND)
		ret = process_outb_sa(op, sa, inst);
	else {
		infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;
		ret = process_inb_sa(op, sa, inst);
	}

	return ret;
}
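
/*
 * Prepare a CPT instruction for a regular symmetric session. Ops with a
 * cipher component (including cipher+auth chains) go through the
 * flexi-crypto parameter path, auth-only ops through the digest path.
 */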
static __rte_always_inline int __rte_hot
cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
		  struct cnxk_se_sess *sess, struct cpt_inflight_req *infl_req,
		  struct cpt_inst_s *inst)
{
	uint64_t cpt_op;
	int ret;

	cpt_op = sess->cpt_op;

	if (cpt_op & ROC_SE_OP_CIPHER_MASK)
		ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
	else
		ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
					 inst);

	return ret;
}
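
/*
 * Fill one CPT instruction for a crypto op. Returns 1 on success and 0 on
 * failure, so callers can treat the return value as the number of
 * instructions prepared. Instruction word7 comes from the session
 * (security, symmetric or asymmetric) that the op carries.
 */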
static __rte_always_inline int __rte_hot
cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
		    struct cpt_inst_s inst[], struct cpt_inflight_req *infl_req)
{
	struct cn10k_sec_session *sec_sess;
	struct rte_crypto_asym_op *asym_op;
	struct rte_crypto_sym_op *sym_op;
	struct cnxk_ae_sess *ae_sess;
	struct cnxk_se_sess *sess;
	struct rte_crypto_op *op;
	uint64_t w7;
	int ret;

	op = ops[0];
	sym_op = op->sym;

	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			sec_sess = get_sec_session_private_data(
				sym_op->sec_session);
			ret = cpt_sec_inst_fill(op, sec_sess, infl_req,
						&inst[0]);
			if (unlikely(ret))
				return 0;
			w7 = sec_sess->sa.inst.w7;
		} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			sess = get_sym_session_private_data(
				sym_op->session, cn10k_cryptodev_driver_id);
			ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
						&inst[0]);
			if (unlikely(ret))
				return 0;
			w7 = sess->cpt_inst_w7;
		} else {
			/* Sessionless op, create a temporary session */
			sess = cn10k_cpt_sym_temp_sess_create(qp, op);
			if (unlikely(sess == NULL)) {
				plt_dp_err("Could not create temp session");
				return 0;
			}

			ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
						&inst[0]);
			if (unlikely(ret)) {
				sym_session_clear(cn10k_cryptodev_driver_id,
						  op->sym->session);
				rte_mempool_put(qp->sess_mp, op->sym->session);
				return 0;
			}
			w7 = sess->cpt_inst_w7;
		}
	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			asym_op = op->asym;
			ae_sess = get_asym_session_private_data(
				asym_op->session, cn10k_cryptodev_driver_id);
			ret = cnxk_ae_enqueue(qp, op, infl_req, &inst[0],
					      ae_sess);
			if (unlikely(ret))
				return 0;
			w7 = ae_sess->cpt_inst_w7;
		} else {
			plt_dp_err("Not supported Asym op without session");
			return 0;
		}
	} else {
		plt_dp_err("Unsupported op type");
		return 0;
	}

	inst[0].res_addr = (uint64_t)&infl_req->res;
	infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
	infl_req->cop = op;

	inst[0].w7.u64 = w7;

	return 1;
}

#define PKTS_PER_LOOP	32
#define PKTS_PER_STEORL 16
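
/*
 * Enqueue burst: instructions are built in the LMT line and submitted with
 * STEORL in chunks of at most PKTS_PER_STEORL, handling up to PKTS_PER_LOOP
 * ops per iteration. The pending queue head is published only after a
 * release fence so the dequeue side never observes a half-written request.
 */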
static uint16_t
cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint64_t lmt_base, lmt_arg, io_addr;
	struct cpt_inflight_req *infl_req;
	uint16_t nb_allowed, count = 0;
	struct cnxk_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	struct cpt_inst_s *inst;
	uint16_t lmt_id;
	uint64_t head;
	int ret, i;

	pend_q = &qp->pend_q;

	const uint64_t pq_mask = pend_q->pq_mask;

	head = pend_q->head;
	nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);
	nb_ops = RTE_MIN(nb_ops, nb_allowed);

	if (unlikely(nb_ops == 0))
		return 0;

	lmt_base = qp->lmtline.lmt_base;
	io_addr = qp->lmtline.io_addr;

	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
	inst = (struct cpt_inst_s *)lmt_base;

again:
	for (i = 0; i < RTE_MIN(PKTS_PER_LOOP, nb_ops); i++) {
		infl_req = &pend_q->req_queue[head];
		infl_req->op_flags = 0;

		ret = cn10k_cpt_fill_inst(qp, ops + i, &inst[2 * i], infl_req);
		if (unlikely(ret != 1)) {
			plt_dp_err("Could not process op: %p", ops + i);
			if (i == 0)
				goto pend_q_commit;
			break;
		}

		pending_queue_advance(&head, pq_mask);
	}

	if (i > PKTS_PER_STEORL) {
		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 |
			  (uint64_t)lmt_id;
		roc_lmt_submit_steorl(lmt_arg, io_addr);
		lmt_arg = ROC_CN10K_CPT_LMT_ARG |
			  (i - PKTS_PER_STEORL - 1) << 12 |
			  (uint64_t)(lmt_id + PKTS_PER_STEORL);
		roc_lmt_submit_steorl(lmt_arg, io_addr);
	} else {
		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 |
			  (uint64_t)lmt_id;
		roc_lmt_submit_steorl(lmt_arg, io_addr);
	}

	rte_io_wmb();

	if (nb_ops - i > 0 && i == PKTS_PER_LOOP) {
		nb_ops -= i;
		ops += i;
		count += i;
		goto again;
	}

pend_q_commit:
	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	pend_q->head = head;
	pend_q->time_out = rte_get_timer_cycles() +
			   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	return count + i;
}
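
/*
 * Event crypto adapter enqueue: the target queue pair is resolved from the
 * op's event crypto metadata, and the response event (flow id, sched type,
 * queue id) is encoded into instruction words 2 and 3 so the completion can
 * be delivered as an event.
 */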
uint16_t
cn10k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
{
	union rte_event_crypto_metadata *ec_mdata;
	struct cpt_inflight_req *infl_req;
	struct rte_event *rsp_info;
	uint64_t lmt_base, lmt_arg;
	struct cpt_inst_s *inst;
	struct cnxk_cpt_qp *qp;
	uint8_t cdev_id;
	uint16_t lmt_id;
	uint16_t qp_id;
	int ret;

	ec_mdata = cnxk_event_crypto_mdata_get(op);
	if (!ec_mdata) {
		rte_errno = EINVAL;
		return 0;
	}

	cdev_id = ec_mdata->request_info.cdev_id;
	qp_id = ec_mdata->request_info.queue_pair_id;
	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
	rsp_info = &ec_mdata->response_info;

	if (unlikely(!qp->ca.enabled)) {
		rte_errno = EINVAL;
		return 0;
	}

	if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
		rte_errno = ENOMEM;
		return 0;
	}
	infl_req->op_flags = 0;

	lmt_base = qp->lmtline.lmt_base;
	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
	inst = (struct cpt_inst_s *)lmt_base;

	ret = cn10k_cpt_fill_inst(qp, &op, inst, infl_req);
	if (unlikely(ret != 1)) {
		plt_dp_err("Could not process op: %p", op);
		rte_mempool_put(qp->ca.req_mp, infl_req);
		rte_errno = EINVAL;
		return 0;
	}

	infl_req->cop = op;
	infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
	infl_req->qp = qp;
	inst->res_addr = (uint64_t)&infl_req->res;
	inst->w2.u64 = CNXK_CPT_INST_W2(
		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
		rsp_info->sched_type, rsp_info->queue_id, 0);
	inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);

	if (roc_cpt_is_iq_full(&qp->lf)) {
		rte_mempool_put(qp->ca.req_mp, infl_req);
		rte_errno = EAGAIN;
		return 0;
	}

	if (!rsp_info->sched_type)
		roc_sso_hws_head_wait(tag_op);

	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
	roc_lmt_submit_steorl(lmt_arg, qp->lmtline.io_addr);

	return 1;
}
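
/*
 * For security (inline IPsec) ops the processed packet length is returned
 * by hardware in the response; propagate it to the mbuf.
 */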
static inline void
cn10k_cpt_sec_post_process(struct rte_crypto_op *cop,
			   struct cpt_cn10k_res_s *res)
{
	struct rte_mbuf *m = cop->sym->m_src;
	const uint16_t m_len = res->rlen;

	m->data_len = m_len;
	m->pkt_len = m_len;
}
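
/*
 * Map the microcode completion code of a security op to op/mbuf flags.
 * Soft expiry is reported through aux_flags; for inbound packets the
 * checksum results from microcode are translated into
 * RTE_MBUF_F_RX_*_CKSUM_* mbuf flags.
 */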
static inline void
cn10k_cpt_sec_ucc_process(struct rte_crypto_op *cop,
			  struct cpt_inflight_req *infl_req,
			  const uint8_t uc_compcode)
{
	struct cn10k_sec_session *sess;
	struct cn10k_ipsec_sa *sa;
	struct rte_mbuf *mbuf;

	if (uc_compcode == ROC_IE_OT_UCC_SUCCESS_SA_SOFTEXP_FIRST)
		cop->aux_flags = RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY;

	if (!(infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND))
		return;

	sess = get_sec_session_private_data(cop->sym->sec_session);
	sa = &sess->sa;

	mbuf = cop->sym->m_src;

	switch (uc_compcode) {
	case ROC_IE_OT_UCC_SUCCESS:
		if (sa->ip_csum_enable)
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		break;
	case ROC_IE_OT_UCC_SUCCESS_PKT_IP_BADCSUM:
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		break;
	case ROC_IE_OT_UCC_SUCCESS_PKT_L4_GOODCSUM:
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		if (sa->ip_csum_enable)
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		break;
	case ROC_IE_OT_UCC_SUCCESS_PKT_L4_BADCSUM:
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		if (sa->ip_csum_enable)
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		break;
	default:
		break;
	}
}
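
/*
 * Post-process a completed op: translate the hardware and microcode
 * completion codes into the rte_crypto op status, run the security, auth
 * verification or asymmetric fix-ups, and, for sessionless ops, free the
 * temporary session created at enqueue time.
 */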
static inline void
cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
			       struct rte_crypto_op *cop,
			       struct cpt_inflight_req *infl_req)
{
	struct cpt_cn10k_res_s *res = (struct cpt_cn10k_res_s *)&infl_req->res;
	const uint8_t uc_compcode = res->uc_compcode;
	const uint8_t compcode = res->compcode;
	unsigned int sz;

	cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
	    cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		if (likely(compcode == CPT_COMP_WARN)) {
			/* Success with additional info */
			cn10k_cpt_sec_ucc_process(cop, infl_req, uc_compcode);
			cn10k_cpt_sec_post_process(cop, res);
		} else {
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			plt_dp_info("HW completion code 0x%x", res->compcode);
			if (compcode == CPT_COMP_GOOD) {
				plt_dp_info(
					"Request failed with microcode error");
				plt_dp_info("MC completion code 0x%x",
					    uc_compcode);
			}
		}

		return;
	}

	if (likely(compcode == CPT_COMP_GOOD || compcode == CPT_COMP_WARN)) {
		if (unlikely(uc_compcode)) {
			if (uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
				cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

			plt_dp_info("Request failed with microcode error");
			plt_dp_info("MC completion code 0x%x",
				    uc_compcode);
			goto temp_sess_free;
		}

		if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			/* Verify authentication data if required */
			if (unlikely(infl_req->op_flags &
				     CPT_OP_FLAGS_AUTH_VERIFY)) {
				uintptr_t *rsp = infl_req->mdata;

				compl_auth_verify(cop, (uint8_t *)rsp[0],
						  rsp[1]);
			}
		} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
			struct rte_crypto_asym_op *op = cop->asym;
			uintptr_t *mdata = infl_req->mdata;
			struct cnxk_ae_sess *sess;

			sess = get_asym_session_private_data(
				op->session, cn10k_cryptodev_driver_id);

			cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
		}
	} else {
		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		plt_dp_info("HW completion code 0x%x", res->compcode);

		switch (compcode) {
		case CPT_COMP_INSTERR:
			plt_dp_err("Request failed with instruction error");
			break;
		case CPT_COMP_FAULT:
			plt_dp_err("Request failed with DMA fault");
			break;
		case CPT_COMP_HWERR:
			plt_dp_err("Request failed with hardware error");
			break;
		default:
			plt_dp_err(
				"Request failed with unknown completion code");
		}
	}

temp_sess_free:
	if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			sym_session_clear(cn10k_cryptodev_driver_id,
					  cop->sym->session);
			sz = rte_cryptodev_sym_get_existing_header_session_size(
				cop->sym->session);
			memset(cop->sym->session, 0, sz);
			rte_mempool_put(qp->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}
	}
}
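
/*
 * Called from the event dequeue path: get_work1 carries the inflight
 * request pointer, which in turn holds the crypto op and the originating
 * queue pair.
 */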
uintptr_t
cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
{
	struct cpt_inflight_req *infl_req;
	struct rte_crypto_op *cop;
	struct cnxk_cpt_qp *qp;

	infl_req = (struct cpt_inflight_req *)(get_work1);
	cop = infl_req->cop;
	qp = infl_req->qp;

	cn10k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req);

	if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
		rte_mempool_put(qp->meta_info.pool, infl_req->mdata);

	rte_mempool_put(qp->ca.req_mp, infl_req);

	return (uintptr_t)cop;
}
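
/*
 * Dequeue burst: walk the pending queue from the tail, stop at the first
 * request whose completion code is still CPT_COMP_NOT_DONE, and post-process
 * the rest. The acquire fence pairs with the release fence on the enqueue
 * side before the inflight entries are trusted.
 */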
static uint16_t
cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_inflight_req *infl_req;
	struct cnxk_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	struct cpt_cn10k_res_s *res;
	uint64_t infl_cnt, pq_tail;
	struct rte_crypto_op *cop;
	int i;

	pend_q = &qp->pend_q;

	const uint64_t pq_mask = pend_q->pq_mask;

	pq_tail = pend_q->tail;
	infl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);
	nb_ops = RTE_MIN(nb_ops, infl_cnt);

	/* Ensure infl_cnt isn't read before data lands */
	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

	for (i = 0; i < nb_ops; i++) {
		infl_req = &pend_q->req_queue[pq_tail];

		res = (struct cpt_cn10k_res_s *)&infl_req->res;

		if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
			if (unlikely(rte_get_timer_cycles() >
				     pend_q->time_out)) {
				plt_err("Request timed out");
				pend_q->time_out = rte_get_timer_cycles() +
						   DEFAULT_COMMAND_TIMEOUT *
							   rte_get_timer_hz();
			}
			break;
		}

		pending_queue_advance(&pq_tail, pq_mask);

		cop = infl_req->cop;
		ops[i] = cop;

		cn10k_cpt_dequeue_post_process(qp, cop, infl_req);

		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
			rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
	}

	pend_q->tail = pq_tail;

	return i;
}

void
cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
	dev->enqueue_burst = cn10k_cpt_enqueue_burst;
	dev->dequeue_burst = cn10k_cpt_dequeue_burst;

	rte_mb();
}

static void
cn10k_cpt_dev_info_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	if (info != NULL) {
		cnxk_cpt_dev_info_get(dev, info);
		info->driver_id = cn10k_cryptodev_driver_id;
	}
}

struct rte_cryptodev_ops cn10k_cpt_ops = {
	/* Device control ops */
	.dev_configure = cnxk_cpt_dev_config,
	.dev_start = cnxk_cpt_dev_start,
	.dev_stop = cnxk_cpt_dev_stop,
	.dev_close = cnxk_cpt_dev_close,
	.dev_infos_get = cn10k_cpt_dev_info_get,

	.queue_pair_setup = cnxk_cpt_queue_pair_setup,
	.queue_pair_release = cnxk_cpt_queue_pair_release,

	/* Symmetric crypto ops */
	.sym_session_get_size = cnxk_cpt_sym_session_get_size,
	.sym_session_configure = cnxk_cpt_sym_session_configure,
	.sym_session_clear = cnxk_cpt_sym_session_clear,

	/* Asymmetric crypto ops */
	.asym_session_get_size = cnxk_ae_session_size_get,
	.asym_session_configure = cnxk_ae_session_cfg,
	.asym_session_clear = cnxk_ae_session_clear,
};