/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_event_crypto_adapter.h>
#include <rte_ip.h>
#include <rte_vect.h>

#include "cn9k_cryptodev.h"
#include "cn9k_cryptodev_ops.h"
#include "cn9k_ipsec.h"
#include "cn9k_ipsec_la_ops.h"
#include "cnxk_ae.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_se.h"
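
/*
 * Prepare a CPT instruction for a symmetric crypto op. The session's
 * cpt_op bits pick the fill path: cipher (and cipher+auth) ops go
 * through fill_fc_params(), auth-only ops through fill_digest_params().
 */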
static __rte_always_inline int __rte_hot
cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
		       struct cnxk_se_sess *sess,
		       struct cpt_inflight_req *infl_req,
		       struct cpt_inst_s *inst)
{
	uint64_t cpt_op;
	int ret;

	cpt_op = sess->cpt_op;

	if (cpt_op & ROC_SE_OP_CIPHER_MASK)
		ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
	else
		ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
					 inst);

	return ret;
}
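
/*
 * Prepare a CPT instruction for a lookaside IPsec (security session)
 * op. Out-of-place and multi-segment mbufs are rejected up front; the
 * SA direction selects outbound vs inbound processing, and inbound ops
 * are flagged so that dequeue post-processing can fix up the packet.
 */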
static __rte_always_inline int __rte_hot
cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
		       struct cpt_inflight_req *infl_req,
		       struct cpt_inst_s *inst)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct cn9k_sec_session *priv;
	struct cn9k_ipsec_sa *sa;

	if (unlikely(sym_op->m_dst && sym_op->m_dst != sym_op->m_src)) {
		plt_dp_err("Out of place is not supported");
		return -ENOTSUP;
	}

	if (unlikely(!rte_pktmbuf_is_contiguous(sym_op->m_src))) {
		plt_dp_err("Scatter Gather mode is not supported");
		return -ENOTSUP;
	}

	priv = get_sec_session_private_data(op->sym->sec_session);
	sa = &priv->sa;

	if (sa->dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		return process_outb_sa(op, sa, inst);

	infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;

	return process_inb_sa(op, sa, inst);
}
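
/*
 * Create a temporary session for a sessionless symmetric op. The
 * session comes from the queue pair's session mempool and is released
 * again during dequeue post-processing.
 */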
static inline struct cnxk_se_sess *
cn9k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
{
	const int driver_id = cn9k_cryptodev_driver_id;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_cryptodev_sym_session *sess;
	struct cnxk_se_sess *priv;
	int ret;

	/* Create temporary session */
	sess = rte_cryptodev_sym_session_create(qp->sess_mp);
	if (sess == NULL)
		return NULL;

	ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
				    sess, qp->sess_mp_priv);
	if (ret)
		goto sess_put;

	priv = get_sym_session_private_data(sess, driver_id);

	sym_op->session = sess;

	return priv;

sess_put:
	rte_mempool_put(qp->sess_mp, sess);
	return NULL;
}
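
/*
 * Build the CPT instruction for one op, dispatching on op type
 * (symmetric vs asymmetric) and session type (session-based, security,
 * or sessionless). w7 is taken from the session's precomputed
 * cpt_inst_w7.
 */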
static inline int
cn9k_cpt_inst_prep(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
		   struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
	int ret;

	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		struct rte_crypto_sym_op *sym_op;
		struct cnxk_se_sess *sess;

		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			sym_op = op->sym;
			sess = get_sym_session_private_data(
				sym_op->session, cn9k_cryptodev_driver_id);
			ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
						     inst);
			inst->w7.u64 = sess->cpt_inst_w7;
		} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
			ret = cn9k_cpt_sec_inst_fill(op, infl_req, inst);
		else {
			sess = cn9k_cpt_sym_temp_sess_create(qp, op);
			if (unlikely(sess == NULL)) {
				plt_dp_err("Could not create temp session");
				return -1;
			}

			ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
						     inst);
			if (unlikely(ret)) {
				sym_session_clear(cn9k_cryptodev_driver_id,
						  op->sym->session);
				rte_mempool_put(qp->sess_mp, op->sym->session);
			}
			inst->w7.u64 = sess->cpt_inst_w7;
		}
	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		struct rte_crypto_asym_op *asym_op;
		struct cnxk_ae_sess *sess;

		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			asym_op = op->asym;
			sess = (struct cnxk_ae_sess *)
				asym_op->session->sess_private_data;
			ret = cnxk_ae_enqueue(qp, op, infl_req, inst, sess);
			inst->w7.u64 = sess->cpt_inst_w7;
		} else {
			plt_dp_err("Not supported Asym op without session");
			ret = -EINVAL;
		}
	} else {
		plt_dp_err("Unsupported op type");
		ret = -EINVAL;
	}

	return ret;
}
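
/*
 * Submit a single 64 B instruction over the LMT line.
 * roc_lmt_submit_ldeor() returns zero when the LMTST did not complete,
 * in which case the copy and submit are retried.
 */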
static inline void
cn9k_cpt_inst_submit(struct cpt_inst_s *inst, uint64_t lmtline,
		     uint64_t io_addr)
{
	uint64_t lmt_status;

	do {
		/* Copy CPT command to LMTLINE */
		roc_lmt_mov64((void *)lmtline, inst);

		/*
		 * Make sure compiler does not reorder memcpy and ldeor.
		 * LMTST transactions are always flushed from the write
		 * buffer immediately, a DMB is not required to push out
		 * LMTSTs.
		 */
		rte_io_wmb();
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}
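
/*
 * Submit two back-to-back instructions (128 B) in one LMT operation.
 * On arm64 the copy is done with NEON 128-bit loads/stores; other
 * builds fall back to roc_lmt_mov_seg().
 */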
static __plt_always_inline void
cn9k_cpt_inst_submit_dual(struct cpt_inst_s *inst, uint64_t lmtline,
			  uint64_t io_addr)
{
	uint64_t lmt_status;

	do {
		/* Copy 2 CPT inst_s to LMTLINE */
#if defined(RTE_ARCH_ARM64)
		uint64_t *s = (uint64_t *)inst;
		uint64_t *d = (uint64_t *)lmtline;

		vst1q_u64(&d[0], vld1q_u64(&s[0]));
		vst1q_u64(&d[2], vld1q_u64(&s[2]));
		vst1q_u64(&d[4], vld1q_u64(&s[4]));
		vst1q_u64(&d[6], vld1q_u64(&s[6]));
		vst1q_u64(&d[8], vld1q_u64(&s[8]));
		vst1q_u64(&d[10], vld1q_u64(&s[10]));
		vst1q_u64(&d[12], vld1q_u64(&s[12]));
		vst1q_u64(&d[14], vld1q_u64(&s[14]));
#else
		roc_lmt_mov_seg((void *)lmtline, inst, 8);
#endif

		/*
		 * Make sure compiler does not reorder memcpy and ldeor.
		 * LMTST transactions are always flushed from the write
		 * buffer immediately, a DMB is not required to push out
		 * LMTSTs.
		 */
		rte_io_wmb();
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}
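
/*
 * Enqueue burst. Ops are paired so that each LMTST carries two
 * instructions, presumably to halve the number of submits; an odd
 * leading op is submitted singly so the main loop always sees pairs.
 * The release fence at the end publishes the inflight requests before
 * the head update becomes visible to the dequeue side.
 */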
static uint16_t
cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_inflight_req *infl_req_1, *infl_req_2;
	struct cpt_inst_s inst[2] __rte_cache_aligned;
	struct rte_crypto_op *op_1, *op_2;
	uint16_t nb_allowed, count = 0;
	struct cnxk_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	uint64_t head;
	int ret;

	const union cpt_res_s res = {
		.cn9k.compcode = CPT_COMP_NOT_DONE,
	};

	pend_q = &qp->pend_q;

	const uint64_t lmt_base = qp->lf.lmt_base;
	const uint64_t io_addr = qp->lf.io_addr;
	const uint64_t pq_mask = pend_q->pq_mask;

	/* Clear w0, w2, w3 of both inst */
	inst[0].w0.u64 = 0;
	inst[0].w2.u64 = 0;
	inst[0].w3.u64 = 0;
	inst[1].w0.u64 = 0;
	inst[1].w2.u64 = 0;
	inst[1].w3.u64 = 0;

	head = pend_q->head;
	nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);
	nb_ops = RTE_MIN(nb_ops, nb_allowed);

	if (unlikely(nb_ops & 1)) {
		op_1 = ops[0];
		infl_req_1 = &pend_q->req_queue[head];
		infl_req_1->op_flags = 0;

		ret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);
		if (unlikely(ret)) {
			plt_dp_err("Could not process op: %p", op_1);
			return 0;
		}

		infl_req_1->cop = op_1;
		infl_req_1->res.cn9k.compcode = CPT_COMP_NOT_DONE;
		inst[0].res_addr = (uint64_t)&infl_req_1->res;

		cn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);
		pending_queue_advance(&head, pq_mask);
		count++;
	}

	while (count < nb_ops) {
		op_1 = ops[count];
		op_2 = ops[count + 1];

		infl_req_1 = &pend_q->req_queue[head];
		pending_queue_advance(&head, pq_mask);
		infl_req_2 = &pend_q->req_queue[head];
		pending_queue_advance(&head, pq_mask);

		infl_req_1->cop = op_1;
		infl_req_2->cop = op_2;
		infl_req_1->op_flags = 0;
		infl_req_2->op_flags = 0;

		__atomic_store_n(&infl_req_1->res.u64[0], res.u64[0],
				 __ATOMIC_RELAXED);
		inst[0].res_addr = (uint64_t)&infl_req_1->res;

		__atomic_store_n(&infl_req_2->res.u64[0], res.u64[0],
				 __ATOMIC_RELAXED);
		inst[1].res_addr = (uint64_t)&infl_req_2->res;

		ret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);
		if (unlikely(ret)) {
			plt_dp_err("Could not process op: %p", op_1);
			pending_queue_retreat(&head, pq_mask, 2);
			break;
		}

		ret = cn9k_cpt_inst_prep(qp, op_2, infl_req_2, &inst[1]);
		if (unlikely(ret)) {
			plt_dp_err("Could not process op: %p", op_2);
			pending_queue_retreat(&head, pq_mask, 1);
			cn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);
			count++;
			break;
		}

		cn9k_cpt_inst_submit_dual(&inst[0], lmt_base, io_addr);

		count += 2;
	}

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	pend_q->head = head;
	pend_q->time_out = rte_get_timer_cycles() +
			   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	return count;
}
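
/*
 * Event crypto adapter metadata_set op: resolve the target queue pair
 * from request_info and precompute CPT instruction word 2 from
 * response_info, storing both in the session so the fast path does not
 * have to re-parse the metadata.
 */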
static int
cn9k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
				     void *sess,
				     enum rte_crypto_op_type op_type,
				     enum rte_crypto_op_sess_type sess_type,
				     void *mdata)
{
	union rte_event_crypto_metadata *ec_mdata = mdata;
	struct rte_event *rsp_info;
	struct cnxk_cpt_qp *qp;
	uint8_t cdev_id;
	uint16_t qp_id;
	uint64_t w2;

	/* Get queue pair */
	cdev_id = ec_mdata->request_info.cdev_id;
	qp_id = ec_mdata->request_info.queue_pair_id;
	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];

	/* Prepare w2 */
	rsp_info = &ec_mdata->response_info;
	w2 = CNXK_CPT_INST_W2(
		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
		rsp_info->sched_type, rsp_info->queue_id, 0);

	/* Set meta according to session type */
	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct cn9k_sec_session *priv;
			struct cn9k_ipsec_sa *sa;

			priv = get_sec_session_private_data(sess);
			sa = &priv->sa;
			sa->qp = qp;
			sa->inst.w2 = w2;
		} else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct cnxk_se_sess *priv;

			priv = get_sym_session_private_data(
				sess, cn9k_cryptodev_driver_id);
			priv->qp = qp;
			priv->cpt_inst_w2 = w2;
		} else
			return -EINVAL;
	} else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct rte_cryptodev_asym_session *asym_sess = sess;
			struct cnxk_ae_sess *priv;

			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
			priv->qp = qp;
			priv->cpt_inst_w2 = w2;
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	return 0;
}
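
/*
 * Fast-path counterpart of the metadata_set op above: recover the
 * queue pair and w2 for one op. Session-based ops read the values
 * precomputed in the session; sessionless ops parse the event crypto
 * metadata placed at the op's private data offset.
 */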
static inline int
cn9k_ca_meta_info_extract(struct rte_crypto_op *op,
			  struct cnxk_cpt_qp **qp, struct cpt_inst_s *inst)
{
	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct cn9k_sec_session *priv;
			struct cn9k_ipsec_sa *sa;

			priv = get_sec_session_private_data(op->sym->sec_session);
			sa = &priv->sa;
			*qp = sa->qp;
			inst->w2.u64 = sa->inst.w2;
		} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct cnxk_se_sess *priv;

			priv = get_sym_session_private_data(
				op->sym->session, cn9k_cryptodev_driver_id);
			*qp = priv->qp;
			inst->w2.u64 = priv->cpt_inst_w2;
		} else {
			union rte_event_crypto_metadata *ec_mdata;
			struct rte_event *rsp_info;
			uint8_t cdev_id;
			uint16_t qp_id;

			if (unlikely(op->private_data_offset == 0))
				return -EINVAL;
			ec_mdata = (union rte_event_crypto_metadata *)
				((uint8_t *)op + op->private_data_offset);
			rsp_info = &ec_mdata->response_info;
			cdev_id = ec_mdata->request_info.cdev_id;
			qp_id = ec_mdata->request_info.queue_pair_id;
			*qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
			inst->w2.u64 = CNXK_CPT_INST_W2(
				(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
				rsp_info->sched_type, rsp_info->queue_id, 0);
		}
	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct rte_cryptodev_asym_session *asym_sess;
			struct cnxk_ae_sess *priv;

			asym_sess = op->asym->session;
			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
			*qp = priv->qp;
			inst->w2.u64 = priv->cpt_inst_w2;
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	return 0;
}
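
/*
 * Enqueue one op on behalf of the event crypto adapter. w3 carries the
 * inflight request pointer so that completion comes back as an SSO
 * work item, which cn9k_cpt_crypto_adapter_dequeue() unwraps.
 */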
uint16_t
cn9k_cpt_crypto_adapter_enqueue(uintptr_t base, struct rte_crypto_op *op)
{
	struct cpt_inflight_req *infl_req;
	struct cnxk_cpt_qp *qp;
	struct cpt_inst_s inst;
	int ret;

	ret = cn9k_ca_meta_info_extract(op, &qp, &inst);
	if (unlikely(ret)) {
		rte_errno = EINVAL;
		return 0;
	}

	if (unlikely(!qp->ca.enabled)) {
		rte_errno = EINVAL;
		return 0;
	}

	if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
		rte_errno = ENOMEM;
		return 0;
	}
	infl_req->op_flags = 0;

	ret = cn9k_cpt_inst_prep(qp, op, infl_req, &inst);
	if (unlikely(ret)) {
		plt_dp_err("Could not process op: %p", op);
		rte_mempool_put(qp->ca.req_mp, infl_req);
		rte_errno = EINVAL;
		return 0;
	}

	infl_req->cop = op;
	infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
	infl_req->qp = qp;
	inst.w0.u64 = 0;
	inst.res_addr = (uint64_t)&infl_req->res;
	inst.w3.u64 = CNXK_CPT_INST_W3(1, infl_req);

	if (roc_cpt_is_iq_full(&qp->lf)) {
		rte_mempool_put(qp->ca.req_mp, infl_req);
		rte_errno = EAGAIN;
		return 0;
	}

	if (inst.w2.s.tt == RTE_SCHED_TYPE_ORDERED)
		roc_sso_hws_head_wait(base);

	cn9k_cpt_inst_submit(&inst, qp->lmtline.lmt_base, qp->lmtline.io_addr);

	return 1;
}
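
/*
 * Post-process an inbound IPsec packet: recover the plaintext packet
 * length from the IPv4/IPv6 header that follows the ROC_IE_ON response
 * header, then strip that header by advancing data_off.
 */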
static inline void
cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
			  struct cpt_inflight_req *infl_req)
{
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct rte_mbuf *m = sym_op->m_src;
	struct rte_ipv6_hdr *ip6;
	struct rte_ipv4_hdr *ip;
	uint16_t m_len = 0;
	char *data;

	if (infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND) {
		data = rte_pktmbuf_mtod(m, char *);

		ip = (struct rte_ipv4_hdr *)(data + ROC_IE_ON_INB_RPTR_HDR);

		if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) ==
		    IPVERSION) {
			m_len = rte_be_to_cpu_16(ip->total_length);
		} else {
			PLT_ASSERT(((ip->version_ihl & 0xf0) >>
				    RTE_IPV4_IHL_MULTIPLIER) == 6);
			ip6 = (struct rte_ipv6_hdr *)ip;
			m_len = rte_be_to_cpu_16(ip6->payload_len) +
				sizeof(struct rte_ipv6_hdr);
		}

		m->data_len = m_len;
		m->pkt_len = m_len;
		m->data_off += ROC_IE_ON_INB_RPTR_HDR;
	}
}
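
/*
 * Translate a CPT result into the rte_crypto_op status: an ICV
 * miscompare maps to AUTH_FAILED, any other microcode or hardware
 * error to ERROR. On success, security ops get IPsec fix-ups, software
 * auth verification runs when requested, and asym results are decoded.
 * Temporary sessions created for sessionless ops are freed here.
 */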
static inline void
cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
			      struct cpt_inflight_req *infl_req,
			      struct cpt_cn9k_res_s *res)
{
	unsigned int sz;

	if (likely(res->compcode == CPT_COMP_GOOD)) {
		if (unlikely(res->uc_compcode)) {
			if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
				cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

			plt_dp_info("Request failed with microcode error");
			plt_dp_info("MC completion code 0x%x",
				    res->uc_compcode);
			goto temp_sess_free;
		}

		cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
				cn9k_cpt_sec_post_process(cop, infl_req);
				goto temp_sess_free;
			}

			/* Verify authentication data if required */
			if (unlikely(infl_req->op_flags &
				     CPT_OP_FLAGS_AUTH_VERIFY)) {
				uintptr_t *rsp = infl_req->mdata;
				compl_auth_verify(cop, (uint8_t *)rsp[0],
						  rsp[1]);
			}
		} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
			struct rte_crypto_asym_op *op = cop->asym;
			uintptr_t *mdata = infl_req->mdata;
			struct cnxk_ae_sess *sess;

			sess = (struct cnxk_ae_sess *)
				op->session->sess_private_data;

			cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
		}
	} else {
		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		plt_dp_info("HW completion code 0x%x", res->compcode);

		switch (res->compcode) {
		case CPT_COMP_INSTERR:
			plt_dp_err("Request failed with instruction error");
			break;
		case CPT_COMP_FAULT:
			plt_dp_err("Request failed with DMA fault");
			break;
		case CPT_COMP_HWERR:
			plt_dp_err("Request failed with hardware error");
			break;
		default:
			plt_dp_err(
				"Request failed with unknown completion code");
		}
	}

temp_sess_free:
	if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			sym_session_clear(cn9k_cryptodev_driver_id,
					  cop->sym->session);
			sz = rte_cryptodev_sym_get_existing_header_session_size(
				cop->sym->session);
			memset(cop->sym->session, 0, sz);
			rte_mempool_put(qp->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}
	}
}
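
/*
 * Event crypto adapter dequeue: get_work1 is the inflight request
 * pointer planted in w3 at enqueue time. The result word is read with
 * a relaxed load; ordering is presumably provided by the event device
 * dequeue itself, since no explicit fence is issued here.
 */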
uintptr_t
cn9k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
{
	struct cpt_inflight_req *infl_req;
	struct rte_crypto_op *cop;
	struct cnxk_cpt_qp *qp;
	union cpt_res_s res;

	infl_req = (struct cpt_inflight_req *)(get_work1);
	cop = infl_req->cop;
	qp = infl_req->qp;

	res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);

	cn9k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req, &res.cn9k);

	if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
		rte_mempool_put(qp->meta_info.pool, infl_req->mdata);

	rte_mempool_put(qp->ca.req_mp, infl_req);
	return (uintptr_t)cop;
}
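
/*
 * Dequeue burst: walk the pending queue from the tail and stop at the
 * first request still marked CPT_COMP_NOT_DONE. On timeout the queue
 * state is dumped and the timer re-armed; the request itself stays in
 * the queue to be polled again.
 */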
static uint16_t
cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_inflight_req *infl_req;
	struct cnxk_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	uint64_t infl_cnt, pq_tail;
	struct rte_crypto_op *cop;
	union cpt_res_s res;
	int i;

	pend_q = &qp->pend_q;

	const uint64_t pq_mask = pend_q->pq_mask;

	pq_tail = pend_q->tail;
	infl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);
	nb_ops = RTE_MIN(nb_ops, infl_cnt);

	/* Ensure infl_cnt isn't read before data lands */
	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

	for (i = 0; i < nb_ops; i++) {
		infl_req = &pend_q->req_queue[pq_tail];

		res.u64[0] = __atomic_load_n(&infl_req->res.u64[0],
					     __ATOMIC_RELAXED);

		if (unlikely(res.cn9k.compcode == CPT_COMP_NOT_DONE)) {
			if (unlikely(rte_get_timer_cycles() >
				     pend_q->time_out)) {
				plt_err("Request timed out");
				cnxk_cpt_dump_on_err(qp);
				pend_q->time_out = rte_get_timer_cycles() +
						   DEFAULT_COMMAND_TIMEOUT *
							   rte_get_timer_hz();
			}
			break;
		}

		pending_queue_advance(&pq_tail, pq_mask);

		cop = infl_req->cop;

		ops[i] = cop;

		cn9k_cpt_dequeue_post_process(qp, cop, infl_req, &res.cn9k);

		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
			rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
	}

	pend_q->tail = pq_tail;

	return i;
}

void
cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
	dev->enqueue_burst = cn9k_cpt_enqueue_burst;
	dev->dequeue_burst = cn9k_cpt_dequeue_burst;

	rte_mb();
}

static void
cn9k_cpt_dev_info_get(struct rte_cryptodev *dev,
		      struct rte_cryptodev_info *info)
{
	if (info != NULL) {
		cnxk_cpt_dev_info_get(dev, info);
		info->driver_id = cn9k_cryptodev_driver_id;
	}
}

struct rte_cryptodev_ops cn9k_cpt_ops = {
	/* Device control ops */
	.dev_configure = cnxk_cpt_dev_config,
	.dev_start = cnxk_cpt_dev_start,
	.dev_stop = cnxk_cpt_dev_stop,
	.dev_close = cnxk_cpt_dev_close,
	.dev_infos_get = cn9k_cpt_dev_info_get,

	.stats_get = NULL,
	.stats_reset = NULL,
	.queue_pair_setup = cnxk_cpt_queue_pair_setup,
	.queue_pair_release = cnxk_cpt_queue_pair_release,

	/* Symmetric crypto ops */
	.sym_session_get_size = cnxk_cpt_sym_session_get_size,
	.sym_session_configure = cnxk_cpt_sym_session_configure,
	.sym_session_clear = cnxk_cpt_sym_session_clear,

	/* Asymmetric crypto ops */
	.asym_session_get_size = cnxk_ae_session_size_get,
	.asym_session_configure = cnxk_ae_session_cfg,
	.asym_session_clear = cnxk_ae_session_clear,

	/* Event crypto ops */
	.session_ev_mdata_set = cn9k_cpt_crypto_adapter_ev_mdata_set,
};