/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Marvell International Ltd.
 */

#include <unistd.h>

#include <cryptodev_pmd.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_event_crypto_adapter.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_ops_helper.h"
#include "otx2_ipsec_anti_replay.h"
#include "otx2_ipsec_po_ops.h"
#include "otx2_mbox.h"
#include "otx2_sec_idev.h"
#include "otx2_security.h"

#include "cpt_hw_types.h"
#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"

#define METABUF_POOL_CACHE_SIZE	512

static uint64_t otx2_fpm_iova[CPT_EC_ID_PMAX];

/* Forward declarations */

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);
static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	snprintf(name, size, "otx2_cpt_lf_mem_%u:%u", dev_id, qp_id);
}
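
/*
 * For illustration: dev_id 0 with qp_id 1 yields the memzone name
 * "otx2_cpt_lf_mem_0:1".
 */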
static int
otx2_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
				struct otx2_cpt_qp *qp, uint8_t qp_id,
				unsigned int nb_elements)
{
	char mempool_name[RTE_MEMPOOL_NAMESIZE];
	struct cpt_qp_meta_info *meta_info;
	int lcore_cnt = rte_lcore_count();
	int ret, max_mlen, mb_pool_sz;
	struct rte_mempool *pool;
	int asym_mlen = 0;
	int lb_mlen = 0;
	int sg_mlen = 0;

	if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {

		/* Get meta len for scatter gather mode */
		sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();

		/* Extra 32B saved for future considerations */
		sg_mlen += 4 * sizeof(uint64_t);

		/* Get meta len for linear buffer (direct) mode */
		lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();

		/* Extra 32B saved for future considerations */
		lb_mlen += 4 * sizeof(uint64_t);
	}

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {

		/* Get meta len required for asymmetric operations */
		asym_mlen = cpt_pmd_ops_helper_asym_get_mlen();
	}

	/*
	 * Check max requirement for meta buffer to
	 * support crypto op of any type (sym/asym).
	 */
	max_mlen = RTE_MAX(RTE_MAX(lb_mlen, sg_mlen), asym_mlen);

	/* Allocate mempool */

	snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx2_cpt_mb_%u:%u",
		 dev->data->dev_id, qp_id);

	mb_pool_sz = nb_elements;

	/* For poll mode, core that enqueues and core that dequeues can be
	 * different. For event mode, all cores are allowed to use same crypto
	 * queue pair.
	 */
	mb_pool_sz += (RTE_MAX(2, lcore_cnt) * METABUF_POOL_CACHE_SIZE);
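
	/*
	 * Illustration (example values): with nb_elements = 2048 and four
	 * lcores, the pool holds 2048 + (4 * 512) = 4096 metabufs, so each
	 * core can keep a full METABUF_POOL_CACHE_SIZE cache without
	 * starving the other cores.
	 */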

	pool = rte_mempool_create_empty(mempool_name, mb_pool_sz, max_mlen,
					METABUF_POOL_CACHE_SIZE, 0,
					rte_socket_id(), 0);
	if (pool == NULL) {
		CPT_LOG_ERR("Could not create mempool for metabuf");
		return rte_errno;
	}

	ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
					 NULL);
	if (ret) {
		CPT_LOG_ERR("Could not set mempool ops");
		goto mempool_free;
	}

	ret = rte_mempool_populate_default(pool);
	if (ret <= 0) {
		CPT_LOG_ERR("Could not populate metabuf pool");
		goto mempool_free;
	}

	meta_info = &qp->meta_info;

	meta_info->pool = pool;
	meta_info->lb_mlen = lb_mlen;
	meta_info->sg_mlen = sg_mlen;

	return 0;

mempool_free:
	rte_mempool_free(pool);

	return ret;
}

static void
otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp)
{
	struct cpt_qp_meta_info *meta_info = &qp->meta_info;

	rte_mempool_free(meta_info->pool);

	meta_info->pool = NULL;
	meta_info->lb_mlen = 0;
	meta_info->sg_mlen = 0;
}
static int
otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
{
	static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1);
	uint16_t port_id, nb_ethport = rte_eth_dev_count_avail();
	int i, ret;

	for (i = 0; i < nb_ethport; i++) {
		port_id = rte_atomic16_add_return(&port_offset, 1) % nb_ethport;
		if (otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
			break;
	}

	if (i >= nb_ethport)
		return 0;

	ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id);
	if (ret)
		return ret;

	/* Publish inline Tx QP to eth dev security */
	ret = otx2_sec_idev_tx_cpt_qp_add(port_id, qp);
	if (ret)
		return ret;

	return 0;
}

static struct otx2_cpt_qp *
otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
		   uint8_t group)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);
	const struct rte_memzone *lf_mem;
	uint32_t len, iq_len, size_div40;
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t used_len, iova;
	struct otx2_cpt_qp *qp;
	uint64_t lmtline;
	uint8_t *va;
	int ret;

	/* Allocate queue pair */
	qp = rte_zmalloc_socket("OCTEON TX2 Crypto PMD Queue Pair", sizeof(*qp),
				OTX2_ALIGN, 0);
	if (qp == NULL) {
		CPT_LOG_ERR("Could not allocate queue pair");
		return NULL;
	}

	/*
	 * Pending queue updates make assumption that queue size is a power
	 * of 2.
	 */
	RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(OTX2_CPT_DEFAULT_CMD_QLEN));

	iq_len = OTX2_CPT_DEFAULT_CMD_QLEN;

	/*
	 * Queue size must be a multiple of 40 and effective queue size to
	 * software is (size_div40 - 1) * 40
	 */
	size_div40 = (iq_len + 40 - 1) / 40 + 1;
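
	/*
	 * Worked example, assuming OTX2_CPT_DEFAULT_CMD_QLEN is 2048:
	 * size_div40 = (2048 + 39) / 40 + 1 = 53, so the effective queue
	 * size seen by software is (53 - 1) * 40 = 2080 >= 2048 entries.
	 */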

	/* For pending queue */
	len = iq_len * RTE_ALIGN(sizeof(qp->pend_q.rid_queue[0]), 8);

	/* Space for instruction group memory */
	len += size_div40 * 16;

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For instruction queues */
	len += OTX2_CPT_DEFAULT_CMD_QLEN * sizeof(union cpt_inst_s);

	/* Wastage after instruction queues */
	len = RTE_ALIGN(len, pg_sz);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp_id);

	lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
			RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
			RTE_CACHE_LINE_SIZE);
	if (lf_mem == NULL) {
		CPT_LOG_ERR("Could not allocate reserved memzone");
		goto qp_free;
	}

	va = lf_mem->addr;
	iova = lf_mem->iova;

	memset(va, 0, len);

	ret = otx2_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
	if (ret) {
		CPT_LOG_ERR("Could not create mempool for metabuf");
		goto lf_mem_free;
	}

	/* Initialize pending queue */
	qp->pend_q.rid_queue = (void **)va;
	qp->pend_q.tail = 0;
	qp->pend_q.head = 0;

	used_len = iq_len * RTE_ALIGN(sizeof(qp->pend_q.rid_queue[0]), 8);
	used_len += size_div40 * 16;
	used_len = RTE_ALIGN(used_len, pg_sz);
	iova += used_len;

	qp->iq_dma_addr = iova;
	qp->id = qp_id;
	qp->blkaddr = vf->lf_blkaddr[qp_id];
	qp->base = OTX2_CPT_LF_BAR2(vf, qp->blkaddr, qp_id);

	lmtline = vf->otx2_dev.bar2 +
		  (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
		  OTX2_LMT_LF_LMTLINE(0);

	qp->lmtline = (void *)lmtline;
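
	/*
	 * Reading the expression above: the LMT block is selected in BAR2
	 * by (RVU_BLOCK_ADDR_LMT << 20), the 4 KB LF slot by (qp_id << 12),
	 * and OTX2_LMT_LF_LMTLINE(0) is the offset of the first LMT line
	 * inside that slot.
	 */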

	qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);

	ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
	if (ret && (ret != -ENOENT)) {
		CPT_LOG_ERR("Could not delete inline configuration");
		goto mempool_destroy;
	}

	otx2_cpt_iq_disable(qp);

	ret = otx2_cpt_qp_inline_cfg(dev, qp);
	if (ret) {
		CPT_LOG_ERR("Could not configure queue for inline IPsec");
		goto mempool_destroy;
	}

	ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
				 size_div40);
	if (ret) {
		CPT_LOG_ERR("Could not enable instruction queue");
		goto mempool_destroy;
	}

	return qp;

mempool_destroy:
	otx2_cpt_metabuf_mempool_destroy(qp);
lf_mem_free:
	rte_memzone_free(lf_mem);
qp_free:
	rte_free(qp);
	return NULL;
}

static int
otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
{
	const struct rte_memzone *lf_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	int ret;

	ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
	if (ret && (ret != -ENOENT)) {
		CPT_LOG_ERR("Could not delete inline configuration");
		return ret;
	}

	otx2_cpt_iq_disable(qp);

	otx2_cpt_metabuf_mempool_destroy(qp);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp->id);

	lf_mem = rte_memzone_lookup(name);

	ret = rte_memzone_free(lf_mem);
	if (ret)
		return ret;

	rte_free(qp);

	return 0;
}

static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
	if (xform->next) {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
		    (xform->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC ||
		     xform->next->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC))
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC ||
		     xform->next->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC))
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
			return -ENOTSUP;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
		return -ENOTSUP;

	return 0;
}

static int
sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
		      struct rte_cryptodev_sym_session *sess,
		      struct rte_mempool *pool)
{
	struct rte_crypto_sym_xform *temp_xform = xform;
	struct cpt_sess_misc *misc;
	vq_cmd_word3_t vq_cmd_w3;
	void *priv;
	int ret;

	ret = sym_xform_verify(xform);
	if (unlikely(ret))
		return ret;

	if (unlikely(rte_mempool_get(pool, &priv))) {
		CPT_LOG_ERR("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cpt_sess_misc) +
			offsetof(struct cpt_ctx, mc_ctx));

	misc = priv;

	for ( ; xform != NULL; xform = xform->next) {
		switch (xform->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			ret = fill_sess_aead(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			ret = fill_sess_cipher(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
				ret = fill_sess_gmac(xform, misc);
			else
				ret = fill_sess_auth(xform, misc);
			break;
		default:
			ret = -1;
		}

		if (ret)
			goto priv_put;
	}

	if ((GET_SESS_FC_TYPE(misc) == HASH_HMAC) &&
	    cpt_mac_len_verify(&temp_xform->auth)) {
		CPT_LOG_ERR("MAC length is not supported");
		struct cpt_ctx *ctx = SESS_PRIV(misc);
		if (ctx->auth_key != NULL) {
			rte_free(ctx->auth_key);
			ctx->auth_key = NULL;
		}
		goto priv_put;
	}

	set_sym_session_private_data(sess, driver_id, misc);

	misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
			     sizeof(struct cpt_sess_misc);

	vq_cmd_w3.u64 = 0;
	vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
							 mc_ctx);

	/*
	 * IE engines support IPsec operations
	 * SE engines support IPsec operations, Chacha-Poly and
	 * Air-Crypto operations
	 */
	if (misc->zsk_flag || misc->chacha_poly)
		vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE;
	else
		vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE_IE;

	misc->cpt_inst_w7 = vq_cmd_w3.u64;

	return 0;

priv_put:
	rte_mempool_put(pool, priv);

	return -ENOTSUP;
}

static __rte_always_inline int32_t __rte_hot
otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
		    struct cpt_request_info *req,
		    void *lmtline,
		    struct rte_crypto_op *op,
		    uint64_t cpt_inst_w7)
{
	union rte_event_crypto_metadata *m_data;
	union cpt_inst_s inst;
	uint64_t lmt_status;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		m_data = rte_cryptodev_sym_session_get_user_data(
						op->sym->session);
		if (m_data == NULL) {
			rte_pktmbuf_free(op->sym->m_src);
			rte_crypto_op_free(op);
			rte_errno = EINVAL;
			return -EINVAL;
		}
	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
		   op->private_data_offset) {
		m_data = (union rte_event_crypto_metadata *)
			 ((uint8_t *)op +
			  op->private_data_offset);
	} else {
		return -EINVAL;
	}

	inst.u[0] = 0;
	inst.s9x.res_addr = req->comp_baddr;
	inst.u[2] = 0;
	inst.u[3] = 0;

	inst.s9x.ei0 = req->ist.ei0;
	inst.s9x.ei1 = req->ist.ei1;
	inst.s9x.ei2 = req->ist.ei2;
	inst.s9x.ei3 = cpt_inst_w7;

	inst.u[2] = (((RTE_EVENT_TYPE_CRYPTODEV << 28) |
		      m_data->response_info.flow_id) |
		     ((uint64_t)m_data->response_info.sched_type << 32) |
		     ((uint64_t)m_data->response_info.queue_id << 34));
	inst.u[3] = 1 | (((uint64_t)req >> 3) << 3);
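
	/*
	 * Response event layout composed above (as read from the shifts):
	 * u[2] carries the event, with tag = (RTE_EVENT_TYPE_CRYPTODEV << 28)
	 * | flow_id in bits [31:0], sched_type in bits [33:32] and queue_id
	 * from bit 34 up. u[3] carries req with its low three bits cleared
	 * (the request is 8-byte aligned) and bit 0 set.
	 */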

	do {
		/* Copy CPT command to LMTLINE */
		memcpy(lmtline, &inst, sizeof(inst));

		/*
		 * Make sure compiler does not reorder memcpy and ldeor.
		 * LMTST transactions are always flushed from the write
		 * buffer immediately, a DMB is not required to push out
		 * LMTSTs.
		 */
		rte_io_wmb();
		lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
	} while (lmt_status == 0);

	return 0;
}

static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
		     struct pending_queue *pend_q,
		     struct cpt_request_info *req,
		     struct rte_crypto_op *op,
		     uint64_t cpt_inst_w7,
		     unsigned int burst_index)
{
	void *lmtline = qp->lmtline;
	union cpt_inst_s inst;
	uint64_t lmt_status;

	if (qp->ca_enable)
		return otx2_ca_enqueue_req(qp, req, lmtline, op, cpt_inst_w7);

	inst.u[0] = 0;
	inst.s9x.res_addr = req->comp_baddr;
	inst.u[2] = 0;
	inst.u[3] = 0;

	inst.s9x.ei0 = req->ist.ei0;
	inst.s9x.ei1 = req->ist.ei1;
	inst.s9x.ei2 = req->ist.ei2;
	inst.s9x.ei3 = cpt_inst_w7;

	req->time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	do {
		/* Copy CPT command to LMTLINE */
		memcpy(lmtline, &inst, sizeof(inst));

		/*
		 * Make sure compiler does not reorder memcpy and ldeor.
		 * LMTST transactions are always flushed from the write
		 * buffer immediately, a DMB is not required to push out
		 * LMTSTs.
		 */
		rte_io_wmb();
		lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
	} while (lmt_status == 0);

	pending_queue_push(pend_q, req, burst_index, OTX2_CPT_DEFAULT_CMD_QLEN);

	return 0;
}

static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
		      struct rte_crypto_op *op,
		      struct pending_queue *pend_q,
		      unsigned int burst_index)
{
	struct cpt_qp_meta_info *minfo = &qp->meta_info;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct asym_op_params params = {0};
	struct cpt_asym_sess_misc *sess;
	uintptr_t *cop;
	void *mdata;
	int ret;

	if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
		CPT_LOG_ERR("Could not allocate meta buffer for request");
		return -ENOMEM;
	}

	sess = get_asym_session_private_data(asym_op->session,
					     otx2_cryptodev_driver_id);

	/* Store IO address of the mdata to meta_buf */
	params.meta_buf = rte_mempool_virt2iova(mdata);

	cop = mdata;
	cop[0] = (uintptr_t)mdata;
	cop[1] = (uintptr_t)op;
	cop[2] = cop[3] = 0ULL;

	params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
	params.req->op = cop;

	/* Adjust meta_buf to point to end of cpt_request_info structure */
	params.meta_buf += (4 * sizeof(uintptr_t)) +
			   sizeof(struct cpt_request_info);
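	/*
	 * Resulting metabuf layout (per the pointer math above):
	 * cop[0..3]: mdata pointer, op pointer, 0, 0 (read back at dequeue),
	 * followed by struct cpt_request_info, followed by the remaining
	 * meta area that params.meta_buf now points to.
	 */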
	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		ret = cpt_modex_prep(&params, &sess->mod_ctx);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		ret = cpt_enqueue_rsa_op(op, &params, sess);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx2_fpm_iova);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
				    sess->ec_ctx.curveid);
		if (unlikely(ret))
			goto req_fail;
		break;
	default:
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		ret = -EINVAL;
		goto req_fail;
	}

	ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, op,
				   sess->cpt_inst_w7, burst_index);
	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("Could not enqueue crypto req");
		goto req_fail;
	}

	return 0;

req_fail:
	free_op_meta(mdata, minfo->pool);

	return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
		     struct pending_queue *pend_q, unsigned int burst_index)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct cpt_request_info *req;
	struct cpt_sess_misc *sess;
	uint64_t cpt_op;
	void *mdata;
	int ret;

	sess = get_sym_session_private_data(sym_op->session,
					    otx2_cryptodev_driver_id);

	cpt_op = sess->cpt_op;

	if (cpt_op & CPT_OP_CIPHER_MASK)
		ret = fill_fc_params(op, sess, &qp->meta_info, &mdata,
				     (void **)&req);
	else
		ret = fill_digest_params(op, sess, &qp->meta_info, &mdata,
					 (void **)&req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("Crypto req : op %p, cpt_op 0x%x ret 0x%x",
			       op, (unsigned int)cpt_op, ret);
		return ret;
	}

	ret = otx2_cpt_enqueue_req(qp, pend_q, req, op, sess->cpt_inst_w7,
				   burst_index);
	if (unlikely(ret)) {
		/* Free buffer allocated by fill params routines */
		free_op_meta(mdata, qp->meta_info.pool);
	}

	return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
		     struct pending_queue *pend_q,
		     const unsigned int burst_index)
{
	uint32_t winsz, esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
	struct rte_mbuf *m_src = op->sym->m_src;
	struct otx2_sec_session_ipsec_lp *sess;
	struct otx2_ipsec_po_sa_ctl *ctl_wrd;
	struct otx2_ipsec_po_in_sa *sa;
	struct otx2_sec_session *priv;
	struct cpt_request_info *req;
	uint64_t seq_in_sa, seq = 0;
	uint8_t esn;
	int ret;

	priv = get_sec_session_private_data(op->sym->sec_session);
	sess = &priv->ipsec.lp;
	sa = &sess->in_sa;

	ctl_wrd = &sa->ctl;
	esn = ctl_wrd->esn_en;
	winsz = sa->replay_win_sz;

	if (ctl_wrd->direction == OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND)
		ret = process_outb_sa(op, sess, &qp->meta_info, (void **)&req);
	else {
		if (winsz) {
			esn_low = rte_be_to_cpu_32(sa->esn_low);
			esn_hi = rte_be_to_cpu_32(sa->esn_hi);
			seql = *rte_pktmbuf_mtod_offset(m_src, uint32_t *,
				sizeof(struct rte_ipv4_hdr) + 4);
			seql = rte_be_to_cpu_32(seql);

			if (!esn) {
				seq = (uint64_t)seql;
			} else {
				seqh = anti_replay_get_seqh(winsz, seql, esn_hi,
						esn_low);
				seq = ((uint64_t)seqh << 32) | seql;
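				/*
				 * e.g. a recovered seqh of 0x1 with received
				 * seql 0x00000005 yields the 64-bit ESN
				 * 0x0000000100000005.
				 */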
			}

			if (unlikely(seq == 0))
				return IPSEC_ANTI_REPLAY_FAILED;

			ret = anti_replay_check(sa->replay, seq, winsz);
			if (unlikely(ret)) {
				otx2_err("Anti replay check failed");
				return IPSEC_ANTI_REPLAY_FAILED;
			}

			if (esn) {
				seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
				if (seq > seq_in_sa) {
					sa->esn_low = rte_cpu_to_be_32(seql);
					sa->esn_hi = rte_cpu_to_be_32(seqh);
				}
			}
		}

		ret = process_inb_sa(op, sess, &qp->meta_info, (void **)&req);
	}

	if (unlikely(ret)) {
		otx2_err("Crypto req : op %p, ret 0x%x", op, ret);
		return ret;
	}

	ret = otx2_cpt_enqueue_req(qp, pend_q, req, op, sess->cpt_inst_w7,
				   burst_index);

	return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
			      struct pending_queue *pend_q,
			      unsigned int burst_index)
{
	const int driver_id = otx2_cryptodev_driver_id;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_cryptodev_sym_session *sess;
	int ret;

	/* Create temporary session */
	sess = rte_cryptodev_sym_session_create(qp->sess_mp);
	if (sess == NULL)
		return -ENOMEM;

	ret = sym_session_configure(driver_id, sym_op->xform, sess,
				    qp->sess_mp_priv);
	if (ret)
		goto sess_put;

	sym_op->session = sess;

	ret = otx2_cpt_enqueue_sym(qp, op, pend_q, burst_index);

	if (unlikely(ret))
		goto priv_put;

	return 0;

priv_put:
	sym_session_clear(driver_id, sess);
sess_put:
	rte_mempool_put(qp->sess_mp, sess);

	return ret;
}

static uint16_t
otx2_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t nb_allowed, count = 0;
	struct otx2_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	struct rte_crypto_op *op;
	int ret;

	pend_q = &qp->pend_q;

	nb_allowed = pending_queue_free_slots(pend_q,
				OTX2_CPT_DEFAULT_CMD_QLEN, 0);
	nb_ops = RTE_MIN(nb_ops, nb_allowed);

	for (count = 0; count < nb_ops; count++) {
		op = ops[count];
		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
				ret = otx2_cpt_enqueue_sec(qp, op, pend_q,
							   count);
			else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
				ret = otx2_cpt_enqueue_sym(qp, op, pend_q,
							   count);
			else
				ret = otx2_cpt_enqueue_sym_sessless(qp, op,
						pend_q, count);
		} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
			if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
				ret = otx2_cpt_enqueue_asym(qp, op, pend_q,
							    count);
			else
				break;
		} else
			break;

		if (unlikely(ret))
			break;
	}

	if (unlikely(!qp->ca_enable))
		pending_queue_commit(pend_q, count, OTX2_CPT_DEFAULT_CMD_QLEN);

	return count;
}

static __rte_always_inline void
otx2_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
		     struct rte_crypto_rsa_xform *rsa_ctx)
{
	struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;

	switch (rsa->op_type) {
	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
		rsa->cipher.length = rsa_ctx->n.length;
		memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
		break;
	case RTE_CRYPTO_ASYM_OP_DECRYPT:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
			rsa->message.length = rsa_ctx->n.length;
			memcpy(rsa->message.data, req->rptr,
			       rsa->message.length);
		} else {
			/* Get length of decrypted output */
			rsa->message.length = rte_cpu_to_be_16
					(*((uint16_t *)req->rptr));
			/*
			 * Offset output data pointer by length field
			 * (2 bytes) and copy decrypted data.
			 */
			memcpy(rsa->message.data, req->rptr + 2,
			       rsa->message.length);
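			/*
			 * e.g. rptr bytes { 0x00, 0x20, ... } carry a
			 * big-endian length prefix of 32, so 32 bytes of
			 * plaintext are copied starting at rptr + 2.
			 */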
		}
		break;
	case RTE_CRYPTO_ASYM_OP_SIGN:
		rsa->sign.length = rsa_ctx->n.length;
		memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
		break;
	case RTE_CRYPTO_ASYM_OP_VERIFY:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
			rsa->sign.length = rsa_ctx->n.length;
			memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
		} else {
			/* Get length of signed output */
			rsa->sign.length = rte_cpu_to_be_16
					(*((uint16_t *)req->rptr));
			/*
			 * Offset output data pointer by length field
			 * (2 bytes) and copy signed data.
			 */
			memcpy(rsa->sign.data, req->rptr + 2,
			       rsa->sign.length);
		}
		if (memcmp(rsa->sign.data, rsa->message.data,
			   rsa->message.length)) {
			CPT_LOG_DP_ERR("RSA verification failed");
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid RSA operation type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}

static __rte_always_inline void
otx2_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
			       struct cpt_request_info *req,
			       struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
		return;

	/* Separate out sign r and s components */
	memcpy(ecdsa->r.data, req->rptr, prime_len);
	memcpy(ecdsa->s.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
	       prime_len);
	ecdsa->r.length = prime_len;
	ecdsa->s.length = prime_len;
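
	/*
	 * rptr layout: r (prime_len bytes), padding up to the next 8-byte
	 * boundary, then s (prime_len bytes). For P-256, prime_len is 32
	 * (already 8-byte aligned), so s starts at rptr + 32.
	 */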
}

static __rte_always_inline void
otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
			      struct cpt_request_info *req,
			      struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	memcpy(ecpm->r.x.data, req->rptr, prime_len);
	memcpy(ecpm->r.y.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
	       prime_len);
	ecpm->r.x.length = prime_len;
	ecpm->r.y.length = prime_len;
}

static void
otx2_cpt_asym_post_process(struct rte_crypto_op *cop,
			   struct cpt_request_info *req)
{
	struct rte_crypto_asym_op *op = cop->asym;
	struct cpt_asym_sess_misc *sess;

	sess = get_asym_session_private_data(op->session,
					     otx2_cryptodev_driver_id);

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		otx2_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		op->modex.result.length = sess->mod_ctx.modulus.length;
		memcpy(op->modex.result.data, req->rptr,
		       op->modex.result.length);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		otx2_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		otx2_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid crypto xform type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}

static void
otx2_cpt_sec_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
{
	struct cpt_request_info *req = (struct cpt_request_info *)rsp[2];
	vq_cmd_word0_t *word0 = (vq_cmd_word0_t *)&req->ist.ei0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct rte_mbuf *m = sym_op->m_src;
	struct rte_ipv6_hdr *ip6;
	struct rte_ipv4_hdr *ip;
	uint16_t m_len = 0;
	int mdata_len;
	char *data;

	mdata_len = (int)rsp[3];
	rte_pktmbuf_trim(m, mdata_len);

	if (word0->s.opcode.major == OTX2_IPSEC_PO_PROCESS_IPSEC_INB) {
		data = rte_pktmbuf_mtod(m, char *);
		ip = (struct rte_ipv4_hdr *)(data +
			OTX2_IPSEC_PO_INB_RPTR_HDR);

		if ((ip->version_ihl >> 4) == 4) {
			m_len = rte_be_to_cpu_16(ip->total_length);
		} else {
			ip6 = (struct rte_ipv6_hdr *)(data +
				OTX2_IPSEC_PO_INB_RPTR_HDR);
			m_len = rte_be_to_cpu_16(ip6->payload_len) +
				sizeof(struct rte_ipv6_hdr);
		}

		m->data_len = m_len;
		m->pkt_len = m_len;
		m->data_off += OTX2_IPSEC_PO_INB_RPTR_HDR;
	}
}

static inline void
otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
			      uintptr_t *rsp, uint8_t cc)
{
	unsigned int sz;

	if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			if (likely(cc == OTX2_IPSEC_PO_CC_SUCCESS)) {
				otx2_cpt_sec_post_process(cop, rsp);
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			} else
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

			return;
		}

		if (likely(cc == NO_ERR)) {
			/* Verify authentication data if required */
			if (unlikely(rsp[2]))
				compl_auth_verify(cop, (uint8_t *)rsp[2],
						  rsp[3]);
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			if (cc == ERR_GC_ICV_MISCOMPARE)
				cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}

		if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
			sym_session_clear(otx2_cryptodev_driver_id,
					  cop->sym->session);
			sz = rte_cryptodev_sym_get_existing_header_session_size(
					cop->sym->session);
			memset(cop->sym->session, 0, sz);
			rte_mempool_put(qp->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}
	}

	if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		if (likely(cc == NO_ERR)) {
			cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			/*
			 * Pass cpt_req_info stored in metabuf during
			 * enqueue.
			 */
			rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
			otx2_cpt_asym_post_process(cop,
					(struct cpt_request_info *)rsp);
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static uint16_t
otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i, nb_pending, nb_completed;
	struct otx2_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	struct cpt_request_info *req;
	struct rte_crypto_op *cop;
	uint8_t cc[nb_ops];
	uintptr_t *rsp;
	void *metabuf;

	pend_q = &qp->pend_q;

	nb_pending = pending_queue_level(pend_q, OTX2_CPT_DEFAULT_CMD_QLEN);

	/* Ensure pcount isn't read before data lands */
	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

	nb_ops = RTE_MIN(nb_ops, nb_pending);

	for (i = 0; i < nb_ops; i++) {
		pending_queue_peek(pend_q, (void **)&req,
				   OTX2_CPT_DEFAULT_CMD_QLEN, 0);

		cc[i] = otx2_cpt_compcode_get(req);

		if (unlikely(cc[i] == ERR_REQ_PENDING))
			break;

		ops[i] = req->op;

		pending_queue_pop(pend_q, OTX2_CPT_DEFAULT_CMD_QLEN);
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++) {
		rsp = (void *)ops[i];

		metabuf = (void *)rsp[0];
		cop = (void *)rsp[1];
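
		/*
		 * rsp is the four-word record laid out at enqueue time:
		 * [0] metabuf, [1] crypto op, [2]/[3] op-specific words
		 * (e.g. address/length of authentication data to verify
		 * for symmetric ops, zero for asymmetric ops).
		 */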

		ops[i] = cop;

		otx2_cpt_dequeue_post_process(qp, cop, rsp, cc[i]);

		free_op_meta(metabuf, qp->meta_info.pool);
	}

	return nb_completed;
}

void
otx2_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
	dev->enqueue_burst = otx2_cpt_enqueue_burst;
	dev->dequeue_burst = otx2_cpt_dequeue_burst;

	rte_mb();
}

/* PMD ops */

static int
otx2_cpt_dev_config(struct rte_cryptodev *dev,
		    struct rte_cryptodev_config *conf)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	int ret;

	if (conf->nb_queue_pairs > vf->max_queues) {
		CPT_LOG_ERR("Invalid number of queue pairs requested");
		return -EINVAL;
	}

	dev->feature_flags = otx2_cpt_default_ff_get() & ~conf->ff_disable;

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
		/* Initialize shared FPM table */
		ret = cpt_fpm_init(otx2_fpm_iova);
		if (ret)
			return ret;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		otx2_cpt_err_intr_unregister(dev);

	/* Detach queues */
	if (vf->nb_queues) {
		ret = otx2_cpt_queues_detach(dev);
		if (ret) {
			CPT_LOG_ERR("Could not detach CPT queues");
			return ret;
		}
	}

	ret = otx2_cpt_queues_attach(dev, conf->nb_queue_pairs);
	if (ret) {
		CPT_LOG_ERR("Could not attach CPT queues");
		return -ENODEV;
	}

	ret = otx2_cpt_msix_offsets_get(dev);
	if (ret) {
		CPT_LOG_ERR("Could not get MSI-X offsets");
		goto queues_detach;
	}

	/* Register error interrupts */
	ret = otx2_cpt_err_intr_register(dev);
	if (ret) {
		CPT_LOG_ERR("Could not register error interrupts");
		goto queues_detach;
	}

	ret = otx2_cpt_inline_init(dev);
	if (ret) {
		CPT_LOG_ERR("Could not enable inline IPsec");
		goto intr_unregister;
	}

	otx2_cpt_set_enqdeq_fns(dev);

	return 0;

intr_unregister:
	otx2_cpt_err_intr_unregister(dev);
queues_detach:
	otx2_cpt_queues_detach(dev);

	return ret;
}

static int
otx2_cpt_dev_start(struct rte_cryptodev *dev)
{
	RTE_SET_USED(dev);

	CPT_PMD_INIT_FUNC_TRACE();

	return 0;
}

static void
otx2_cpt_dev_stop(struct rte_cryptodev *dev)
{
	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
		cpt_fpm_clear();
}

static int
otx2_cpt_dev_close(struct rte_cryptodev *dev)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	int i, ret = 0;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = otx2_cpt_queue_pair_release(dev, i);
		if (ret)
			return ret;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		otx2_cpt_err_intr_unregister(dev);

	/* Detach queues */
	if (vf->nb_queues) {
		ret = otx2_cpt_queues_detach(dev);
		if (ret)
			CPT_LOG_ERR("Could not detach CPT queues");
	}

	return ret;
}

static void
otx2_cpt_dev_info_get(struct rte_cryptodev *dev,
		      struct rte_cryptodev_info *info)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;

	if (info != NULL) {
		info->max_nb_queue_pairs = vf->max_queues;
		info->feature_flags = otx2_cpt_default_ff_get();
		info->capabilities = otx2_cpt_capabilities_get();
		info->sym.max_nb_sessions = 0;
		info->driver_id = otx2_cryptodev_driver_id;
		info->min_mbuf_headroom_req = OTX2_CPT_MIN_HEADROOM_REQ;
		info->min_mbuf_tailroom_req = OTX2_CPT_MIN_TAILROOM_REQ;
	}
}

static int
otx2_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			  const struct rte_cryptodev_qp_conf *conf,
			  int socket_id __rte_unused)
{
	uint8_t grp_mask = OTX2_CPT_ENG_GRPS_MASK;
	struct rte_pci_device *pci_dev;
	struct otx2_cpt_qp *qp;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->data->queue_pairs[qp_id] != NULL)
		otx2_cpt_queue_pair_release(dev, qp_id);

	if (conf->nb_descriptors > OTX2_CPT_DEFAULT_CMD_QLEN) {
		CPT_LOG_ERR("Could not setup queue pair for %u descriptors",
			    conf->nb_descriptors);
		return -EINVAL;
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[2].addr == NULL) {
		CPT_LOG_ERR("Invalid PCI mem address");
		return -EIO;
	}

	qp = otx2_cpt_qp_create(dev, qp_id, grp_mask);
	if (qp == NULL) {
		CPT_LOG_ERR("Could not create queue pair %d", qp_id);
		return -ENOMEM;
	}

	qp->sess_mp = conf->mp_session;
	qp->sess_mp_priv = conf->mp_session_private;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct otx2_cpt_qp *qp = dev->data->queue_pairs[qp_id];
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	if (qp == NULL)
		return -EINVAL;

	CPT_LOG_INFO("Releasing queue pair %d", qp_id);

	ret = otx2_cpt_qp_destroy(dev, qp);
	if (ret) {
		CPT_LOG_ERR("Could not destroy queue pair %d", qp_id);
		return ret;
	}

	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

static unsigned int
otx2_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return cpt_get_session_size();
}

static int
otx2_cpt_sym_session_configure(struct rte_cryptodev *dev,
			       struct rte_crypto_sym_xform *xform,
			       struct rte_cryptodev_sym_session *sess,
			       struct rte_mempool *pool)
{
	CPT_PMD_INIT_FUNC_TRACE();

	return sym_session_configure(dev->driver_id, xform, sess, pool);
}

static void
otx2_cpt_sym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_sym_session *sess)
{
	CPT_PMD_INIT_FUNC_TRACE();

	return sym_session_clear(dev->driver_id, sess);
}

static unsigned int
otx2_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cpt_asym_sess_misc);
}

static int
otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
			  struct rte_crypto_asym_xform *xform,
			  struct rte_cryptodev_asym_session *sess,
			  struct rte_mempool *pool)
{
	struct cpt_asym_sess_misc *priv;
	vq_cmd_word3_t vq_cmd_w3;
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(pool, (void **)&priv)) {
		CPT_LOG_ERR("Could not allocate session_private_data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cpt_asym_sess_misc));

	ret = cpt_fill_asym_session_parameters(priv, xform);
	if (ret) {
		CPT_LOG_ERR("Could not configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(pool, priv);
		return ret;
	}

	vq_cmd_w3.u64 = 0;
	vq_cmd_w3.s.grp = OTX2_CPT_EGRP_AE;
	priv->cpt_inst_w7 = vq_cmd_w3.u64;

	set_asym_session_private_data(sess, dev->driver_id, priv);

	return 0;
}

static void
otx2_cpt_asym_session_clear(struct rte_cryptodev *dev,
			    struct rte_cryptodev_asym_session *sess)
{
	struct cpt_asym_sess_misc *priv;
	struct rte_mempool *sess_mp;

	CPT_PMD_INIT_FUNC_TRACE();

	priv = get_asym_session_private_data(sess, dev->driver_id);
	if (priv == NULL)
		return;

	/* Free resources allocated in session_cfg */
	cpt_free_asym_session_parameters(priv);

	/* Reset and free object back to pool */
	memset(priv, 0, otx2_cpt_asym_session_size_get(dev));
	sess_mp = rte_mempool_from_obj(priv);
	set_asym_session_private_data(sess, dev->driver_id, NULL);
	rte_mempool_put(sess_mp, priv);
}

struct rte_cryptodev_ops otx2_cpt_ops = {
	/* Device control ops */
	.dev_configure = otx2_cpt_dev_config,
	.dev_start = otx2_cpt_dev_start,
	.dev_stop = otx2_cpt_dev_stop,
	.dev_close = otx2_cpt_dev_close,
	.dev_infos_get = otx2_cpt_dev_info_get,

	.stats_get = NULL,
	.stats_reset = NULL,
	.queue_pair_setup = otx2_cpt_queue_pair_setup,
	.queue_pair_release = otx2_cpt_queue_pair_release,

	/* Symmetric crypto ops */
	.sym_session_get_size = otx2_cpt_sym_session_get_size,
	.sym_session_configure = otx2_cpt_sym_session_configure,
	.sym_session_clear = otx2_cpt_sym_session_clear,

	/* Asymmetric crypto ops */
	.asym_session_get_size = otx2_cpt_asym_session_size_get,
	.asym_session_configure = otx2_cpt_asym_session_cfg,
	.asym_session_clear = otx2_cpt_asym_session_clear,
};