*/
#include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
#include <rte_errno.h>
#include "roc_cpt.h"
+#include "cnxk_ae.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
+#include "cnxk_cryptodev_capabilities.h"
#include "cnxk_se.h"
+#define CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS 5
+#define CNXK_CPT_MAX_ASYM_OP_MOD_LEN 1024
+
static int
cnxk_cpt_get_mlen(void)
{
return len;
}
+static int
+cnxk_cpt_asym_get_mlen(void)
+{
+ uint32_t len;
+
+ /* To hold the RPTR (result pointer) */
+ len = sizeof(uint64_t);
+
+ /* Get meta len for asymmetric operations */
+ len += CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS * CNXK_CPT_MAX_ASYM_OP_MOD_LEN;
+
+ return len;
+}
+
int
cnxk_cpt_dev_config(struct rte_cryptodev *dev,
struct rte_cryptodev_config *conf)
uint16_t nb_lf_avail, nb_lf;
int ret;
- dev->feature_flags &= ~conf->ff_disable;
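+ /* Start from the driver's default feature set; ff_disable holds the
+ * features the application chose to disable at configure time.
+ */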
+ dev->feature_flags = cnxk_cpt_default_ff_get() & ~conf->ff_disable;
nb_lf_avail = roc_cpt->nb_lf_avail;
nb_lf = conf->nb_queue_pairs;
return ret;
}
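+ /* Asymmetric ops rely on shared FPM (fixed-point multiplication) and EC
+ * group tables. Populate them once per device here; sessions only
+ * reference them later.
+ */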
+ if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
+ /* Initialize shared FPM table */
+ ret = roc_ae_fpm_get(vf->cnxk_fpm_iova);
+ if (ret) {
+ plt_err("Could not get FPM table");
+ return ret;
+ }
+
+ /* Initialize EC group table */
+ ret = roc_ae_ec_grp_get(vf->ec_grp);
+ if (ret) {
+ plt_err("Could not get EC grp table");
+ roc_ae_fpm_put();
+ return ret;
+ }
+ }
+
return 0;
}
int
cnxk_cpt_dev_start(struct rte_cryptodev *dev)
{
- RTE_SET_USED(dev);
+ struct cnxk_cpt_vf *vf = dev->data->dev_private;
+ struct roc_cpt *roc_cpt = &vf->cpt;
+ uint16_t nb_lf = roc_cpt->nb_lf;
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < nb_lf; qp_id++) {
+ /* The application may not have set up all queue pairs */
+ if (roc_cpt->lf[qp_id] == NULL)
+ continue;
+
+ roc_cpt_iq_enable(roc_cpt->lf[qp_id]);
+ }
return 0;
}
void
cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
{
- RTE_SET_USED(dev);
+ struct cnxk_cpt_vf *vf = dev->data->dev_private;
+ struct roc_cpt *roc_cpt = &vf->cpt;
+ uint16_t nb_lf = roc_cpt->nb_lf;
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < nb_lf; qp_id++) {
+ if (roc_cpt->lf[qp_id] == NULL)
+ continue;
+
+ roc_cpt_iq_disable(roc_cpt->lf[qp_id]);
+ }
}
int
}
}
+ if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
+ roc_ae_fpm_put();
+ roc_ae_ec_grp_put();
+ }
+
roc_cpt_dev_clear(&vf->cpt);
return 0;
struct cnxk_cpt_vf *vf = dev->data->dev_private;
struct roc_cpt *roc_cpt = &vf->cpt;
- info->max_nb_queue_pairs = roc_cpt->nb_lf_avail;
- info->feature_flags = dev->feature_flags;
- info->capabilities = NULL;
+ info->max_nb_queue_pairs =
+ RTE_MIN(roc_cpt->nb_lf_avail, vf->max_qps_limit);
+ plt_cpt_dbg("max_nb_queue_pairs %u", info->max_nb_queue_pairs);
+
+ info->feature_flags = cnxk_cpt_default_ff_get();
+ info->capabilities = cnxk_crypto_capabilities_get(vf);
info->sym.max_nb_sessions = 0;
info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
- info->min_mbuf_tailroom_req = 0;
+ info->min_mbuf_tailroom_req = CNXK_CPT_MIN_TAILROOM_REQ;
}
static void
{
char mempool_name[RTE_MEMPOOL_NAMESIZE];
struct cpt_qp_meta_info *meta_info;
+ int lcore_cnt = rte_lcore_count();
struct rte_mempool *pool;
+ int mb_pool_sz, mlen = 8;
uint32_t cache_sz;
- int mlen = 8;
if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
/* Get meta len */
mlen = cnxk_cpt_get_mlen();
}
+ if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
+ /* Get meta len required for asymmetric operations */
+ mlen = RTE_MAX(mlen, cnxk_cpt_asym_get_mlen());
+ }
+
+ mb_pool_sz = nb_elements;
cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);
+ /* For poll mode, the core that enqueues and the core that dequeues can
+ * be different. For event mode, all cores are allowed to use the same
+ * crypto queue pair.
+ */
+
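+ /* Also account for objects parked in per-lcore mempool caches: up to
+ * cache_sz per core, with at least two cores (one enqueue, one dequeue).
+ */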
+ mb_pool_sz += (RTE_MAX(2, lcore_cnt) * cache_sz);
+
/* Allocate mempool */
snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
dev->data->dev_id, qp_id);
- pool = rte_mempool_create(mempool_name, nb_elements, mlen, cache_sz, 0,
+ pool = rte_mempool_create(mempool_name, mb_pool_sz, mlen, cache_sz, 0,
NULL, NULL, NULL, NULL, rte_socket_id(), 0);
if (pool == NULL) {
/* Initialize pending queue */
qp->pend_q.req_queue = pq_mem->addr;
- qp->pend_q.enq_tail = 0;
- qp->pend_q.deq_head = 0;
- qp->pend_q.pending_count = 0;
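+ /* head/tail replace enq_tail/deq_head/pending_count; the in-flight
+ * count is derived from them (see pending_queue_infl_cnt).
+ */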
+ qp->pend_q.head = 0;
+ qp->pend_q.tail = 0;
return qp;
goto exit;
}
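+ /* Indices wrap using the mask, assuming nb_desc is a power of two. */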
+ qp->pend_q.pq_mask = qp->lf.nb_desc - 1;
+
roc_cpt->lf[qp_id] = &qp->lf;
ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
}
static int
-sym_xform_verify(struct rte_crypto_sym_xform *xform)
+cnxk_sess_fill(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
- xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
- return -ENOTSUP;
+ struct rte_crypto_sym_xform *aead_xfrm = NULL;
+ struct rte_crypto_sym_xform *c_xfrm = NULL;
+ struct rte_crypto_sym_xform *a_xfrm = NULL;
+ bool ciph_then_auth = false;
+
+ if (xform == NULL)
+ return -EINVAL;
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
- return CNXK_CPT_CIPHER;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ c_xfrm = xform;
+ a_xfrm = xform->next;
+ ciph_then_auth = true;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ c_xfrm = xform->next;
+ a_xfrm = xform;
+ ciph_then_auth = false;
+ } else {
+ aead_xfrm = xform;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
- return CNXK_CPT_AUTH;
+ if (c_xfrm != NULL && c_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ plt_dp_err("Invalid type in cipher xform");
+ return -EINVAL;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL)
- return CNXK_CPT_AEAD;
+ if (a_xfrm != NULL && a_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+ plt_dp_err("Invalid type in auth xform");
+ return -EINVAL;
+ }
- if (xform->next == NULL)
- return -EIO;
+ if (aead_xfrm != NULL && aead_xfrm->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
+ plt_dp_err("Invalid type in AEAD xform");
+ return -EINVAL;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
+ if ((c_xfrm == NULL || c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL) &&
+ a_xfrm != NULL && a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+ a_xfrm->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ plt_dp_err("Null cipher + null auth verify is not supported");
return -ENOTSUP;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
+ /* Cipher only */
+ if (c_xfrm != NULL &&
+ (a_xfrm == NULL || a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)) {
+ if (fill_sess_cipher(c_xfrm, sess))
+ return -ENOTSUP;
+ else
+ return 0;
+ }
+
+ /* Auth only */
+ if (a_xfrm != NULL &&
+ (c_xfrm == NULL || c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL)) {
+ if (fill_sess_auth(a_xfrm, sess))
+ return -ENOTSUP;
+ else
+ return 0;
+ }
+
+ /* AEAD */
+ if (aead_xfrm != NULL) {
+ if (fill_sess_aead(aead_xfrm, sess))
+ return -ENOTSUP;
+ else
+ return 0;
+ }
+
+ /* Chained ops */
+ if (c_xfrm == NULL || a_xfrm == NULL) {
+ plt_dp_err("Invalid xforms");
+ return -EINVAL;
+ }
+
+ if (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
+ a_xfrm->auth.algo == RTE_CRYPTO_AUTH_SHA1) {
+ plt_dp_err("3DES-CBC + SHA1 is not supported");
return -ENOTSUP;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
- return CNXK_CPT_CIPHER_ENC_AUTH_GEN;
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
- return CNXK_CPT_AUTH_VRFY_CIPHER_DEC;
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- switch (xform->auth.algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- switch (xform->next->cipher.algo) {
- case RTE_CRYPTO_CIPHER_AES_CBC:
- return CNXK_CPT_AUTH_GEN_CIPHER_ENC;
- default:
- return -ENOTSUP;
- }
- default:
+ /* Cipher then auth */
+ if (ciph_then_auth) {
+ if (fill_sess_cipher(c_xfrm, sess))
return -ENOTSUP;
- }
+ if (fill_sess_auth(a_xfrm, sess))
+ return -ENOTSUP;
+ else
+ return 0;
}
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- switch (xform->cipher.algo) {
- case RTE_CRYPTO_CIPHER_AES_CBC:
- switch (xform->next->auth.algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- return CNXK_CPT_CIPHER_DEC_AUTH_VRFY;
+ /* Auth then cipher */
+
+ if (c_xfrm->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ switch (a_xfrm->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ switch (c_xfrm->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ break;
default:
return -ENOTSUP;
}
+ break;
default:
return -ENOTSUP;
}
}
- return -ENOTSUP;
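+ /* Auth then cipher: populate the contexts in the order the xforms were
+ * supplied, auth first.
+ */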
+ if (fill_sess_auth(a_xfrm, sess))
+ return -ENOTSUP;
+ if (fill_sess_cipher(c_xfrm, sess))
+ return -ENOTSUP;
+ else
+ return 0;
}
static uint64_t
void *priv;
int ret;
- ret = sym_xform_verify(xform);
- if (unlikely(ret < 0))
- return ret;
-
if (unlikely(rte_mempool_get(pool, &priv))) {
plt_dp_err("Could not allocate session private data");
return -ENOMEM;
sess_priv = priv;
- switch (ret) {
- case CNXK_CPT_CIPHER:
- ret = fill_sess_cipher(xform, sess_priv);
- break;
- case CNXK_CPT_AUTH:
- if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
- ret = fill_sess_gmac(xform, sess_priv);
- else
- ret = fill_sess_auth(xform, sess_priv);
- break;
- case CNXK_CPT_AEAD:
- ret = fill_sess_aead(xform, sess_priv);
- break;
- default:
- ret = -1;
- }
-
+ ret = cnxk_sess_fill(xform, sess_priv);
if (ret)
goto priv_put;
if ((sess_priv->roc_se_ctx.fc_type == ROC_SE_HASH_HMAC) &&
cpt_mac_len_verify(&xform->auth)) {
plt_dp_err("MAC length is not supported");
+ if (sess_priv->roc_se_ctx.auth_key != NULL) {
+ plt_free(sess_priv->roc_se_ctx.auth_key);
+ sess_priv->roc_se_ctx.auth_key = NULL;
+ }
+
ret = -ENOTSUP;
goto priv_put;
}
priv_put:
rte_mempool_put(pool, priv);
- return -ENOTSUP;
+ return ret;
}
int
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
void *priv = get_sym_session_private_data(sess, driver_id);
+ struct cnxk_se_sess *sess_priv;
struct rte_mempool *pool;
if (priv == NULL)
return;
+ sess_priv = priv;
+
+ if (sess_priv->roc_se_ctx.auth_key != NULL)
+ plt_free(sess_priv->roc_se_ctx.auth_key);
+
memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));
pool = rte_mempool_from_obj(priv);
{
return sym_session_clear(dev->driver_id, sess);
}
+
+unsigned int
+cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct cnxk_ae_sess);
+}
+
+void
+cnxk_ae_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_asym_session *sess)
+{
+ struct rte_mempool *sess_mp;
+ struct cnxk_ae_sess *priv;
+
+ priv = get_asym_session_private_data(sess, dev->driver_id);
+ if (priv == NULL)
+ return;
+
+ /* Free resources allocated in session_cfg */
+ cnxk_ae_free_session_parameters(priv);
+
+ /* Reset and free object back to pool */
+ memset(priv, 0, cnxk_ae_session_size_get(dev));
+ sess_mp = rte_mempool_from_obj(priv);
+ set_asym_session_private_data(sess, dev->driver_id, NULL);
+ rte_mempool_put(sess_mp, priv);
+}
+
+int
+cnxk_ae_session_cfg(struct rte_cryptodev *dev,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_mempool *pool)
+{
+ struct cnxk_cpt_vf *vf = dev->data->dev_private;
+ struct roc_cpt *roc_cpt = &vf->cpt;
+ struct cnxk_ae_sess *priv;
+ union cpt_inst_w7 w7;
+ int ret;
+
+ if (rte_mempool_get(pool, (void **)&priv))
+ return -ENOMEM;
+
+ memset(priv, 0, sizeof(struct cnxk_ae_sess));
+
+ ret = cnxk_ae_fill_session_parameters(priv, xform);
+ if (ret) {
+ rte_mempool_put(pool, priv);
+ return ret;
+ }
+
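+ /* Fill fast-path fields: w7.egrp steers instructions from this session
+ * to the asymmetric (AE) engine group; the per-VF FPM/EC tables are
+ * referenced rather than copied.
+ */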
+ w7.u64 = 0;
+ w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_AE];
+ priv->cpt_inst_w7 = w7.u64;
+ priv->cnxk_fpm_iova = vf->cnxk_fpm_iova;
+ priv->ec_grp = vf->ec_grp;
+ set_asym_session_private_data(sess, dev->driver_id, priv);
+
+ return 0;
+}
+
+void
+cnxk_cpt_dump_on_err(struct cnxk_cpt_qp *qp)
+{
+ struct pending_queue *pend_q = &qp->pend_q;
+ uint64_t inflight, enq_ptr, deq_ptr, insts;
+ union cpt_lf_q_inst_ptr inst_ptr;
+ union cpt_lf_inprog lf_inprog;
+
+ plt_print("Lcore ID: %d, LF/QP ID: %d", rte_lcore_id(), qp->lf.lf_id);
+ plt_print("");
+ plt_print("S/w pending queue:");
+ plt_print("\tHead: %"PRIu64"", pend_q->head);
+ plt_print("\tTail: %"PRIu64"", pend_q->tail);
+ plt_print("\tMask: 0x%"PRIx64"", pend_q->pq_mask);
+ plt_print("\tInflight count: %"PRIu64"",
+ pending_queue_infl_cnt(pend_q->head, pend_q->tail,
+ pend_q->pq_mask));
+
+ plt_print("");
+ plt_print("H/w pending queue:");
+
+ lf_inprog.u = plt_read64(qp->lf.rbase + CPT_LF_INPROG);
+ inflight = lf_inprog.s.inflight;
+ plt_print("\tInflight in engines: %"PRIu64"", inflight);
+
+ inst_ptr.u = plt_read64(qp->lf.rbase + CPT_LF_Q_INST_PTR);
+
+ enq_ptr = inst_ptr.s.nq_ptr;
+ deq_ptr = inst_ptr.s.dq_ptr;
+
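+ /* NQ/DQ pointers wrap at the hardware queue size: nb_desc (pq_mask + 1)
+ * plus extra slots (presumably the 320 + 40 entries the ROC layer
+ * reserves beyond nb_desc).
+ */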
+ if (enq_ptr >= deq_ptr)
+ insts = enq_ptr - deq_ptr;
+ else
+ insts = (enq_ptr + pend_q->pq_mask + 1 + 320 + 40) - deq_ptr;
+
+ plt_print("\tNQ ptr: 0x%"PRIx64"", enq_ptr);
+ plt_print("\tDQ ptr: 0x%"PRIx64"", deq_ptr);
+ plt_print("Insts waiting in CPT: %"PRIu64"", insts);
+
+ plt_print("");
+ roc_cpt_afs_print(qp->lf.roc_cpt);
+}