*/
#include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
#include <rte_errno.h>
#include "roc_cpt.h"
uint16_t nb_lf_avail, nb_lf;
int ret;
- dev->feature_flags &= ~conf->ff_disable;
+ dev->feature_flags = cnxk_cpt_default_ff_get() & ~conf->ff_disable;
nb_lf_avail = roc_cpt->nb_lf_avail;
nb_lf = conf->nb_queue_pairs;
+/*
+ * Device start: enable the instruction queue of every logical function
+ * (one LF per configured queue pair) so enqueued instructions are
+ * processed by the CPT hardware. Replaces the earlier no-op stub.
+ */
int
cnxk_cpt_dev_start(struct rte_cryptodev *dev)
{
- RTE_SET_USED(dev);
+ struct cnxk_cpt_vf *vf = dev->data->dev_private;
+ struct roc_cpt *roc_cpt = &vf->cpt;
+ uint16_t nb_lf = roc_cpt->nb_lf;
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < nb_lf; qp_id++) {
+ /* Application may not setup all queue pair */
+ if (roc_cpt->lf[qp_id] == NULL)
+ continue;
+
+ roc_cpt_iq_enable(roc_cpt->lf[qp_id]);
+ }
return 0;
}
+/*
+ * Device stop: mirror of cnxk_cpt_dev_start() — disable the instruction
+ * queue of every LF that was set up, skipping queue pairs the
+ * application never configured.
+ */
void
cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
{
- RTE_SET_USED(dev);
+ struct cnxk_cpt_vf *vf = dev->data->dev_private;
+ struct roc_cpt *roc_cpt = &vf->cpt;
+ uint16_t nb_lf = roc_cpt->nb_lf;
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < nb_lf; qp_id++) {
+ /* Skip queue pairs that were never set up. */
+ if (roc_cpt->lf[qp_id] == NULL)
+ continue;
+
+ roc_cpt_iq_disable(roc_cpt->lf[qp_id]);
+ }
}
int
struct cnxk_cpt_vf *vf = dev->data->dev_private;
struct roc_cpt *roc_cpt = &vf->cpt;
- info->max_nb_queue_pairs = roc_cpt->nb_lf_avail;
- info->feature_flags = dev->feature_flags;
+ info->max_nb_queue_pairs =
+ RTE_MIN(roc_cpt->nb_lf_avail, vf->max_qps_limit);
+ plt_cpt_dbg("max_nb_queue_pairs %u", info->max_nb_queue_pairs);
+
+ info->feature_flags = cnxk_cpt_default_ff_get();
info->capabilities = cnxk_crypto_capabilities_get(vf);
info->sym.max_nb_sessions = 0;
info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
- info->min_mbuf_tailroom_req = 0;
+ info->min_mbuf_tailroom_req = CNXK_CPT_MIN_TAILROOM_REQ;
}
static void
{
char mempool_name[RTE_MEMPOOL_NAMESIZE];
struct cpt_qp_meta_info *meta_info;
+ int lcore_cnt = rte_lcore_count();
struct rte_mempool *pool;
+ int mb_pool_sz, mlen = 8;
uint32_t cache_sz;
- int mlen = 8;
if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
/* Get meta len */
mlen = RTE_MAX(mlen, cnxk_cpt_asym_get_mlen());
}
+ mb_pool_sz = nb_elements;
cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);
+ /* For poll mode, core that enqueues and core that dequeues can be
+ * different. For event mode, all cores are allowed to use same crypto
+ * queue pair.
+ */
+
+ mb_pool_sz += (RTE_MAX(2, lcore_cnt) * cache_sz);
+
/* Allocate mempool */
snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
dev->data->dev_id, qp_id);
- pool = rte_mempool_create(mempool_name, nb_elements, mlen, cache_sz, 0,
+ pool = rte_mempool_create(mempool_name, mb_pool_sz, mlen, cache_sz, 0,
NULL, NULL, NULL, NULL, rte_socket_id(), 0);
if (pool == NULL) {
/* Initialize pending queue */
qp->pend_q.req_queue = pq_mem->addr;
- qp->pend_q.enq_tail = 0;
- qp->pend_q.deq_head = 0;
- qp->pend_q.pending_count = 0;
+ qp->pend_q.head = 0;
+ qp->pend_q.tail = 0;
return qp;
goto exit;
}
+ qp->pend_q.pq_mask = qp->lf.nb_desc - 1;
+
roc_cpt->lf[qp_id] = &qp->lf;
ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
}
+/*
+ * Classify a symmetric xform chain (cipher-only, auth-only, AEAD, or a
+ * cipher+auth chain) and populate the session context accordingly.
+ * Replaces the old sym_xform_verify() classifier: instead of returning a
+ * CNXK_CPT_* op-type code for the caller to dispatch on, this fills the
+ * session directly via the fill_sess_* helpers.
+ * Returns 0 on success, -EINVAL for malformed chains, -ENOTSUP for
+ * combinations the hardware/driver does not support.
+ */
static int
-sym_xform_verify(struct rte_crypto_sym_xform *xform)
+cnxk_sess_fill(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
- xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
- return -ENOTSUP;
+ struct rte_crypto_sym_xform *aead_xfrm = NULL;
+ struct rte_crypto_sym_xform *c_xfrm = NULL;
+ struct rte_crypto_sym_xform *a_xfrm = NULL;
+ bool ciph_then_auth = false;
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
- return CNXK_CPT_CIPHER;
+ if (xform == NULL)
+ return -EINVAL;
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
- return CNXK_CPT_AUTH;
+ /* Sort the (at most two) xforms into cipher/auth slots, or treat a
+ * single non-cipher/non-auth xform as AEAD, remembering chain order.
+ */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ c_xfrm = xform;
+ a_xfrm = xform->next;
+ ciph_then_auth = true;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ c_xfrm = xform->next;
+ a_xfrm = xform;
+ ciph_then_auth = false;
+ } else {
+ aead_xfrm = xform;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL)
- return CNXK_CPT_AEAD;
+ if (c_xfrm != NULL && c_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ plt_dp_err("Invalid type in cipher xform");
+ return -EINVAL;
+ }
- if (xform->next == NULL)
- return -EIO;
+ if (a_xfrm != NULL && a_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+ plt_dp_err("Invalid type in auth xform");
+ return -EINVAL;
+ }
+
+ if (aead_xfrm != NULL && aead_xfrm->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
+ plt_dp_err("Invalid type in AEAD xform");
+ return -EINVAL;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
+ if ((c_xfrm == NULL || c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL) &&
+ a_xfrm != NULL && a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+ a_xfrm->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ plt_dp_err("Null cipher + null auth verify is not supported");
return -ENOTSUP;
+ }
+
+ /* Cipher only */
+ if (c_xfrm != NULL &&
+ (a_xfrm == NULL || a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)) {
+ if (fill_sess_cipher(c_xfrm, sess))
+ return -ENOTSUP;
+ else
+ return 0;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
+ /* Auth only */
+ if (a_xfrm != NULL &&
+ (c_xfrm == NULL || c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL)) {
+ if (fill_sess_auth(a_xfrm, sess))
+ return -ENOTSUP;
+ else
+ return 0;
+ }
+
+ /* AEAD */
+ if (aead_xfrm != NULL) {
+ if (fill_sess_aead(aead_xfrm, sess))
+ return -ENOTSUP;
+ else
+ return 0;
+ }
+
+ /* Chained ops */
+ if (c_xfrm == NULL || a_xfrm == NULL) {
+ plt_dp_err("Invalid xforms");
+ return -EINVAL;
+ }
+
+ /* 3DES-CBC chained with plain SHA1 is explicitly rejected, as in the
+ * old classifier.
+ */
+ if (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
+ a_xfrm->auth.algo == RTE_CRYPTO_AUTH_SHA1) {
+ plt_dp_err("3DES-CBC + SHA1 is not supported");
return -ENOTSUP;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
- return CNXK_CPT_CIPHER_ENC_AUTH_GEN;
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
- return CNXK_CPT_AUTH_VRFY_CIPHER_DEC;
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- switch (xform->auth.algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- switch (xform->next->cipher.algo) {
- case RTE_CRYPTO_CIPHER_AES_CBC:
- return CNXK_CPT_AUTH_GEN_CIPHER_ENC;
- default:
- return -ENOTSUP;
- }
- default:
+ /* Cipher then auth */
+ if (ciph_then_auth) {
+ if (fill_sess_cipher(c_xfrm, sess))
return -ENOTSUP;
- }
+ if (fill_sess_auth(a_xfrm, sess))
+ return -ENOTSUP;
+ else
+ return 0;
}
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- switch (xform->cipher.algo) {
- case RTE_CRYPTO_CIPHER_AES_CBC:
- switch (xform->next->auth.algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- return CNXK_CPT_CIPHER_DEC_AUTH_VRFY;
+ /* else */
+
+ /* Auth-then-cipher chain: for the encrypt direction only
+ * SHA1-HMAC + AES-CBC is permitted (mirrors the old
+ * CNXK_CPT_AUTH_GEN_CIPHER_ENC whitelist).
+ */
+ if (c_xfrm->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ switch (a_xfrm->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ switch (c_xfrm->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ break;
default:
return -ENOTSUP;
}
+ break;
default:
return -ENOTSUP;
}
}
- return -ENOTSUP;
+ /* Auth-then-cipher: fill auth context first, then cipher. */
+ if (fill_sess_auth(a_xfrm, sess))
+ return -ENOTSUP;
+ if (fill_sess_cipher(c_xfrm, sess))
+ return -ENOTSUP;
+ else
+ return 0;
}
static uint64_t
void *priv;
int ret;
- ret = sym_xform_verify(xform);
- if (unlikely(ret < 0))
- return ret;
-
if (unlikely(rte_mempool_get(pool, &priv))) {
plt_dp_err("Could not allocate session private data");
return -ENOMEM;
sess_priv = priv;
- switch (ret) {
- case CNXK_CPT_CIPHER:
- ret = fill_sess_cipher(xform, sess_priv);
- break;
- case CNXK_CPT_AUTH:
- if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
- ret = fill_sess_gmac(xform, sess_priv);
- else
- ret = fill_sess_auth(xform, sess_priv);
- break;
- case CNXK_CPT_AEAD:
- ret = fill_sess_aead(xform, sess_priv);
- break;
- case CNXK_CPT_CIPHER_ENC_AUTH_GEN:
- case CNXK_CPT_CIPHER_DEC_AUTH_VRFY:
- ret = fill_sess_cipher(xform, sess_priv);
- if (ret < 0)
- break;
- ret = fill_sess_auth(xform->next, sess_priv);
- break;
- case CNXK_CPT_AUTH_VRFY_CIPHER_DEC:
- case CNXK_CPT_AUTH_GEN_CIPHER_ENC:
- ret = fill_sess_auth(xform, sess_priv);
- if (ret < 0)
- break;
- ret = fill_sess_cipher(xform->next, sess_priv);
- break;
- default:
- ret = -1;
- }
-
+ ret = cnxk_sess_fill(xform, sess_priv);
if (ret)
goto priv_put;
if ((sess_priv->roc_se_ctx.fc_type == ROC_SE_HASH_HMAC) &&
cpt_mac_len_verify(&xform->auth)) {
plt_dp_err("MAC length is not supported");
+ if (sess_priv->roc_se_ctx.auth_key != NULL) {
+ plt_free(sess_priv->roc_se_ctx.auth_key);
+ sess_priv->roc_se_ctx.auth_key = NULL;
+ }
+
ret = -ENOTSUP;
goto priv_put;
}
priv_put:
rte_mempool_put(pool, priv);
- return -ENOTSUP;
+ return ret;
}
int
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
void *priv = get_sym_session_private_data(sess, driver_id);
+ struct cnxk_se_sess *sess_priv;
struct rte_mempool *pool;
if (priv == NULL)
return;
+ sess_priv = priv;
+
+ if (sess_priv->roc_se_ctx.auth_key != NULL)
+ plt_free(sess_priv->roc_se_ctx.auth_key);
+
memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));
pool = rte_mempool_from_obj(priv);
return 0;
}
+
+/*
+ * Diagnostic dump for a misbehaving queue pair: prints the s/w pending
+ * queue state (head/tail/mask/inflight) and the h/w queue state read
+ * from the LF CSRs (CPT_LF_INPROG, CPT_LF_Q_INST_PTR), then the AF
+ * registers via roc_cpt_afs_print().
+ */
+void
+cnxk_cpt_dump_on_err(struct cnxk_cpt_qp *qp)
+{
+ struct pending_queue *pend_q = &qp->pend_q;
+ uint64_t inflight, enq_ptr, deq_ptr, insts;
+ union cpt_lf_q_inst_ptr inst_ptr;
+ union cpt_lf_inprog lf_inprog;
+
+ plt_print("Lcore ID: %d, LF/QP ID: %d", rte_lcore_id(), qp->lf.lf_id);
+ plt_print("");
+ plt_print("S/w pending queue:");
+ plt_print("\tHead: %"PRIu64"", pend_q->head);
+ plt_print("\tTail: %"PRIu64"", pend_q->tail);
+ plt_print("\tMask: 0x%"PRIx64"", pend_q->pq_mask);
+ plt_print("\tInflight count: %"PRIu64"",
+ pending_queue_infl_cnt(pend_q->head, pend_q->tail,
+ pend_q->pq_mask));
+
+ plt_print("");
+ plt_print("H/w pending queue:");
+
+ lf_inprog.u = plt_read64(qp->lf.rbase + CPT_LF_INPROG);
+ inflight = lf_inprog.s.inflight;
+ plt_print("\tInflight in engines: %"PRIu64"", inflight);
+
+ inst_ptr.u = plt_read64(qp->lf.rbase + CPT_LF_Q_INST_PTR);
+
+ enq_ptr = inst_ptr.s.nq_ptr;
+ deq_ptr = inst_ptr.s.dq_ptr;
+
+ /* Handle NQ/DQ pointer wrap-around when computing outstanding
+ * instructions. NOTE(review): the 320 + 40 slack presumably accounts
+ * for extra h/w queue entries beyond nb_desc — confirm against the
+ * CPT hardware reference manual.
+ */
+ if (enq_ptr >= deq_ptr)
+ insts = enq_ptr - deq_ptr;
+ else
+ insts = (enq_ptr + pend_q->pq_mask + 1 + 320 + 40) - deq_ptr;
+
+ plt_print("\tNQ ptr: 0x%"PRIx64"", enq_ptr);
+ plt_print("\tDQ ptr: 0x%"PRIx64"", deq_ptr);
+ plt_print("Insts waiting in CPT: %"PRIu64"", insts);
+
+ plt_print("");
+ roc_cpt_afs_print(qp->lf.roc_cpt);
+}