0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
};
-static __rte_always_inline int
-cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
-{
- /*
- * Microcode only supports the following combination.
- * Encryption followed by authentication
- * Authentication followed by decryption
- */
- if (xform->next) {
- if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
- (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
- (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
- /* Unsupported as of now by microcode */
- CPT_LOG_DP_ERR("Unsupported combination");
- return -1;
- }
- if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
- (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
- (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
- /* For GMAC auth there is no cipher operation */
- if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
- xform->next->auth.algo !=
- RTE_CRYPTO_AUTH_AES_GMAC) {
- /* Unsupported as of now by microcode */
- CPT_LOG_DP_ERR("Unsupported combination");
- return -1;
- }
- }
- }
- return 0;
-}
-
static __rte_always_inline void
gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
{
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
-static __rte_always_inline int
-instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
-{
- struct rte_crypto_sym_xform *chain;
-
- CPT_PMD_INIT_FUNC_TRACE();
-
- if (cpt_is_algo_supported(xform))
- goto err;
-
- chain = xform;
- while (chain) {
- switch (chain->type) {
- case RTE_CRYPTO_SYM_XFORM_AEAD:
- if (fill_sess_aead(chain, sess))
- goto err;
- break;
- case RTE_CRYPTO_SYM_XFORM_CIPHER:
- if (fill_sess_cipher(chain, sess))
- goto err;
- break;
- case RTE_CRYPTO_SYM_XFORM_AUTH:
- if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
- if (fill_sess_gmac(chain, sess))
- goto err;
- } else {
- if (fill_sess_auth(chain, sess))
- goto err;
- }
- break;
- default:
- CPT_LOG_DP_ERR("Invalid crypto xform type");
- break;
- }
- chain = chain->next;
- }
-
- return 0;
-
-err:
- return -1;
-}
-
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
uint32_t counter_num_bytes,
return cpt_get_session_size();
}
-static void
-otx_cpt_session_init(void *sym_sess, uint8_t driver_id)
+static int
+sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
- struct rte_cryptodev_sym_session *sess = sym_sess;
- struct cpt_sess_misc *cpt_sess =
- (struct cpt_sess_misc *) get_sym_session_private_data(sess, driver_id);
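+ /*
+ * Microcode only supports encryption followed by authentication and
+ * authentication followed by decryption. 3DES-CBC chained with plain
+ * SHA1 and a standalone NULL auth with verify are rejected as well.
+ */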
+ if (xform->next) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ return -ENOTSUP;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return -ENOTSUP;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
+ return -ENOTSUP;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
+ return -ENOTSUP;
- CPT_PMD_INIT_FUNC_TRACE();
- cpt_sess->ctx_dma_addr = rte_mempool_virt2iova(cpt_sess) +
- sizeof(struct cpt_sess_misc);
+ } else {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+ xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ return -ENOTSUP;
+ }
+ return 0;
}
static int
-otx_cpt_session_cfg(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct rte_cryptodev_sym_session *sess,
- struct rte_mempool *mempool)
+sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *pool)
{
- struct rte_crypto_sym_xform *chain;
- void *sess_private_data = NULL;
-
- CPT_PMD_INIT_FUNC_TRACE();
-
- if (cpt_is_algo_supported(xform))
- goto err;
+ struct cpt_sess_misc *misc;
+ void *priv;
+ int ret;
- if (unlikely(sess == NULL)) {
- CPT_LOG_ERR("invalid session struct");
- return -EINVAL;
- }
+ ret = sym_xform_verify(xform);
+ if (unlikely(ret))
+ return ret;
- if (rte_mempool_get(mempool, &sess_private_data)) {
- CPT_LOG_ERR("Could not allocate sess_private_data");
+ if (unlikely(rte_mempool_get(pool, &priv))) {
+ CPT_LOG_ERR("Could not allocate session private data");
return -ENOMEM;
}
- chain = xform;
- while (chain) {
- switch (chain->type) {
+ misc = priv;
+
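+ /* Fill session fields for each xform in the chain */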
+ for ( ; xform != NULL; xform = xform->next) {
+ switch (xform->type) {
case RTE_CRYPTO_SYM_XFORM_AEAD:
- if (fill_sess_aead(chain, sess_private_data))
- goto err;
+ ret = fill_sess_aead(xform, misc);
break;
case RTE_CRYPTO_SYM_XFORM_CIPHER:
- if (fill_sess_cipher(chain, sess_private_data))
- goto err;
+ ret = fill_sess_cipher(xform, misc);
break;
case RTE_CRYPTO_SYM_XFORM_AUTH:
- if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
- if (fill_sess_gmac(chain, sess_private_data))
- goto err;
- } else {
- if (fill_sess_auth(chain, sess_private_data))
- goto err;
- }
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
+ ret = fill_sess_gmac(xform, misc);
+ else
+ ret = fill_sess_auth(xform, misc);
break;
default:
- CPT_LOG_ERR("Invalid crypto xform type");
- break;
+ ret = -1;
}
- chain = chain->next;
+
+ if (ret)
+ goto priv_put;
}
- set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
- otx_cpt_session_init(sess, dev->driver_id);
+
+ set_sym_session_private_data(sess, driver_id, priv);
+
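+ /* The context follows the misc struct; record its DMA address */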
+ misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
+ sizeof(struct cpt_sess_misc);
+
return 0;
-err:
- if (sess_private_data)
- rte_mempool_put(mempool, sess_private_data);
- return -EPERM;
+priv_put:
+ if (priv)
+ rte_mempool_put(pool, priv);
+ return -ENOTSUP;
}
+static void
+sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
+{
+ void *priv = get_sym_session_private_data(sess, driver_id);
+ struct rte_mempool *pool;
+
+ if (priv == NULL)
+ return;
+
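+ /* Zero out the private data before returning it to the pool */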
+ memset(priv, 0, cpt_get_session_size());
+
+ pool = rte_mempool_from_obj(priv);
+
+ set_sym_session_private_data(sess, driver_id, NULL);
+
+ rte_mempool_put(pool, priv);
+}
+
+static int
+otx_cpt_session_cfg(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *pool)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ return sym_session_configure(dev->driver_id, xform, sess, pool);
+}
+
static void
otx_cpt_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
- void *sess_priv = get_sym_session_private_data(sess, dev->driver_id);
-
CPT_PMD_INIT_FUNC_TRACE();
- if (sess_priv) {
- memset(sess_priv, 0, otx_cpt_get_session_size(dev));
- struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_sym_session_private_data(sess, dev->driver_id, NULL);
- rte_mempool_put(sess_mp, sess_priv);
- }
+
+ return sym_session_clear(dev->driver_id, sess);
}
static unsigned int
static __rte_always_inline int __rte_hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
struct rte_crypto_op *op,
- struct pending_queue *pqueue)
+ struct pending_queue *pend_q)
{
- struct cpt_sess_misc *sess;
+ const int driver_id = otx_cryptodev_driver_id;
struct rte_crypto_sym_op *sym_op = op->sym;
+ struct rte_cryptodev_sym_session *sess;
int ret;
- void *sess_t = NULL;
- void *sess_private_data_t = NULL;
-
- /* Create tmp session */
- if (rte_mempool_get(instance->sess_mp, (void **)&sess_t)) {
- ret = -ENOMEM;
- goto exit;
- }
+ /* Create temporary session */
- if (rte_mempool_get(instance->sess_mp_priv,
- (void **)&sess_private_data_t)) {
- ret = -ENOMEM;
- goto free_sess;
- }
-
- sess = (struct cpt_sess_misc *)sess_private_data_t;
-
- sess->ctx_dma_addr = rte_mempool_virt2iova(sess) +
- sizeof(struct cpt_sess_misc);
-
- ret = instance_session_cfg(sym_op->xform, (void *)sess);
- if (unlikely(ret)) {
- ret = -EINVAL;
- goto free_sess_priv;
- }
+ if (rte_mempool_get(instance->sess_mp, (void **)&sess))
+ return -ENOMEM;
- /* Save tmp session in op */
+ ret = sym_session_configure(driver_id, sym_op->xform, sess,
+ instance->sess_mp_priv);
+ if (ret)
+ goto sess_put;
- sym_op->session = (struct rte_cryptodev_sym_session *)sess_t;
- set_sym_session_private_data(sym_op->session, otx_cryptodev_driver_id,
- sess_private_data_t);
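+ /* Save the temporary session in the op */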
+ sym_op->session = sess;
- /* Enqueue op with the tmp session set */
- ret = otx_cpt_enq_single_sym(instance, op, pqueue);
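+ /* Enqueue the op with the temporary session set */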
+ ret = otx_cpt_enq_single_sym(instance, op, pend_q);
if (unlikely(ret))
- goto free_sess_priv;
+ goto priv_put;
return 0;
-free_sess_priv:
- rte_mempool_put(instance->sess_mp_priv, sess_private_data_t);
-free_sess:
- rte_mempool_put(instance->sess_mp, sess_t);
-exit:
+priv_put:
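+ /* Release the session private data set up above */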
+ sym_session_clear(driver_id, sess);
+sess_put:
+ rte_mempool_put(instance->sess_mp, sess);
return ret;
}