#ifndef _CPT_COMMON_H_
#define _CPT_COMMON_H_
+#include <rte_mempool.h>
+
/*
* This file defines common macros and structs
*/
#define MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)
-struct cptvf_meta_info {
- void *cptvf_meta_pool;
- int cptvf_op_mlen;
- int cptvf_op_sb_mlen;
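+/* Per queue pair meta buffer info: the metabuf mempool for the queue pair
+ * and the meta lengths used for scatter-gather (sg_mlen) and linear
+ * buffer (lb_mlen) requests.
+ */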
+struct cpt_qp_meta_info {
+ struct rte_mempool *pool;
+ int sg_mlen;
+ int lb_mlen;
};
struct rid {
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
struct cpt_sess_misc *sess_misc,
- struct cptvf_meta_info *cpt_m_info,
+ struct cpt_qp_meta_info *m_info,
void **mdata_ptr,
void **prep_req)
{
}
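+ /* A single segment mbuf with enough head/tail room takes the smaller
+ * linear buffer meta length; everything else needs the scatter-gather
+ * meta length.
+ */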
if (likely(flags & SINGLE_BUF_HEADTAILROOM))
- mdata = alloc_op_meta(m_src,
- &fc_params.meta_buf,
- cpt_m_info->cptvf_op_sb_mlen,
- cpt_m_info->cptvf_meta_pool);
+ mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
+ m_info->lb_mlen, m_info->pool);
else
- mdata = alloc_op_meta(NULL,
- &fc_params.meta_buf,
- cpt_m_info->cptvf_op_mlen,
- cpt_m_info->cptvf_meta_pool);
+ mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
+ m_info->sg_mlen, m_info->pool);
if (unlikely(mdata == NULL)) {
CPT_LOG_DP_ERR("Error allocating meta buffer for request");
return 0;
free_mdata_and_exit:
- free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
+ free_op_meta(mdata, m_info->pool);
err_exit:
return ret;
}
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
struct cpt_sess_misc *sess,
- struct cptvf_meta_info *cpt_m_info,
+ struct cpt_qp_meta_info *m_info,
void **mdata_ptr,
void **prep_req)
{
m_src = sym_op->m_src;
/* For just digest lets force mempool alloc */
- mdata = alloc_op_meta(NULL, &params.meta_buf, cpt_m_info->cptvf_op_mlen,
- cpt_m_info->cptvf_meta_pool);
+ mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
+ m_info->pool);
if (mdata == NULL) {
ret = -ENOMEM;
goto err_exit;
return 0;
free_mdata_and_exit:
- free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
+ free_op_meta(mdata, m_info->pool);
err_exit:
return ret;
}
cryptodev->device = NULL;
cryptodev->data = NULL;
- /* free metapool memory */
- cleanup_global_resources();
-
return 0;
}
#include <rte_branch_prediction.h>
#include <rte_common.h>
+#include <rte_cryptodev.h>
#include <rte_errno.h>
+#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include "otx_cryptodev_mbox.h"
#include "cpt_pmd_logs.h"
+#include "cpt_pmd_ops_helper.h"
#include "cpt_hw_types.h"
+#define METABUF_POOL_CACHE_SIZE 512
+
/*
* VF HAL functions
* Access its own BAR0/4 registers by passing VF number as 0.
return 0;
}
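+/*
+ * Create the metabuf mempool for queue pair qp_id, sized for nb_elements
+ * requests, and record the pool and meta lengths in the instance meta_info.
+ */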
+static int
+otx_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
+ struct cpt_instance *instance, uint8_t qp_id,
+ int nb_elements)
+{
+ char mempool_name[RTE_MEMPOOL_NAMESIZE];
+ int sg_mlen, lb_mlen, max_mlen, ret;
+ struct cpt_qp_meta_info *meta_info;
+ struct rte_mempool *pool;
+
+ /* Get meta len for scatter gather mode */
+ sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();
+
+ /* Extra 32B saved for future considerations */
+ sg_mlen += 4 * sizeof(uint64_t);
+
+ /* Get meta len for linear buffer (direct) mode */
+ lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();
+
+ /* Extra 32B saved for future considerations */
+ lb_mlen += 4 * sizeof(uint64_t);
+
+ /* Check max requirement for meta buffer */
+ max_mlen = RTE_MAX(lb_mlen, sg_mlen);
+
+ /* Allocate mempool */
+
+ snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx_cpt_mb_%u:%u",
+ dev->data->dev_id, qp_id);
+
+ pool = rte_mempool_create_empty(mempool_name, nb_elements, max_mlen,
+ METABUF_POOL_CACHE_SIZE, 0,
+ rte_socket_id(), 0);
+
+ if (pool == NULL) {
+ CPT_LOG_ERR("Could not create mempool for metabuf");
+ return rte_errno;
+ }
+
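+ /* Use the build-time default mbuf mempool ops for the metabuf pool */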
+ ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
+ NULL);
+ if (ret) {
+ CPT_LOG_ERR("Could not set mempool ops");
+ goto mempool_free;
+ }
+
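+ /* rte_mempool_populate_default() returns the number of objects added;
+ * anything <= 0 means the pool could not be populated.
+ */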
+ ret = rte_mempool_populate_default(pool);
+ if (ret <= 0) {
+ CPT_LOG_ERR("Could not populate metabuf pool");
+ goto mempool_free;
+ }
+
+ meta_info = &instance->meta_info;
+
+ meta_info->pool = pool;
+ meta_info->lb_mlen = lb_mlen;
+ meta_info->sg_mlen = sg_mlen;
+
+ return 0;
+
+mempool_free:
+ rte_mempool_free(pool);
+ return ret;
+}
+
+static void
+otx_cpt_metabuf_mempool_destroy(struct cpt_instance *instance)
+{
+ struct cpt_qp_meta_info *meta_info = &instance->meta_info;
+
+ rte_mempool_free(meta_info->pool);
+
+ meta_info->pool = NULL;
+ meta_info->lb_mlen = 0;
+ meta_info->sg_mlen = 0;
+}
+
int
-otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance)
+otx_cpt_get_resource(const struct rte_cryptodev *dev, uint8_t group,
+ struct cpt_instance **instance, uint16_t qp_id)
{
int ret = -ENOENT, len, qlen, i;
int chunk_len, chunks, chunk_size;
- struct cpt_vf *cptvf = (struct cpt_vf *)dev;
+ struct cpt_vf *cptvf = dev->data->dev_private;
struct cpt_instance *cpt_instance;
struct command_chunk *chunk_head = NULL, *chunk_prev = NULL;
struct command_chunk *chunk = NULL;
RTE_CACHE_LINE_SIZE);
if (!rz) {
ret = rte_errno;
- goto cleanup;
+ goto exit;
}
mem = rz->addr;
cpt_instance->rsvd = (uintptr_t)rz;
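+ /* Per queue pair metabuf pool, sized to the queue length */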
+ ret = otx_cpt_metabuf_mempool_create(dev, cpt_instance, qp_id, qlen);
+ if (ret) {
+ CPT_LOG_ERR("Could not create mempool for metabuf");
+ goto memzone_free;
+ }
+
/* Pending queue setup */
cptvf->pqueue.rid_queue = (struct rid *)mem;
cptvf->pqueue.enq_tail = 0;
CPT_LOG_ERR("Failed to initialize CPT VQ of device %s",
cptvf->dev_name);
ret = -EBUSY;
- goto cleanup;
+ goto mempool_destroy;
}
*instance = cpt_instance;
CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name);
return 0;
-cleanup:
+
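+/* Error unwind: release resources in reverse order of setup */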
+mempool_destroy:
+ otx_cpt_metabuf_mempool_destroy(cpt_instance);
+memzone_free:
rte_memzone_free(rz);
+exit:
*instance = NULL;
return ret;
}
CPT_LOG_DP_DEBUG("Releasing cpt device %s", cptvf->dev_name);
+ otx_cpt_metabuf_mempool_destroy(instance);
+
rz = (struct rte_memzone *)instance->rsvd;
rte_memzone_free(rz);
return 0;
#include <stdbool.h>
#include <rte_branch_prediction.h>
+#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_memory.h>
uintptr_t rsvd;
struct rte_mempool *sess_mp;
struct rte_mempool *sess_mp_priv;
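+ /** Metabuf info for this queue pair */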
+ struct cpt_qp_meta_info meta_info;
};
struct command_chunk {
struct command_queue cqueue;
/** Pending queue information */
struct pending_queue pqueue;
- /** Meta information per vf */
- struct cptvf_meta_info meta_info;
/** Below fields are accessed only in control path */
otx_cpt_deinit_device(void *dev);
int
-otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance);
+otx_cpt_get_resource(const struct rte_cryptodev *dev, uint8_t group,
+ struct cpt_instance **instance, uint16_t qp_id);
int
otx_cpt_put_resource(struct cpt_instance *instance);
#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_errno.h>
#include <rte_malloc.h>
+#include <rte_mempool.h>
#include "cpt_pmd_logs.h"
-#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "otx_cryptodev.h"
#include "otx_cryptodev_hw_access.h"
#include "otx_cryptodev_ops.h"
-static int otx_cryptodev_probe_count;
-static rte_spinlock_t otx_probe_count_lock = RTE_SPINLOCK_INITIALIZER;
-
-static struct rte_mempool *otx_cpt_meta_pool;
-static int otx_cpt_op_mlen;
-static int otx_cpt_op_sb_mlen;
-
/* Forward declarations */
static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id);
-/*
- * Initializes global variables used by fast-path code
- *
- * @return
- * - 0 on success, errcode on error
- */
-static int
-init_global_resources(void)
-{
- /* Get meta len for scatter gather mode */
- otx_cpt_op_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();
-
- /* Extra 4B saved for future considerations */
- otx_cpt_op_mlen += 4 * sizeof(uint64_t);
-
- otx_cpt_meta_pool = rte_mempool_create("cpt_metabuf-pool", 4096 * 16,
- otx_cpt_op_mlen, 512, 0,
- NULL, NULL, NULL, NULL,
- SOCKET_ID_ANY, 0);
- if (!otx_cpt_meta_pool) {
- CPT_LOG_ERR("cpt metabuf pool not created");
- return -ENOMEM;
- }
-
- /* Get meta len for direct mode */
- otx_cpt_op_sb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();
-
- /* Extra 4B saved for future considerations */
- otx_cpt_op_sb_mlen += 4 * sizeof(uint64_t);
-
- return 0;
-}
-
-void
-cleanup_global_resources(void)
-{
- /* Take lock */
- rte_spinlock_lock(&otx_probe_count_lock);
-
- /* Decrement the cryptodev count */
- otx_cryptodev_probe_count--;
-
- /* Free buffers */
- if (otx_cpt_meta_pool && otx_cryptodev_probe_count == 0)
- rte_mempool_free(otx_cpt_meta_pool);
-
- /* Free lock */
- rte_spinlock_unlock(&otx_probe_count_lock);
-}
-
/* Alarm routines */
static void
const struct rte_cryptodev_qp_conf *qp_conf,
int socket_id __rte_unused)
{
- void *cptvf = dev->data->dev_private;
struct cpt_instance *instance = NULL;
struct rte_pci_device *pci_dev;
int ret = -1;
return -EIO;
}
- ret = otx_cpt_get_resource(cptvf, 0, &instance);
+ ret = otx_cpt_get_resource(dev, 0, &instance, que_pair_id);
if (ret != 0 || instance == NULL) {
CPT_LOG_ERR("Error getting instance handle from device %s : "
"ret = %d", dev->data->name, ret);
void *prep_req, *mdata = NULL;
int ret = 0;
uint64_t cpt_op;
- struct cpt_vf *cptvf = (struct cpt_vf *)instance;
sess = (struct cpt_sess_misc *)
get_sym_session_private_data(sym_op->session,
cpt_op = sess->cpt_op;
if (likely(cpt_op & CPT_OP_CIPHER_MASK))
- ret = fill_fc_params(op, sess, &cptvf->meta_info, &mdata,
+ ret = fill_fc_params(op, sess, &instance->meta_info, &mdata,
&prep_req);
else
- ret = fill_digest_params(op, sess, &cptvf->meta_info,
+ ret = fill_digest_params(op, sess, &instance->meta_info,
&mdata, &prep_req);
if (unlikely(ret)) {
if (unlikely(ret)) {
/* Buffer allocated for request preparation need to be freed */
- free_op_meta(mdata, cptvf->meta_info.cptvf_meta_pool);
+ free_op_meta(mdata, instance->meta_info.pool);
return ret;
}
rte_mempool_put(instance->sess_mp, cop->sym->session);
cop->sym->session = NULL;
}
- free_op_meta(metabuf, cptvf->meta_info.cptvf_meta_pool);
+ free_op_meta(metabuf, instance->meta_info.pool);
}
return nb_completed;
.sym_session_clear = otx_cpt_session_clear
};
-static void
-otx_cpt_common_vars_init(struct cpt_vf *cptvf)
-{
- cptvf->meta_info.cptvf_meta_pool = otx_cpt_meta_pool;
- cptvf->meta_info.cptvf_op_mlen = otx_cpt_op_mlen;
- cptvf->meta_info.cptvf_op_sb_mlen = otx_cpt_op_sb_mlen;
-}
-
int
otx_cpt_dev_create(struct rte_cryptodev *c_dev)
{
/* Start off timer for mailbox interrupts */
otx_cpt_periodic_alarm_start(cptvf);
- rte_spinlock_lock(&otx_probe_count_lock);
- if (!otx_cryptodev_probe_count) {
- ret = init_global_resources();
- if (ret) {
- rte_spinlock_unlock(&otx_probe_count_lock);
- goto init_fail;
- }
- }
- otx_cryptodev_probe_count++;
- rte_spinlock_unlock(&otx_probe_count_lock);
-
- /* Initialize data path variables used by common code */
- otx_cpt_common_vars_init(cptvf);
-
c_dev->dev_ops = &cptvf_ops;
c_dev->enqueue_burst = otx_cpt_pkt_enqueue;
return 0;
-init_fail:
- otx_cpt_periodic_alarm_stop(cptvf);
- otx_cpt_deinit_device(cptvf);
-
fail:
if (cptvf) {
/* Free private data allocated */
#define OTX_CPT_MIN_TAILROOM_REQ (8)
#define CPT_NUM_QS_PER_VF (1)
-void
-cleanup_global_resources(void);
-
int
otx_cpt_dev_create(struct rte_cryptodev *c_dev);