#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_memzone.h>

/*
 * NOTE(review): this span contained leaked unified-diff markers ('-'/'+'
 * prefixed lines). The body below resolves that patch: '+' lines are kept,
 * '-' lines are dropped, and unmarked context lines are preserved verbatim.
 * The hunks cover fragments of more than one function, so the text below is
 * still partial — stray closing braces belong to scopes whose openings are
 * outside this view.
 */

/*
 * Create the metabuf mempool for a queue pair.
 *
 * Sizes the element length from the device's crypto class (symmetric:
 * max of SG-mode and direct-mode meta lengths; asymmetric: the asym meta
 * length) and sizes the pool so per-lcore caches cannot drain it.
 */
static int
otx_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
			       struct cpt_instance *instance, uint8_t qp_id,
			       unsigned int nb_elements)
{
	char mempool_name[RTE_MEMPOOL_NAMESIZE];
	struct cpt_qp_meta_info *meta_info;
	struct rte_mempool *pool;
	int max_mlen = 0;
	int sg_mlen = 0;
	int lb_mlen = 0;
	int mb_pool_sz;
	int ret;

	/*
	 * Calculate metabuf length required. The 'crypto_octeontx' device
	 * would be either SYMMETRIC or ASYMMETRIC.
	 */
	if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {

		/* Get meta len for scatter gather mode */
		sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();

		/* Extra 32B saved for future considerations */
		sg_mlen += 4 * sizeof(uint64_t);

		/* Get meta len for linear buffer (direct) mode */
		lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();

		/* Extra 32B saved for future considerations */
		lb_mlen += 4 * sizeof(uint64_t);

		/* Check max requirement for meta buffer */
		max_mlen = RTE_MAX(lb_mlen, sg_mlen);
	} else {

		/* Asymmetric device */

		/* Get meta len for asymmetric operations */
		max_mlen = cpt_pmd_ops_helper_asym_get_mlen();
	}

	/* Allocate mempool */
	snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx_cpt_mb_%u:%u",
		 dev->data->dev_id, qp_id);

	/*
	 * Size the pool against per-lcore cache drain: with caches enabled,
	 * up to METABUF_POOL_CACHE_SIZE elements may sit idle in each
	 * lcore's cache.
	 */
	mb_pool_sz = RTE_MAX(nb_elements,
			     (METABUF_POOL_CACHE_SIZE * rte_lcore_count()));

	pool = rte_mempool_create_empty(mempool_name, mb_pool_sz, max_mlen,
					METABUF_POOL_CACHE_SIZE, 0,
					rte_socket_id(), 0);

	/* --- fragment from a different function begins here --- */

	memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue));

	/* Chunks are of fixed size buffers */
	qlen = DEFAULT_CMD_QLEN;
	chunks = DEFAULT_CMD_QCHUNKS;
	chunk_len = DEFAULT_CMD_QCHUNK_SIZE;

	/* Chunk size includes 8 bytes of next chunk ptr */
	chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE;

	len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);

	/* For pending queue */
	len += qlen * RTE_ALIGN(sizeof(cptvf->pqueue.rid_queue[0]), 8);

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);
	}

	mem = rz->addr;
	/* iova replaces the deprecated phys_addr memzone field */
	dma_addr = rz->iova;
	alloc_len = len;

	memset(mem, 0, len);
	}

	/* Pending queue setup */
	cptvf->pqueue.rid_queue = (void **)mem;

	mem += qlen * RTE_ALIGN(sizeof(cptvf->pqueue.rid_queue[0]), 8);
	len -= qlen * RTE_ALIGN(sizeof(cptvf->pqueue.rid_queue[0]), 8);
	dma_addr += qlen * RTE_ALIGN(sizeof(cptvf->pqueue.rid_queue[0]), 8);

	/* Alignment wastage */
	used_len = alloc_len - len;