uint32_t src_buf_offset;
uint32_t dst_buf_offset;
uint16_t segment_sz;
+ uint16_t headroom_sz;
+ uint16_t data_len;
uint16_t segments_nb;
};
static void
fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
- void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
+ void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
+ uint16_t headroom, uint16_t data_len)
{
uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);
m->buf_iova = rte_mempool_virt2iova(obj) +
mbuf_offset + mbuf_hdr_size;
m->buf_len = segment_sz;
- m->data_len = segment_sz;
+ m->data_len = data_len;
+ m->pkt_len = data_len;
- /* No headroom needed for the buffer */
- m->data_off = 0;
+ /* Use headroom specified for the buffer */
+ m->data_off = headroom;
/* init some constant fields */
m->pool = mp;
static void
fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
- uint16_t segments_nb)
+ uint16_t headroom, uint16_t data_len, uint16_t segments_nb)
{
uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
uint16_t remaining_segments = segments_nb;
m->buf_iova = next_seg_phys_addr;
next_seg_phys_addr += mbuf_hdr_size + segment_sz;
m->buf_len = segment_sz;
- m->data_len = segment_sz;
+ m->data_len = data_len;
- /* No headroom needed for the buffer */
- m->data_off = 0;
+ /* Use headroom specified for the buffer */
+ m->data_off = headroom;
/* init some constant fields */
m->pool = mp;
m->next = NULL;
}
+/*
+ * Per-object initializer for the asymmetric crypto-op mempool, invoked
+ * once per element via rte_mempool_obj_iter() at pool-creation time.
+ * Pre-populates the fields of each rte_crypto_op that never change, so
+ * the enqueue path does not have to set them per operation.
+ */
+static void
+mempool_asym_obj_init(struct rte_mempool *mp, __rte_unused void *opaque_arg,
+ void *obj, __rte_unused unsigned int i)
+{
+ struct rte_crypto_op *op = obj;
+
+ /* Set crypto operation */
+ op->type = RTE_CRYPTO_OP_TYPE_ASYMMETRIC;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ /* Record the op's IOVA once here rather than on every enqueue */
+ op->phys_addr = rte_mem_virt2iova(obj);
+ op->mempool = mp;
+}
+
static void
mempool_obj_init(struct rte_mempool *mp,
void *opaque_arg,
void *obj,
- __attribute__((unused)) unsigned int i)
+ __rte_unused unsigned int i)
{
struct obj_params *params = opaque_arg;
struct rte_crypto_op *op = obj;
op->sym->m_src = m;
if (params->segments_nb == 1)
fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
- params->segment_sz);
+ params->segment_sz, params->headroom_sz,
+ params->data_len);
else
fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
- params->segment_sz, params->segments_nb);
+ params->segment_sz, params->headroom_sz,
+ params->data_len, params->segments_nb);
/* Set destination buffer */
m = (struct rte_mbuf *) ((uint8_t *) obj +
params->dst_buf_offset);
fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
- params->segment_sz);
+ params->segment_sz, params->headroom_sz,
+ params->data_len);
op->sym->m_dst = m;
} else
op->sym->m_dst = NULL;
uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
sizeof(struct rte_crypto_sym_op);
uint16_t crypto_op_private_size;
+
+ if (options->op_type == CPERF_ASYM_MODEX) {
+ snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_op_pool%u",
+ rte_socket_id());
+ *pool = rte_crypto_op_pool_create(
+ pool_name, RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ options->pool_sz, RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+ rte_socket_id());
+ if (*pool == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Cannot allocate mempool for device %u\n",
+ dev_id);
+ return -1;
+ }
+ rte_mempool_obj_iter(*pool, mempool_asym_obj_init, NULL);
+ return 0;
+ }
+
/*
* If doing AES-CCM, IV field needs to be 16 bytes long,
* and AAD field needs to be long enough to have 18 bytes,
struct obj_params params = {
.segment_sz = options->segment_sz,
+ .headroom_sz = options->headroom_sz,
+ /* Data len = segment size - (headroom + tailroom) */
+ .data_len = options->segment_sz -
+ options->headroom_sz -
+ options->tailroom_sz,
.segments_nb = segments_nb,
.src_buf_offset = crypto_op_total_size_padded,
.dst_buf_offset = 0
(mbuf_size * segments_nb);
params.dst_buf_offset = *dst_buf_offset;
/* Destination buffer will be one segment only */
- obj_size += max_size;
+ obj_size += max_size + sizeof(struct rte_mbuf);
}
*pool = rte_mempool_create_empty(pool_name,