1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_malloc.h>
6 #include <rte_mbuf_pool_ops.h>
8 #include "cperf_test_common.h"
/* NOTE(review): the struct header is elided in this view — these appear to
 * be fields of struct obj_params: byte offsets, from the start of each
 * mempool object, of the source and destination mbufs. Confirm against
 * the full file. */
11 uint32_t src_buf_offset;
12 uint32_t dst_buf_offset;
/*
 * fill_single_seg_mbuf() - initialise a one-segment mbuf that lives inside
 * a mempool object: the mbuf header is at 'm' and its data buffer starts
 * immediately after the header.
 *
 * m           - mbuf header to initialise
 * mp          - owning mempool (unused in the visible lines — confirm)
 * obj         - start of the enclosing mempool object
 * mbuf_offset - byte offset of this mbuf within 'obj'; used together with
 *               the object IOVA to compute the data buffer IOVA
 * segment_sz  - capacity of the data buffer (buf_len)
 * headroom    - initial data offset inside the buffer
 * data_len    - payload length (both data_len and pkt_len)
 *
 * NOTE(review): several source lines are elided in this view (the
 * "static void" specifier, braces, and part of the constant-field
 * initialisation), so annotations cover only the visible statements.
 */
20 fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
21 void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
22 uint16_t headroom, uint16_t data_len)
24 uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);
26 /* start of buffer is after mbuf structure and priv data */
28 m->buf_addr = (char *)m + mbuf_hdr_size;
/* Data buffer IOVA = object IOVA + offset of this mbuf within the
 * object + size of the mbuf header preceding the buffer. */
29 m->buf_iova = rte_mempool_virt2iova(obj) +
30 mbuf_offset + mbuf_hdr_size;
31 m->buf_len = segment_sz;
/* Single segment: segment length and total packet length coincide. */
32 m->data_len = data_len;
33 m->pkt_len = data_len;
35 /* Use headroom specified for the buffer */
36 m->data_off = headroom;
38 /* init some constant fields */
42 rte_mbuf_refcnt_set(m, 1);
/*
 * fill_multi_seg_mbuf() - initialise a chain of 'segments_nb' mbufs packed
 * back to back inside a single mempool object: each segment is a
 * struct rte_mbuf header immediately followed by 'segment_sz' bytes of
 * data, so the next segment starts mbuf_hdr_size + segment_sz bytes after
 * the current one.
 *
 * NOTE(review): the body of the segment do/while loop is almost entirely
 * elided in this view (only its closing "while" is visible), so the
 * per-iteration logic (advancing m/next_mbuf, decrementing
 * remaining_segments, linking m->next) must be confirmed against the
 * full file.
 */
47 fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
48 void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
49 uint16_t headroom, uint16_t data_len, uint16_t segments_nb)
51 uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
52 uint16_t remaining_segments = segments_nb;
53 struct rte_mbuf *next_mbuf;
/* IOVA of the first segment's data buffer within the object. */
54 rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
55 mbuf_offset + mbuf_hdr_size;
58 /* start of buffer is after mbuf structure and priv data */
60 m->buf_addr = (char *)m + mbuf_hdr_size;
61 m->buf_iova = next_seg_phys_addr;
/* Pre-compute the IOVA of the following segment's data buffer. */
62 next_seg_phys_addr += mbuf_hdr_size + segment_sz;
63 m->buf_len = segment_sz;
64 m->data_len = data_len;
66 /* Use headroom specified for the buffer */
67 m->data_off = headroom;
69 /* init some constant fields */
/* The visible assignment sets the total segment count (head mbuf). */
71 m->nb_segs = segments_nb;
73 rte_mbuf_refcnt_set(m, 1);
/* The next mbuf header sits right after this segment's data buffer. */
74 next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
75 mbuf_hdr_size + segment_sz);
80 } while (remaining_segments > 0);
/*
 * mempool_obj_init() - rte_mempool_obj_iter() callback that formats each
 * pool object as: crypto op (+ private data) | source mbuf(s) | optional
 * destination mbuf. Layout offsets come from the struct obj_params passed
 * as the iterator's opaque argument.
 *
 * NOTE(review): parts of the signature (the opaque_arg/obj parameters)
 * and the 'else' halves of both if-statements are elided in this view;
 * annotations below cover only the visible lines.
 */
86 mempool_obj_init(struct rte_mempool *mp,
89 __rte_unused unsigned int i)
91 struct obj_params *params = opaque_arg;
92 struct rte_crypto_op *op = obj;
/* Source mbuf lives at a fixed offset after the (padded) crypto op. */
93 struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
94 params->src_buf_offset);
95 /* Set crypto operation */
96 op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
97 op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
98 op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
99 op->phys_addr = rte_mem_virt2iova(obj);
102 /* Set source buffer */
/* One flat segment or a chain, depending on the configured count;
 * the 'else' line pairing these two calls is elided in this view. */
104 if (params->segments_nb == 1)
105 fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
106 params->segment_sz, params->headroom_sz,
109 fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
110 params->segment_sz, params->headroom_sz,
111 params->data_len, params->segments_nb);
114 /* Set destination buffer */
/* Non-zero dst offset indicates out-of-place operation: a
 * single-segment destination mbuf follows the source buffer(s)
 * within the same object. */
115 if (params->dst_buf_offset) {
116 m = (struct rte_mbuf *) ((uint8_t *) obj +
117 params->dst_buf_offset);
118 fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
119 params->segment_sz, params->headroom_sz,
/* In-place path: no separate destination mbuf. */
123 op->sym->m_dst = NULL;
/*
 * cperf_alloc_common_memory() - create the per-(device, queue pair)
 * mempool whose objects each hold a crypto op, its private data and the
 * source (and, for out-of-place, destination) mbufs, then initialise
 * every object via mempool_obj_init().
 *
 * Out parameters: *src_buf_offset / *dst_buf_offset receive the byte
 * offsets of the source / destination mbufs inside each pool object;
 * *pool receives the created mempool.
 *
 * NOTE(review): this view is heavily elided — error-handling branches,
 * 'else' arms, the asymmetric-modexp early return, and the function's
 * tail (including its return statement) are not visible. Annotations
 * cover only the visible lines.
 */
127 cperf_alloc_common_memory(const struct cperf_options *options,
128 const struct cperf_test_vector *test_vector,
129 uint8_t dev_id, uint16_t qp_id,
130 size_t extra_op_priv_size,
131 uint32_t *src_buf_offset,
132 uint32_t *dst_buf_offset,
133 struct rte_mempool **pool)
135 const char *mp_ops_name;
136 char pool_name[32] = "";
139 /* Calculate the object size */
140 uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
141 sizeof(struct rte_crypto_sym_op);
142 uint16_t crypto_op_private_size;
/* Asymmetric modexp uses a plain crypto-op pool (no mbufs attached);
 * the early-return tail of this branch is elided in this view. */
144 if (options->op_type == CPERF_ASYM_MODEX) {
145 snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_op_pool%u",
147 *pool = rte_crypto_op_pool_create(
148 pool_name, RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
149 options->pool_sz, 0, 0, rte_socket_id());
152 "Cannot allocate mempool for device %u\n",
160 * If doing AES-CCM, IV field needs to be 16 bytes long,
161 * and AAD field needs to be long enough to have 18 bytes,
162 * plus the length of the AAD, and all rounded to a
163 * multiple of 16 bytes.
165 if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
166 crypto_op_private_size = extra_op_priv_size +
167 test_vector->cipher_iv.length +
168 test_vector->auth_iv.length +
169 RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
170 RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
172 crypto_op_private_size = extra_op_priv_size +
173 test_vector->cipher_iv.length +
174 test_vector->auth_iv.length +
175 test_vector->aead_iv.length +
176 options->aead_aad_sz;
/* Pad op + private data to a cache-line multiple so the mbufs that
 * follow start cache-line aligned. */
179 uint16_t crypto_op_total_size = crypto_op_size +
180 crypto_op_private_size;
181 uint16_t crypto_op_total_size_padded =
182 RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
183 uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
184 uint32_t max_size = options->max_buffer_size + options->digest_sz;
/* segments_nb = ceil(max_size / segment_sz). */
185 uint16_t segments_nb = (max_size % options->segment_sz) ?
186 (max_size / options->segment_sz) + 1 :
187 max_size / options->segment_sz;
188 uint32_t obj_size = crypto_op_total_size_padded +
189 (mbuf_size * segments_nb);
191 snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
/* Source mbuf starts right after the padded crypto op. */
194 *src_buf_offset = crypto_op_total_size_padded;
196 struct obj_params params = {
197 .segment_sz = options->segment_sz,
198 .headroom_sz = options->headroom_sz,
199 /* Data len = segment size - (headroom + tailroom) */
200 .data_len = options->segment_sz -
201 options->headroom_sz -
202 options->tailroom_sz,
203 .segments_nb = segments_nb,
204 .src_buf_offset = crypto_op_total_size_padded,
/* Out-of-place: reserve a single-segment destination mbuf after the
 * source segment chain and grow the object size accordingly. */
208 if (options->out_of_place) {
209 *dst_buf_offset = *src_buf_offset +
210 (mbuf_size * segments_nb);
211 params.dst_buf_offset = *dst_buf_offset;
212 /* Destination buffer will be one segment only */
213 obj_size += max_size + sizeof(struct rte_mbuf);
/* Create an empty pool, select the best mbuf mempool ops, populate it,
 * then format each object with mempool_obj_init(). The error paths for
 * each step are elided in this view. */
216 *pool = rte_mempool_create_empty(pool_name,
217 options->pool_sz, obj_size, 512, 0,
221 "Cannot allocate mempool for device %u\n",
226 mp_ops_name = rte_mbuf_best_mempool_ops();
228 ret = rte_mempool_set_ops_byname(*pool,
232 "Error setting mempool handler for device %u\n",
237 ret = rte_mempool_populate_default(*pool);
240 "Error populating mempool for device %u\n",
/* NOTE(review): "¶ms" below looks like mojibake for "&params"
 * ("&p" mis-decoded as the pilcrow character) — restore from the
 * original file; bytes preserved here as found. */
245 rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)¶ms);