1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_malloc.h>
7 #include "cperf_test_common.h"
/*
 * Per-object layout parameters handed to mempool_obj_init() through
 * rte_mempool_obj_iter()'s opaque argument.
 */
struct obj_params {
	uint32_t src_buf_offset;	/* offset of the source mbuf(s) inside the object */
	uint32_t dst_buf_offset;	/* offset of the dst mbuf; 0 means in-place (no dst) */
	uint16_t segment_sz;		/* data bytes carried by each mbuf segment */
	uint16_t segments_nb;		/* number of source segments */
};
17 fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
18 void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
20 uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);
22 /* start of buffer is after mbuf structure and priv data */
24 m->buf_addr = (char *)m + mbuf_hdr_size;
25 m->buf_iova = rte_mempool_virt2iova(obj) +
26 mbuf_offset + mbuf_hdr_size;
27 m->buf_len = segment_sz;
28 m->data_len = segment_sz;
30 /* No headroom needed for the buffer */
33 /* init some constant fields */
37 rte_mbuf_refcnt_set(m, 1);
42 fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
43 void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
46 uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
47 uint16_t remaining_segments = segments_nb;
48 struct rte_mbuf *next_mbuf;
49 rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
50 mbuf_offset + mbuf_hdr_size;
53 /* start of buffer is after mbuf structure and priv data */
55 m->buf_addr = (char *)m + mbuf_hdr_size;
56 m->buf_iova = next_seg_phys_addr;
57 next_seg_phys_addr += mbuf_hdr_size + segment_sz;
58 m->buf_len = segment_sz;
59 m->data_len = segment_sz;
61 /* No headroom needed for the buffer */
64 /* init some constant fields */
66 m->nb_segs = segments_nb;
68 rte_mbuf_refcnt_set(m, 1);
69 next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
70 mbuf_hdr_size + segment_sz);
75 } while (remaining_segments > 0);
81 mempool_obj_init(struct rte_mempool *mp,
84 __attribute__((unused)) unsigned int i)
86 struct obj_params *params = opaque_arg;
87 struct rte_crypto_op *op = obj;
88 struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
89 params->src_buf_offset);
90 /* Set crypto operation */
91 op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
92 op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
93 op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
94 op->phys_addr = rte_mem_virt2iova(obj);
97 /* Set source buffer */
99 if (params->segments_nb == 1)
100 fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
103 fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
104 params->segment_sz, params->segments_nb);
107 /* Set destination buffer */
108 if (params->dst_buf_offset) {
109 m = (struct rte_mbuf *) ((uint8_t *) obj +
110 params->dst_buf_offset);
111 fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
115 op->sym->m_dst = NULL;
119 cperf_alloc_common_memory(const struct cperf_options *options,
120 const struct cperf_test_vector *test_vector,
121 uint8_t dev_id, uint16_t qp_id,
122 size_t extra_op_priv_size,
123 uint32_t *src_buf_offset,
124 uint32_t *dst_buf_offset,
125 struct rte_mempool **pool)
127 char pool_name[32] = "";
130 /* Calculate the object size */
131 uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
132 sizeof(struct rte_crypto_sym_op);
133 uint16_t crypto_op_private_size;
135 * If doing AES-CCM, IV field needs to be 16 bytes long,
136 * and AAD field needs to be long enough to have 18 bytes,
137 * plus the length of the AAD, and all rounded to a
138 * multiple of 16 bytes.
140 if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
141 crypto_op_private_size = extra_op_priv_size +
142 test_vector->cipher_iv.length +
143 test_vector->auth_iv.length +
144 RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
145 RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
147 crypto_op_private_size = extra_op_priv_size +
148 test_vector->cipher_iv.length +
149 test_vector->auth_iv.length +
150 test_vector->aead_iv.length +
151 options->aead_aad_sz;
154 uint16_t crypto_op_total_size = crypto_op_size +
155 crypto_op_private_size;
156 uint16_t crypto_op_total_size_padded =
157 RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
158 uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
159 uint32_t max_size = options->max_buffer_size + options->digest_sz;
160 uint16_t segments_nb = (max_size % options->segment_sz) ?
161 (max_size / options->segment_sz) + 1 :
162 max_size / options->segment_sz;
163 uint32_t obj_size = crypto_op_total_size_padded +
164 (mbuf_size * segments_nb);
166 snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
169 *src_buf_offset = crypto_op_total_size_padded;
171 struct obj_params params = {
172 .segment_sz = options->segment_sz,
173 .segments_nb = segments_nb,
174 .src_buf_offset = crypto_op_total_size_padded,
178 if (options->out_of_place) {
179 *dst_buf_offset = *src_buf_offset +
180 (mbuf_size * segments_nb);
181 params.dst_buf_offset = *dst_buf_offset;
182 /* Destination buffer will be one segment only */
183 obj_size += max_size;
186 *pool = rte_mempool_create_empty(pool_name,
187 options->pool_sz, obj_size, 512, 0,
191 "Cannot allocate mempool for device %u\n",
196 ret = rte_mempool_set_ops_byname(*pool,
197 RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
200 "Error setting mempool handler for device %u\n",
205 ret = rte_mempool_populate_default(*pool);
208 "Error populating mempool for device %u\n",
213 rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)¶ms);