4 * Copyright(c) 2017 Intel Corporation. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include <rte_malloc.h>
35 #include "cperf_test_common.h"
/* Byte offsets, measured from the start of each mempool object, at which
 * the source and (optional out-of-place) destination mbufs are laid out
 * after the padded crypto op.  dst_buf_offset is presumably 0 when
 * operating in-place — confirm against the full struct definition, which
 * is not fully visible in this excerpt. */
38 uint32_t src_buf_offset;
39 uint32_t dst_buf_offset;
/*
 * Initialize a single-segment mbuf that was carved out of a mempool
 * object rather than allocated from an mbuf pool.
 *
 * @m           mbuf header located @mbuf_offset bytes into object @obj
 * @mp          mempool the object belongs to (used for virt->phys lookup)
 * @obj         start of the enclosing mempool object
 * @mbuf_offset offset of the mbuf header within the object
 * @segment_sz  size of the data buffer placed right after the header
 *
 * The data buffer starts immediately after the struct rte_mbuf header,
 * with no headroom, and both buf_len and data_len are set to the full
 * segment size.  NOTE(review): several lines of this function are not
 * visible in this excerpt (e.g. data_off/next-pointer setup) — confirm
 * against the full file.
 */
45 fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
46 void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
48 uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);
50 /* start of buffer is after mbuf structure and priv data */
/* Physical address mirrors the same layout: object base + offset of the
 * mbuf within the object + the mbuf header itself. */
52 m->buf_addr = (char *)m + mbuf_hdr_size;
53 m->buf_physaddr = rte_mempool_virt2phy(mp, obj) +
54 mbuf_offset + mbuf_hdr_size;
55 m->buf_len = segment_sz;
56 m->data_len = segment_sz;
58 /* No headroom needed for the buffer */
61 /* init some constant fields */
/* Reference count starts at 1; these mbufs are never freed individually,
 * they live and die with the enclosing mempool object. */
65 rte_mbuf_refcnt_set(m, 1);
/*
 * Initialize a chain of @segments_nb mbufs packed back-to-back inside a
 * mempool object: each segment is a struct rte_mbuf header immediately
 * followed by @segment_sz bytes of data, with the next segment's header
 * directly after.
 *
 * @m           first mbuf header, @mbuf_offset bytes into object @obj
 * @mp          mempool the object belongs to (used for virt->phys lookup)
 * @obj         start of the enclosing mempool object
 * @mbuf_offset offset of the first mbuf header within the object
 * @segment_sz  data size of every segment
 *
 * NOTE(review): the loop interior is only partially visible in this
 * excerpt (the advance to next_mbuf / m->next linkage lines are missing);
 * the visible do/while runs until remaining_segments reaches 0.
 */
70 fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
71 void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
74 uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
75 uint16_t remaining_segments = segments_nb;
76 struct rte_mbuf *next_mbuf;
/* Physical address of the first segment's data area; advanced by one
 * (header + data) stride per iteration. */
77 phys_addr_t next_seg_phys_addr = rte_mempool_virt2phy(mp, obj) +
78 mbuf_offset + mbuf_hdr_size;
81 /* start of buffer is after mbuf structure and priv data */
83 m->buf_addr = (char *)m + mbuf_hdr_size;
84 m->buf_physaddr = next_seg_phys_addr;
85 next_seg_phys_addr += mbuf_hdr_size + segment_sz;
86 m->buf_len = segment_sz;
87 m->data_len = segment_sz;
89 /* No headroom needed for the buffer */
92 /* init some constant fields */
/* Only the head mbuf's nb_segs is meaningful to consumers; it is set on
 * every segment here as written. */
94 m->nb_segs = segments_nb;
96 rte_mbuf_refcnt_set(m, 1);
/* Next segment header sits directly after this segment's data. */
97 next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
98 mbuf_hdr_size + segment_sz);
101 remaining_segments--;
103 } while (remaining_segments > 0);
/*
 * Per-object constructor, run once per mempool element via
 * rte_mempool_obj_iter() (see cperf_alloc_common_memory below).
 *
 * Each object is laid out as:
 *   [crypto op + sym op + private data (padded)]
 *   [source mbuf(s): header + data, repeated segments_nb times]
 *   [optional single-segment destination mbuf, out-of-place mode only]
 *
 * @opaque_arg points to a struct obj_params describing the offsets and
 * segmentation.  NOTE(review): some lines are missing from this excerpt —
 * the assignment of op->sym->m_src and the in-place else-branch for
 * m_dst are presumably among them; confirm against the full file.
 */
109 mempool_obj_init(struct rte_mempool *mp,
112 __attribute__((unused)) unsigned int i)
114 struct obj_params *params = opaque_arg;
115 struct rte_crypto_op *op = obj;
/* Source mbuf header lives src_buf_offset bytes into the object. */
116 struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
117 params->src_buf_offset);
118 /* Set crypto operation */
119 op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
120 op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
121 op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
123 /* Set source buffer */
/* Single- vs multi-segment layout is chosen once at pool-build time. */
125 if (params->segments_nb == 1)
126 fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
129 fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
130 params->segment_sz, params->segments_nb);
133 /* Set destination buffer */
/* Out-of-place only: a dst offset of 0 means in-place operation. */
134 if (params->dst_buf_offset) {
135 m = (struct rte_mbuf *) ((uint8_t *) obj +
136 params->dst_buf_offset);
/* Destination is always a single segment, regardless of segments_nb. */
137 fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
141 op->sym->m_dst = NULL;
/*
 * Allocate the combined crypto-op + mbuf mempool shared by the perf
 * tests for one (device, queue pair) tuple.
 *
 * Each pool object packs a crypto op (with IVs/AAD private data, padded
 * to a cache line), the source mbuf chain, and — in out-of-place mode —
 * a single-segment destination mbuf.  On success *src_buf_offset,
 * *dst_buf_offset and *pool are filled in.
 *
 * NOTE(review): this excerpt is truncated — the error-handling bodies
 * after each failing call and the final return are not visible; confirm
 * cleanup behavior (e.g. rte_mempool_free on failure) against the full
 * file.
 */
145 cperf_alloc_common_memory(const struct cperf_options *options,
146 const struct cperf_test_vector *test_vector,
147 uint8_t dev_id, uint16_t qp_id,
148 size_t extra_op_priv_size,
149 uint32_t *src_buf_offset,
150 uint32_t *dst_buf_offset,
151 struct rte_mempool **pool)
153 char pool_name[32] = "";
156 /* Calculate the object size */
157 uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
158 sizeof(struct rte_crypto_sym_op);
/* Private area holds every IV plus the AEAD AAD for this test vector. */
159 uint16_t crypto_op_private_size = extra_op_priv_size +
160 test_vector->cipher_iv.length +
161 test_vector->auth_iv.length +
162 test_vector->aead_iv.length +
163 options->aead_aad_sz;
164 uint16_t crypto_op_total_size = crypto_op_size +
165 crypto_op_private_size;
/* Pad so the mbufs that follow start on a cache-line boundary. */
166 uint16_t crypto_op_total_size_padded =
167 RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
168 uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
169 uint32_t max_size = options->max_buffer_size + options->digest_sz;
/* Ceiling division: enough segments to cover max_size bytes. */
170 uint16_t segments_nb = (max_size % options->segment_sz) ?
171 (max_size / options->segment_sz) + 1 :
172 max_size / options->segment_sz;
173 uint32_t obj_size = crypto_op_total_size_padded +
174 (mbuf_size * segments_nb);
/* One pool per (device, qp) pair; name keeps them distinct. */
176 snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
179 *src_buf_offset = crypto_op_total_size_padded;
181 struct obj_params params = {
182 .segment_sz = options->segment_sz,
183 .segments_nb = segments_nb,
184 .src_buf_offset = crypto_op_total_size_padded,
188 if (options->out_of_place) {
/* Destination mbuf sits after the whole source chain. */
189 *dst_buf_offset = *src_buf_offset +
190 (mbuf_size * segments_nb);
191 params.dst_buf_offset = *dst_buf_offset;
192 /* Destination buffer will be one segment only */
193 obj_size += max_size;
/* Build the pool in stages so the per-object layout can be initialized
 * with the custom callback rather than the mbuf constructor. */
196 *pool = rte_mempool_create_empty(pool_name,
197 options->pool_sz, obj_size, 512, 0,
201 "Cannot allocate mempool for device %u\n",
206 ret = rte_mempool_set_ops_byname(*pool,
207 RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
210 "Error setting mempool handler for device %u\n",
215 ret = rte_mempool_populate_default(*pool);
218 "Error populating mempool for device %u\n",
/* Lay out the crypto op and mbufs inside every object just once. */
223 rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);