/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "test_perf_common.h"
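/* Return the verdict recorded by the measurement loop for this test. */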
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	return t->result;
}
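/*
 * Set up all event ports: one port per worker lcore, each linked to all
 * nb_queues queues (a NULL link list links every queue), followed by one
 * unlinked port per producer lcore. With two workers and two producers
 * the resulting layout is:
 *
 *   ports 0, 1 -> workers   (linked to all queues)
 *   ports 2, 3 -> producers (producer i enqueues to queue i * stride)
 *
 * Returns 0 on success, a negative errno-style value otherwise.
 */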
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
			uint8_t stride, uint8_t nb_queues)
{
	struct test_perf *t = evt_test_priv(test);
	uint8_t port, prod;
	int ret;

	/* port configuration */
	const struct rte_event_port_conf wkr_p_conf = {
		.dequeue_depth = opt->wkr_deq_dep,
		.enqueue_depth = 64,
		.new_event_threshold = 4096,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;

		ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}
		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* ports for producers, no links */
	const struct rte_event_port_conf prod_conf = {
		.dequeue_depth = 8,
		.enqueue_depth = 32,
		.new_event_threshold = 1200,
	};
	prod = 0;
	for ( ; port < perf_nb_event_ports(opt); port++) {
		struct prod_data *p = &t->prod[port];

		p->dev_id = opt->dev_id;
		p->port_id = port;
		p->queue_id = prod * stride;
		p->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}
		prod++;
	}

	return 0;
}
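/*
 * Sanity-check the lcore assignment and option combination before the
 * event device is configured. A dedicated scheduler lcore is required
 * only when the device lacks the distributed scheduling capability.
 */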
int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;
	bool need_slcore = !evt_has_distributed_sched(opt->dev_id);

	/* N producers + N workers + 1 scheduler (based on dev capa) + 1 master */
	lcores = need_slcore ? 4 : 3;

	if (rte_lcore_count() < lcores) {
		evt_err("test needs a minimum of %u lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlap with the master lcore");
		return -1;
	}
	if (need_slcore && evt_lcores_has_overlap(opt->wlcores, opt->slcore)) {
		evt_err("worker lcores overlap with the scheduler lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlap with producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	/* Validate producer lcores */
	if (evt_lcores_has_overlap(opt->plcores, rte_get_master_lcore())) {
		evt_err("producer lcores overlap with the master lcore");
		return -1;
	}
	if (need_slcore && evt_lcores_has_overlap(opt->plcores, opt->slcore)) {
		evt_err("producer lcores overlap with the scheduler lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->plcores)) {
		evt_err("one or more producer lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->plcores)) {
		evt_err("at least one producer lcore is required");
		return -1;
	}

	/* Validate scheduler lcore */
	if (need_slcore && opt->slcore == (int)rte_get_master_lcore()) {
		evt_err("scheduler lcore and master lcore should be different");
		return -1;
	}
	if (need_slcore && !rte_lcore_is_enabled(opt->slcore)) {
		evt_err("scheduler lcore is not enabled");
		return -1;
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if (opt->nb_stages == 1 && opt->fwd_latency) {
		evt_info("fwd_latency is only valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}
	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}

	return 0;
}
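/* Dump the effective test configuration in a human-readable form. */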
void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	if (!evt_has_distributed_sched(opt->dev_id))
		evt_dump_scheduler_lcore(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
}
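/* Stop and close the event device at the end of the test run. */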
void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}
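/*
 * Mempool object constructor: invoked once per element when the pool is
 * populated, zeroing each perf_elt.
 */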
static void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
		void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}
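/*
 * Create the pool of perf_elt objects that carry timestamps through the
 * pipeline. The cache size of 512 used below is a typical choice; any
 * value up to RTE_MEMPOOL_CACHE_MAX_SIZE works here.
 */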
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	t->pool = rte_mempool_create(test->name, /* mempool name */
			opt->pool_sz, /* number of elements */
			sizeof(struct perf_elt), /* element size */
			512, /* cache size */
			0, NULL, NULL, /* no private data, no pool ctor */
			perf_elt_init, /* obj constructor */
			NULL, opt->socket_id, 0 /* flags */);
	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}
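/* Free the element pool; rte_mempool_free() is a no-op on NULL. */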
void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}
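/*
 * Allocate the per-test context on the requested socket and seed it from
 * the command-line options. outstand_pkts holds the total event count the
 * producers will inject, which the main loop uses to detect completion.
 */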
int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		return -ENOMEM;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores);
	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));

	return 0;
}
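/* Free the per-test context allocated in perf_test_setup(). */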
void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}