/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include "test_ring_stress.h"

/**
 * Stress test for ring enqueue/dequeue operations.
 * Performs the following pattern on each slave (worker) lcore:
 * dequeue a batch of objects, read and overwrite their contents, then
 * enqueue the batch back to the ring.
 * Serves as both a functional and a performance test of ring
 * enqueue/dequeue operations under high contention
 * (for both over-committed and non-over-committed scenarios).
 */

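/*
 * Illustration only (not used by the test itself): the per-lcore pattern
 * described above, sketched with the public rte_ring bulk API, where r is
 * the ring under test and BULK_NUM is this file's burst size:
 *
 *	void *burst[BULK_NUM];
 *	uint32_t i, n;
 *
 *	n = rte_ring_dequeue_bulk(r, burst, BULK_NUM, NULL);
 *	for (i = 0; i != n; i++)
 *		memset(burst[i], 0xff, sizeof(struct ring_elem));
 *	if (n != 0)
 *		rte_ring_enqueue_bulk(r, burst, n, NULL);
 */
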
#define RING_NAME "RING_STRESS"
#define RING_SIZE (2 * BULK_NUM * RTE_MAX_LCORE)

static volatile uint32_t wrk_cmd __rte_cache_aligned;

/* test run-time in seconds */
static const uint32_t run_time = 60;
static const uint32_t verbose;

struct lcore_stat {
	uint64_t nb_cycle;
	struct {
		uint64_t nb_call;
		uint64_t nb_obj;
		uint64_t nb_cycle;
		uint64_t max_cycle;
		uint64_t min_cycle;
	} op;
};

struct lcore_arg {
	struct rte_ring *rng;
	struct lcore_stat stats;
} __rte_cache_aligned;

struct ring_elem {
	uint32_t cnt[RTE_CACHE_LINE_SIZE / sizeof(uint32_t)];
} __rte_cache_aligned;

/*
 * redefinable functions
 */
static uint32_t _st_ring_dequeue_bulk(struct rte_ring *r, void **obj,
	uint32_t n, uint32_t *avail);
static uint32_t _st_ring_enqueue_bulk(struct rte_ring *r, void * const *obj,
	uint32_t n, uint32_t *free);
static int _st_ring_init(struct rte_ring *r, const char *name, uint32_t num);

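/*
 * Note: the _st_ring_* hooks above are only declared here; each
 * flavour-specific test file that includes this header provides its own
 * definitions. As an illustrative sketch (not part of this file), an MP/MC
 * flavour could map them straight onto the rte_ring bulk API:
 *
 *	static uint32_t
 *	_st_ring_dequeue_bulk(struct rte_ring *r, void **obj, uint32_t n,
 *		uint32_t *avail)
 *	{
 *		return rte_ring_mc_dequeue_bulk(r, obj, n, avail);
 *	}
 *
 *	static uint32_t
 *	_st_ring_enqueue_bulk(struct rte_ring *r, void * const *obj,
 *		uint32_t n, uint32_t *free)
 *	{
 *		return rte_ring_mp_enqueue_bulk(r, obj, n, free);
 *	}
 *
 *	static int
 *	_st_ring_init(struct rte_ring *r, const char *name, uint32_t num)
 *	{
 *		return rte_ring_init(r, name, num, 0);
 *	}
 */
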
static void
lcore_stat_update(struct lcore_stat *ls, uint64_t call, uint64_t obj,
	uint64_t tm, int32_t prcs)
{
	ls->op.nb_call += call;
	ls->op.nb_obj += obj;
	ls->op.nb_cycle += tm;
	if (prcs) {
		ls->op.max_cycle = RTE_MAX(ls->op.max_cycle, tm);
		ls->op.min_cycle = RTE_MIN(ls->op.min_cycle, tm);
	}
}

static void
lcore_op_stat_aggr(struct lcore_stat *ms, const struct lcore_stat *ls)
{
	ms->op.nb_call += ls->op.nb_call;
	ms->op.nb_obj += ls->op.nb_obj;
	ms->op.nb_cycle += ls->op.nb_cycle;
	ms->op.max_cycle = RTE_MAX(ms->op.max_cycle, ls->op.max_cycle);
	ms->op.min_cycle = RTE_MIN(ms->op.min_cycle, ls->op.min_cycle);
}

static void
lcore_stat_aggr(struct lcore_stat *ms, const struct lcore_stat *ls)
{
	ms->nb_cycle = RTE_MAX(ms->nb_cycle, ls->nb_cycle);
	lcore_op_stat_aggr(ms, ls);
}

static void
lcore_stat_dump(FILE *f, uint32_t lc, const struct lcore_stat *ls)
{
	long double st;

	st = (long double)rte_get_timer_hz() / US_PER_S;

	if (lc == UINT32_MAX)
		fprintf(f, "%s(AGGREGATE)={\n", __func__);
	else
		fprintf(f, "%s(lcore=%u)={\n", __func__, lc);

	fprintf(f, "\tnb_cycle=%" PRIu64 "(%.2Lf usec),\n",
		ls->nb_cycle, (long double)ls->nb_cycle / st);

	fprintf(f, "\tDEQ+ENQ={\n");

	fprintf(f, "\t\tnb_call=%" PRIu64 ",\n", ls->op.nb_call);
	fprintf(f, "\t\tnb_obj=%" PRIu64 ",\n", ls->op.nb_obj);
	fprintf(f, "\t\tnb_cycle=%" PRIu64 ",\n", ls->op.nb_cycle);
	fprintf(f, "\t\tobj/call(avg): %.2Lf\n",
		(long double)ls->op.nb_obj / ls->op.nb_call);
	fprintf(f, "\t\tcycles/obj(avg): %.2Lf\n",
		(long double)ls->op.nb_cycle / ls->op.nb_obj);
	fprintf(f, "\t\tcycles/call(avg): %.2Lf\n",
		(long double)ls->op.nb_cycle / ls->op.nb_call);

	/* if min/max cycles per call stats were collected */
	if (ls->op.min_cycle != UINT64_MAX) {
		fprintf(f, "\t\tmax cycles/call=%" PRIu64 "(%.2Lf usec),\n",
			ls->op.max_cycle,
			(long double)ls->op.max_cycle / st);
		fprintf(f, "\t\tmin cycles/call=%" PRIu64 "(%.2Lf usec),\n",
			ls->op.min_cycle,
			(long double)ls->op.min_cycle / st);
	}

	fprintf(f, "\t},\n");
	fprintf(f, "};\n");
}

static void
fill_ring_elm(struct ring_elem *elm, uint32_t fill)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(elm->cnt); i++)
		elm->cnt[i] = fill;
}

static int
check_updt_elem(struct ring_elem *elm[], uint32_t num,
	const struct ring_elem *check, const struct ring_elem *fill)
{
	uint32_t i;

	static rte_spinlock_t dump_lock;

	for (i = 0; i != num; i++) {
		if (memcmp(check, elm[i], sizeof(*check)) != 0) {
			rte_spinlock_lock(&dump_lock);
			printf("%s(lc=%u, num=%u) failed at %u-th iter, "
				"offending object: %p\n",
				__func__, rte_lcore_id(), num, i, elm[i]);
			rte_memdump(stdout, "expected", check, sizeof(*check));
			rte_memdump(stdout, "result", elm[i], sizeof(*elm[i]));
			rte_spinlock_unlock(&dump_lock);
			return -EINVAL;
		}

		/* update contents of the object */
		memcpy(elm[i], fill, sizeof(*elm[i]));
	}

	return 0;
}

static int
check_ring_op(uint32_t exp, uint32_t res, uint32_t lc,
	const char *fname, const char *opname)
{
	if (exp == res)
		return 0;

	printf("%s(lc=%u) failure: %s expected: %u, returned %u\n",
		fname, lc, opname, exp, res);
	return -ENOSPC;
}

static int
test_worker(void *arg, const char *fname, int32_t prcs)
{
	int32_t rc = 0;
	uint32_t lc, n, num;
	uint64_t cl, tm0, tm1;
	struct lcore_arg *la;
	struct ring_elem def_elm, loc_elm;
	struct ring_elem *obj[2 * BULK_NUM];

	la = arg;
	lc = rte_lcore_id();

	fill_ring_elm(&def_elm, UINT32_MAX);
	fill_ring_elm(&loc_elm, lc);

	/* wait for the start command */
	while (wrk_cmd != WRK_CMD_RUN) {
		rte_pause();
	}

	cl = rte_rdtsc_precise();

	do {
		/* num in interval [7/8, 11/8] of BULK_NUM */
		num = 7 * BULK_NUM / 8 + rte_rand() % (BULK_NUM / 2);

		/* reset all pointer values */
		memset(obj, 0, sizeof(obj));

		/* dequeue num elems */
		tm0 = (prcs != 0) ? rte_rdtsc_precise() : 0;
		n = _st_ring_dequeue_bulk(la->rng, (void **)obj, num, NULL);
		tm0 = (prcs != 0) ? rte_rdtsc_precise() - tm0 : 0;

		/* check return value and objects */
		rc = check_ring_op(num, n, lc, fname,
			RTE_STR(_st_ring_dequeue_bulk));
		if (rc == 0)
			rc = check_updt_elem(obj, num, &def_elm, &loc_elm);
		if (rc != 0)
			break;

		/* enqueue num elems */
		rte_compiler_barrier();
		rc = check_updt_elem(obj, num, &loc_elm, &def_elm);
		if (rc != 0)
			break;

		tm1 = (prcs != 0) ? rte_rdtsc_precise() : 0;
		n = _st_ring_enqueue_bulk(la->rng, (void **)obj, num, NULL);
		tm1 = (prcs != 0) ? rte_rdtsc_precise() - tm1 : 0;

		/* check return value */
		rc = check_ring_op(num, n, lc, fname,
			RTE_STR(_st_ring_enqueue_bulk));
		if (rc != 0)
			break;

		lcore_stat_update(&la->stats, 1, num, tm0 + tm1, prcs);

	} while (wrk_cmd == WRK_CMD_RUN);

	cl = rte_rdtsc_precise() - cl;
	if (prcs == 0)
		lcore_stat_update(&la->stats, 0, 0, cl, 0);
	la->stats.nb_cycle = cl;
	return rc;
}

static int
test_worker_prcs(void *arg)
{
	return test_worker(arg, __func__, 1);
}

static int
test_worker_avg(void *arg)
{
	return test_worker(arg, __func__, 0);
}

static void
mt1_fini(struct rte_ring *rng, void *data)
{
	rte_free(rng);
	rte_free(data);
}

static int
mt1_init(struct rte_ring **rng, void **data, uint32_t num)
{
	int32_t rc;
	size_t sz;
	uint32_t i, nr;
	struct rte_ring *r;
	struct ring_elem *elm;
	void *p;

	*rng = NULL;
	*data = NULL;

	/* alloc data for the ring objects */
	sz = num * sizeof(*elm);
	elm = rte_zmalloc(NULL, sz, __alignof__(*elm));
	if (elm == NULL) {
		printf("%s: alloc(%zu) for %u elems data failed\n",
			__func__, sz, num);
		return -ENOMEM;
	}

	*data = elm;

	/* alloc ring */
	nr = 2 * num;
	sz = rte_ring_get_memsize(nr);
	r = rte_zmalloc(NULL, sz, __alignof__(*r));
	if (r == NULL) {
		printf("%s: alloc(%zu) for FIFO with %u elems failed\n",
			__func__, sz, nr);
		return -ENOMEM;
	}

	*rng = r;

	rc = _st_ring_init(r, RING_NAME, nr);
	if (rc != 0) {
		printf("%s: _st_ring_init(%p, %u) failed, error: %d(%s)\n",
			__func__, r, nr, rc, strerror(-rc));
		return rc;
	}

	/* fill the ring with initialized objects */
	for (i = 0; i != num; i++) {
		fill_ring_elm(elm + i, UINT32_MAX);
		p = elm + i;
		if (_st_ring_enqueue_bulk(r, &p, 1, NULL) != 1)
			break;
	}

	if (i != num) {
		printf("%s: _st_ring_enqueue(%p, %u) returned %u\n",
			__func__, r, num, i);
		return -ENOSPC;
	}

	return 0;
}

static int
test_mt1(int (*test)(void *))
{
	int32_t rc;
	uint32_t lc, mc;
	struct rte_ring *r;
	void *data;
	struct lcore_arg arg[RTE_MAX_LCORE];

	static const struct lcore_stat init_stat = {
		.op.min_cycle = UINT64_MAX,
	};

	rc = mt1_init(&r, &data, RING_SIZE);
	if (rc != 0) {
		mt1_fini(r, data);
		return rc;
	}

	memset(arg, 0, sizeof(arg));

	/* launch on all slaves */
	RTE_LCORE_FOREACH_SLAVE(lc) {
		arg[lc].rng = r;
		arg[lc].stats = init_stat;
		rte_eal_remote_launch(test, &arg[lc], lc);
	}

	/* signal workers to start the test */
	wrk_cmd = WRK_CMD_RUN;

	usleep(run_time * US_PER_S);

	/* signal workers to stop the test */
	wrk_cmd = WRK_CMD_STOP;

	/* wait for slaves and collect stats. */
	mc = rte_lcore_id();
	arg[mc].stats = init_stat;

	rc = 0;
	RTE_LCORE_FOREACH_SLAVE(lc) {
		rc |= rte_eal_wait_lcore(lc);
		lcore_stat_aggr(&arg[mc].stats, &arg[lc].stats);
		if (verbose != 0)
			lcore_stat_dump(stdout, lc, &arg[lc].stats);
	}

	lcore_stat_dump(stdout, UINT32_MAX, &arg[mc].stats);

	mt1_fini(r, data);
	return rc;
}

static const struct test_case tests[] = {
	{
		.name = "MT-WRK_ENQ_DEQ-MST_NONE-PRCS",
		.wfunc = test_worker_prcs,
	},
	{
		.name = "MT-WRK_ENQ_DEQ-MST_NONE-AVG",
		.wfunc = test_worker_avg,
	},
};

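/*
 * Illustrative driver sketch (not part of this file), assuming the test
 * harness runs each entry of tests[] by handing its worker function to
 * test_mt1():
 *
 *	uint32_t i;
 *
 *	for (i = 0; i != RTE_DIM(tests); i++) {
 *		int32_t rc = test_mt1(tests[i].wfunc);
 *		printf("TEST-CASE %s %s\n", tests[i].name,
 *			rc == 0 ? "OK" : "FAILED");
 *	}
 */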