/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */
#include <ctype.h>
#include <getopt.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "pipeline_common.h"

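/* Global fast-path state: per-lcore role flags plus the capability-specific
 * worker callbacks selected at runtime.
 */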
struct fastpath_data *fdata;

struct config_data cdata = {
	.num_packets = (1L << 25), /* do ~32M packets */
	.queue_type = RTE_SCHED_TYPE_ATOMIC,
};

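/* True if the given lcore has been assigned any pipeline role. */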
static bool
core_in_use(unsigned int lcore_id) {
	return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] ||
		fdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);
}

/*
 * Parse the coremask given as argument (hexadecimal string) and fill
 * the global configuration (core role and core count) with the parsed
 * value.
 */
static int xdigit2val(unsigned char c)
{
	/* Convert a single hexadecimal digit to its numeric value. */
	if (isdigit(c))
		return c - '0';
	if (isupper(c))
		return c - 'A' + 10;
	return c - 'a' + 10;
}

static uint64_t
parse_coremask(const char *coremask)
{
	int i, j, idx = 0;
	unsigned int count = 0;
	char c;
	int val;
	uint64_t mask = 0;
	const int32_t BITS_HEX = 4;

	if (coremask == NULL)
		return -1;
	/* Remove leading and trailing blank characters,
	 * and the 0x/0X prefix if it exists.
	 */
	while (isblank(*coremask))
		coremask++;
	if (coremask[0] == '0' && ((coremask[1] == 'x')
			|| (coremask[1] == 'X')))
		coremask += 2;
	i = strlen(coremask);
	while ((i > 0) && isblank(coremask[i - 1]))
		i--;
	if (i == 0)
		return -1;

	/* Walk the string from least to most significant hex digit,
	 * setting one mask bit per enabled core.
	 */
	for (i = i - 1; i >= 0 && idx < MAX_NUM_CORE; i--) {
		c = coremask[i];
		if (isxdigit(c) == 0) {
			/* invalid characters */
			return -1;
		}
		val = xdigit2val(c);
		for (j = 0; j < BITS_HEX && idx < MAX_NUM_CORE; j++, idx++) {
			if ((1 << j) & val) {
				mask |= (1ULL << idx);
				count++;
			}
		}
	}
	/* Any remaining (higher-order) digits must be zero. */
	for (; i >= 0; i--)
		if (coremask[i] != '0')
			return -1;
	if (count == 0)
		return -1;
	return mask;
}

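/*
 * For example (illustrative): parse_coremask("0xf0") and parse_coremask("f0")
 * both yield 0xf0 (lcores 4-7 selected), while a string with no set bits or
 * with a non-hex character yields (uint64_t)-1 to signal an error.
 */
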
static struct option long_options[] = {
	{"workers", required_argument, 0, 'w'},
	{"packets", required_argument, 0, 'n'},
	{"atomic-flows", required_argument, 0, 'f'},
	{"num_stages", required_argument, 0, 's'},
	{"rx-mask", required_argument, 0, 'r'},
	{"tx-mask", required_argument, 0, 't'},
	{"sched-mask", required_argument, 0, 'e'},
	{"cq-depth", required_argument, 0, 'c'},
	{"work-cycles", required_argument, 0, 'W'},
	{"mempool-size", required_argument, 0, 'm'},
	{"queue-priority", no_argument, 0, 'P'},
	{"parallel", no_argument, 0, 'p'},
	{"ordered", no_argument, 0, 'o'},
	{"quiet", no_argument, 0, 'q'},
	{"use-atq", no_argument, 0, 'a'},
	{"dump", no_argument, 0, 'D'},
	{0, 0, 0, 0}
};

static void
usage(void)
{
	const char *usage_str =
		"  Usage: eventdev_demo [options]\n"
123 " -n, --packets=N Send N packets (default ~32M), 0 implies no limit\n"
124 " -f, --atomic-flows=N Use N random flows from 1 to N (default 16)\n"
125 " -s, --num_stages=N Use N atomic stages (default 1)\n"
126 " -r, --rx-mask=core mask Run NIC rx on CPUs in core mask\n"
127 " -w, --worker-mask=core mask Run worker on CPUs in core mask\n"
128 " -t, --tx-mask=core mask Run NIC tx on CPUs in core mask\n"
129 " -e --sched-mask=core mask Run scheduler on CPUs in core mask\n"
130 " -c --cq-depth=N Worker CQ depth (default 16)\n"
131 " -W --work-cycles=N Worker cycles (default 0)\n"
132 " -P --queue-priority Enable scheduler queue prioritization\n"
133 " -o, --ordered Use ordered scheduling\n"
134 " -p, --parallel Use parallel scheduling\n"
135 " -q, --quiet Minimize printed output\n"
136 " -a, --use-atq Use all type queues\n"
137 " -m, --mempool-size=N Dictate the mempool size\n"
138 " -D, --dump Print detailed statistics before exit"
	fprintf(stderr, "%s", usage_str);
	exit(1);
}

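/* Parse the application (post "--") command-line arguments. */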
static void
parse_app_args(int argc, char **argv)
{
	/* Parse cli options */
	int option_index;
	int c;
	opterr = 0;
	uint64_t rx_lcore_mask = 0;
	uint64_t tx_lcore_mask = 0;
	uint64_t sched_lcore_mask = 0;
	uint64_t worker_lcore_mask = 0;
	int i;

	for (;;) {
		c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:m:paoPqDW:",
				long_options, &option_index);
		if (c == -1)
			break;

		int popcnt = 0;
		switch (c) {
		case 'n':
			cdata.num_packets = (int64_t)atol(optarg);
			if (cdata.num_packets == 0)
				cdata.num_packets = INT64_MAX;
			break;
		case 'f':
			cdata.num_fids = (unsigned int)atoi(optarg);
			break;
		case 's':
			cdata.num_stages = (unsigned int)atoi(optarg);
			break;
		case 'c':
			cdata.worker_cq_depth = (unsigned int)atoi(optarg);
			break;
		case 'W':
			cdata.worker_cycles = (unsigned int)atoi(optarg);
			break;
		case 'P':
			cdata.enable_queue_priorities = 1;
			break;
		case 'o':
			cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
			break;
		case 'p':
			cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
			break;
		case 'a':
			cdata.all_type_queues = 1;
			break;
		case 'q':
			cdata.quiet = 1;
			break;
		case 'D':
			cdata.dump_dev = 1;
			break;
		case 'w':
			worker_lcore_mask = parse_coremask(optarg);
			break;
		case 'r':
			rx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(rx_lcore_mask);
			fdata->rx_single = (popcnt == 1);
			break;
		case 't':
			tx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(tx_lcore_mask);
			fdata->tx_single = (popcnt == 1);
			break;
		case 'e':
			sched_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(sched_lcore_mask);
			fdata->sched_single = (popcnt == 1);
			break;
		case 'm':
			cdata.num_mbuf = (uint64_t)atol(optarg);
			break;
		default:
			usage();
		}
	}

	cdata.worker_lcore_mask = worker_lcore_mask;
	cdata.sched_lcore_mask = sched_lcore_mask;
	cdata.rx_lcore_mask = rx_lcore_mask;
	cdata.tx_lcore_mask = tx_lcore_mask;

	if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
		usage();

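	/* Expand each coremask into per-lcore role flags, and tally the
	 * worker count and the total number of active cores.
	 */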
	for (i = 0; i < MAX_NUM_CORE; i++) {
		fdata->rx_core[i] = !!(rx_lcore_mask & (1ULL << i));
		fdata->tx_core[i] = !!(tx_lcore_mask & (1ULL << i));
		fdata->sched_core[i] = !!(sched_lcore_mask & (1ULL << i));
		fdata->worker_core[i] = !!(worker_lcore_mask & (1ULL << i));

		if (fdata->worker_core[i])
			cdata.num_workers++;
		if (core_in_use(i))
			cdata.active_cores++;
	}
}

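/* Probe device capabilities and select the matching worker implementation:
 * if any ethdev's Tx adapter lacks an internal event port, fall back to the
 * generic pipeline with a dedicated Tx stage; otherwise workers enqueue to
 * the Tx adapter directly.
 */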
static void
do_capability_setup(uint8_t eventdev_id)
{
	int ret;
	uint16_t i;
	uint8_t generic_pipeline = 0;
	uint8_t burst = 0;

	RTE_ETH_FOREACH_DEV(i) {
		uint32_t caps = 0;

		ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"Invalid capability for Tx adapter port %d\n",
				i);
		generic_pipeline |= !(caps &
				RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
	}

	struct rte_event_dev_info eventdev_info;
	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));

	rte_event_dev_info_get(eventdev_id, &eventdev_info);
	burst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ?
			1 : 0;

	if (generic_pipeline)
		set_worker_generic_setup_data(&fdata->cap, burst);
	else
		set_worker_tx_enq_setup_data(&fdata->cap, burst);
}

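/* The first SIGINT/SIGTERM starts a graceful shutdown (stop the adapters and
 * ports, wait for the lcores); any signal after shutdown has begun exits
 * immediately. SIGTSTP dumps eventdev state for debugging.
 */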
static void
signal_handler(int signum)
{
	static uint8_t once;
	uint16_t portid;

	if (fdata->done)
		rte_exit(1, "Exiting on signal %d\n", signum);
	if ((signum == SIGINT || signum == SIGTERM) && !once) {
		printf("\n\nSignal %d received, preparing to exit...\n",
				signum);
		if (cdata.dump_dev)
			rte_event_dev_dump(0, stdout);
		once = 1;
		fdata->done = 1;
		rte_smp_wmb();

		RTE_ETH_FOREACH_DEV(portid) {
			rte_event_eth_rx_adapter_stop(portid);
			rte_event_eth_tx_adapter_stop(portid);
			rte_eth_dev_stop(portid);
		}

		rte_eal_mp_wait_lcore();
	}

	if (signum == SIGTSTP)
		rte_event_dev_dump(0, stdout);
}

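/* Fetch the "port_N_rx" xstat for an eventdev port (e.g. "port_3_rx");
 * returns -ENOTSUP (as a uint64_t) when the PMD does not support xstats.
 */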
static inline uint64_t
port_stat(int dev_id, int32_t p)
{
	char statname[64];
	snprintf(statname, sizeof(statname), "port_%d_rx", p);
	return rte_event_dev_xstats_by_name_get(dev_id, statname, NULL);
}

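/*
 * Example invocation (illustrative; EAL arguments and the eventdev vdev name
 * depend on the target system):
 *
 *   ./dpdk-eventdev_pipeline --vdev event_sw0 -- -r1 -t1 -e4 -w FF00 -s4 -n0
 *
 * runs Rx and Tx on lcore 0, the software scheduler on lcore 2, and workers
 * on lcores 8-15, with 4 stages and no packet limit.
 */
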
int
main(int argc, char **argv)
{
	struct worker_data *worker_data;
	uint16_t num_ports;
	uint16_t portid;
	int lcore_id;
	unsigned int worker_idx = 0;
	int err;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGTSTP, signal_handler);

	err = rte_eal_init(argc, argv);
	if (err < 0)
		rte_panic("Invalid EAL arguments\n");
	argc -= err;
	argv += err;

	fdata = rte_malloc(NULL, sizeof(struct fastpath_data), 0);
	if (fdata == NULL)
		rte_panic("Out of memory\n");
	memset(fdata, 0, sizeof(struct fastpath_data));

	/* Parse cli options */
	parse_app_args(argc, argv);

	num_ports = rte_eth_dev_count_avail();
	if (num_ports == 0)
		rte_panic("No ethernet ports found\n");

	const unsigned int cores_needed = cdata.active_cores;

351 printf(" Config:\n");
352 printf("\tports: %u\n", num_ports);
353 printf("\tworkers: %u\n", cdata.num_workers);
354 printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
355 printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
356 if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
357 printf("\tqid0 type: ordered\n");
358 if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
359 printf("\tqid0 type: atomic\n");
360 printf("\tCores available: %u\n", rte_lcore_count());
361 printf("\tCores used: %u\n", cores_needed);
	if (rte_lcore_count() < cores_needed)
		rte_panic("Too few cores (%d < %d)\n", rte_lcore_count(),
				cores_needed);

	const unsigned int ndevs = rte_event_dev_count();
	if (ndevs == 0)
		rte_panic("No dev_id devs found. Pass in a --vdev eventdev.\n");
	if (ndevs > 1)
		fprintf(stderr, "Warning: More than one eventdev, using idx 0\n");

	do_capability_setup(0);
	fdata->cap.check_opt();

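	/* One worker_data entry per worker lcore; each entry carries the
	 * eventdev port that the worker will poll.
	 */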
	worker_data = rte_calloc(0, cdata.num_workers,
			sizeof(worker_data[0]), 0);
	if (worker_data == NULL)
		rte_panic("rte_calloc failed\n");

	int dev_id = fdata->cap.evdev_setup(worker_data);
	if (dev_id < 0)
		rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");

	fdata->cap.adptr_setup(num_ports);

	/* Start the Ethernet ports. */
	RTE_ETH_FOREACH_DEV(portid) {
		err = rte_eth_dev_start(portid);
		if (err < 0)
			rte_exit(EXIT_FAILURE, "Error starting ethdev %d\n",
					portid);
	}

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (lcore_id >= MAX_NUM_CORE)
			break;

		/* Skip lcores with no assigned role. */
		if (!fdata->rx_core[lcore_id] &&
		    !fdata->worker_core[lcore_id] &&
		    !fdata->tx_core[lcore_id] &&
		    !fdata->sched_core[lcore_id])
			continue;

		if (fdata->rx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Rx\n",
				__func__, lcore_id);

		if (fdata->tx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Tx\n",
				__func__, lcore_id);

		if (fdata->sched_core[lcore_id])
			printf("[%s()] lcore %d executing scheduler\n",
					__func__, lcore_id);

		if (fdata->worker_core[lcore_id])
			printf(
				"[%s()] lcore %d executing worker, using eventdev port %u\n",
				__func__, lcore_id,
				worker_data[worker_idx].port_id);

		err = rte_eal_remote_launch(fdata->cap.worker,
				&worker_data[worker_idx], lcore_id);
		if (err)
			rte_panic("Failed to launch worker on core %d\n",
					lcore_id);

		if (fdata->worker_core[lcore_id])
			worker_idx++;
	}

	lcore_id = rte_lcore_id();

	/* The main lcore participates in the pipeline as well, if assigned. */
	if (core_in_use(lcore_id))
		fdata->cap.worker(&worker_data[worker_idx++]);

	rte_eal_mp_wait_lcore();

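	/* Summarize per-worker Rx load using eventdev xstats, unless quiet
	 * mode is set or the PMD reports that xstats are unsupported.
	 */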
	if (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=
			(uint64_t)-ENOTSUP)) {
		printf("\nPort Workload distribution:\n");
		unsigned int i;
		uint64_t tot_pkts = 0;
		uint64_t pkts_per_wkr[RTE_MAX_LCORE] = {0};
		for (i = 0; i < cdata.num_workers; i++) {
			pkts_per_wkr[i] =
				port_stat(dev_id, worker_data[i].port_id);
			tot_pkts += pkts_per_wkr[i];
		}
		for (i = 0; i < cdata.num_workers; i++) {
			float pc = pkts_per_wkr[i] * 100 /
				((float)tot_pkts);
			printf("worker %u :\t%.1f %% (%"PRIu64" pkts)\n",
					i, pc, pkts_per_wkr[i]);
		}
	}

	RTE_ETH_FOREACH_DEV(portid) {
		rte_eth_dev_close(portid);
	}

	rte_event_dev_stop(0);
	rte_event_dev_close(0);

	return 0;
}