/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <getopt.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <signal.h>

#include "pipeline_common.h"
struct config_data cdata = {
	.num_packets = (1L << 25), /* do ~32M packets */
	.queue_type = RTE_SCHED_TYPE_ATOMIC,
};

/* True if the given lcore was assigned any pipeline role. */
static bool
core_in_use(unsigned int lcore_id) {
	return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] ||
		fdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);
}
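
/*
 * TX buffer error callback: when the TX ring is full, keep retrying
 * rte_eth_tx_burst() until every previously unsent packet is queued.
 */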
static void
eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	int port_id = (uintptr_t) userdata;
	unsigned int _sent = 0;

	do {
		/* Note: hard-coded TX queue */
		_sent += rte_eth_tx_burst(port_id, 0, &pkts[_sent],
				unsent - _sent);
	} while (_sent != unsent);
}

/*
 * Parse the coremask given as argument (hexadecimal string) and fill
 * the global configuration (core role and core count) with the parsed
 * value.
 */
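/* Bit n of the mask selects lcore n: e.g. a mask of "f0" enables lcores 4-7. */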

/* Convert a single hex digit to its numeric value. */
static int xdigit2val(unsigned char c)
{
	int val;

	if (isdigit(c))
		val = c - '0';
	else if (isupper(c))
		val = c - 'A' + 10;
	else
		val = c - 'a' + 10;
	return val;
}

static uint64_t
parse_coremask(const char *coremask)
{
	int i, j, idx = 0;
	unsigned int count = 0;
	char c;
	int val;
	uint64_t mask = 0;
	const int32_t BITS_HEX = 4;

	if (coremask == NULL)
		return -1;
	/* Remove leading and trailing blank characters,
	 * and a 0x/0X prefix if present.
	 */
	while (isblank(*coremask))
		coremask++;
	if (coremask[0] == '0' && ((coremask[1] == 'x')
		|| (coremask[1] == 'X')))
		coremask += 2;
	i = strlen(coremask);
	while ((i > 0) && isblank(coremask[i - 1]))
		i--;
	if (i == 0)
		return -1;

	for (i = i - 1; i >= 0 && idx < MAX_NUM_CORE; i--) {
		c = coremask[i];
		if (isxdigit(c) == 0) {
			/* invalid characters */
			return -1;
		}
		val = xdigit2val(c);
		for (j = 0; j < BITS_HEX && idx < MAX_NUM_CORE; j++, idx++) {
			if ((1 << j) & val) {
				mask |= (1ULL << idx);
				count++;
			}
		}
	}
	/* Any remaining leading characters must be zero padding. */
	for (; i >= 0; i--)
		if (coremask[i] != '0')
			return -1;
	if (count == 0)
		return -1;
	return mask;
}

static struct option long_options[] = {
	{"workers", required_argument, 0, 'w'},
	{"packets", required_argument, 0, 'n'},
	{"atomic-flows", required_argument, 0, 'f'},
	{"num_stages", required_argument, 0, 's'},
	{"rx-mask", required_argument, 0, 'r'},
	{"tx-mask", required_argument, 0, 't'},
	{"sched-mask", required_argument, 0, 'e'},
	{"cq-depth", required_argument, 0, 'c'},
	{"work-cycles", required_argument, 0, 'W'},
	{"mempool-size", required_argument, 0, 'm'},
	{"queue-priority", no_argument, 0, 'P'},
	{"parallel", no_argument, 0, 'p'},
	{"ordered", no_argument, 0, 'o'},
	{"quiet", no_argument, 0, 'q'},
	{"use-atq", no_argument, 0, 'a'},
	{"dump", no_argument, 0, 'D'},
	{0, 0, 0, 0}
};
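
/*
 * An illustrative invocation (masks are examples): NIC Rx on lcore 0,
 * NIC Tx on lcore 1, scheduler on lcore 2, workers on lcores 8-15,
 * four stages, no packet limit:
 *   ./eventdev_pipeline --vdev event_sw0 -- -r1 -t2 -e4 -w FF00 -s4 -n0
 */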

static void
usage(void)
{
	const char *usage_str =
		"  Usage: eventdev_demo [options]\n"
		"  -n, --packets=N              Send N packets (default ~32M), 0 implies no limit\n"
		"  -f, --atomic-flows=N         Use N random flows from 1 to N (default 16)\n"
		"  -s, --num_stages=N           Use N atomic stages (default 1)\n"
		"  -r, --rx-mask=core mask      Run NIC rx on CPUs in core mask\n"
		"  -w, --workers=core mask      Run workers on CPUs in core mask\n"
		"  -t, --tx-mask=core mask      Run NIC tx on CPUs in core mask\n"
		"  -e, --sched-mask=core mask   Run scheduler on CPUs in core mask\n"
		"  -c, --cq-depth=N             Worker CQ depth (default 16)\n"
		"  -W, --work-cycles=N          Worker cycles (default 0)\n"
		"  -P, --queue-priority         Enable scheduler queue prioritization\n"
		"  -o, --ordered                Use ordered scheduling\n"
		"  -p, --parallel               Use parallel scheduling\n"
		"  -q, --quiet                  Minimize printed output\n"
		"  -a, --use-atq                Use all type queues\n"
		"  -m, --mempool-size=N         Dictate the mempool size\n"
		"  -D, --dump                   Print detailed statistics before exit\n"
		"\n";
	fprintf(stderr, "%s", usage_str);
	exit(1);
}

static void
parse_app_args(int argc, char **argv)
{
	/* Parse CLI options */
	int option_index;
	int c;
	uint64_t rx_lcore_mask = 0;
	uint64_t tx_lcore_mask = 0;
	uint64_t sched_lcore_mask = 0;
	uint64_t worker_lcore_mask = 0;
	int i;

	for (;;) {
		c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:m:paoPqDW:",
				long_options, &option_index);
		if (c == -1)
			break;

		int popcnt = 0;
		switch (c) {
		case 'n':
			cdata.num_packets = (int64_t)atol(optarg);
			if (cdata.num_packets == 0)
				cdata.num_packets = INT64_MAX;
			break;
		case 'f':
			cdata.num_fids = (unsigned int)atoi(optarg);
			break;
		case 's':
			cdata.num_stages = (unsigned int)atoi(optarg);
			break;
		case 'c':
			cdata.worker_cq_depth = (unsigned int)atoi(optarg);
			break;
		case 'W':
			cdata.worker_cycles = (unsigned int)atoi(optarg);
			break;
		case 'P':
			cdata.enable_queue_priorities = 1;
			break;
		case 'o':
			cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
			break;
		case 'p':
			cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
			break;
		case 'a':
			cdata.all_type_queues = 1;
			break;
		case 'q':
			cdata.quiet = 1;
			break;
		case 'D':
			cdata.dump_dev = 1;
			break;
		case 'w':
			worker_lcore_mask = parse_coremask(optarg);
			break;
		case 'r':
			rx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(rx_lcore_mask);
			fdata->rx_single = (popcnt == 1);
			break;
		case 't':
			tx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(tx_lcore_mask);
			fdata->tx_single = (popcnt == 1);
			break;
		case 'e':
			sched_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(sched_lcore_mask);
			fdata->sched_single = (popcnt == 1);
			break;
		case 'm':
			cdata.num_mbuf = (uint64_t)atol(optarg);
			break;
		default:
			usage();
		}
	}

	cdata.worker_lcore_mask = worker_lcore_mask;
	cdata.sched_lcore_mask = sched_lcore_mask;
	cdata.rx_lcore_mask = rx_lcore_mask;
	cdata.tx_lcore_mask = tx_lcore_mask;

	if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
		usage();

	for (i = 0; i < MAX_NUM_CORE; i++) {
		fdata->rx_core[i] = !!(rx_lcore_mask & (1ULL << i));
		fdata->tx_core[i] = !!(tx_lcore_mask & (1ULL << i));
		fdata->sched_core[i] = !!(sched_lcore_mask & (1ULL << i));
		fdata->worker_core[i] = !!(worker_lcore_mask & (1ULL << i));

		if (fdata->worker_core[i])
			cdata.num_workers++;
		if (core_in_use(i))
			cdata.active_cores++;
	}
}

/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = ETHER_MAX_LEN,
			.ignore_offload_bitfield = 1,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP |
					ETH_RSS_TCP |
					ETH_RSS_UDP,
			}
		}
	};
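
	/*
	 * With a single Rx queue, RSS does not spread load, but enabling it
	 * still makes the NIC compute and store a per-flow hash in each mbuf,
	 * which later stages can use to tell flows apart.
	 */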

	const uint16_t rx_rings = 1, tx_rings = 1;
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	struct rte_eth_conf port_conf = port_conf_default;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (port >= rte_eth_dev_count())
		return -1;
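
	/*
	 * DEV_TX_OFFLOAD_MBUF_FAST_FREE lets the PMD return transmitted mbufs
	 * straight to their pool without per-packet checks; safe here because
	 * all mbufs come from a single pool and are not cloned.
	 */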
	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txconf.offloads = port_conf_default.txmode.offloads;
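	/*
	 * ETH_TXQ_FLAGS_IGNORE (set above) tells the PMD to take TX queue
	 * settings from the offloads field rather than the legacy txq_flags.
	 */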
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	struct ether_addr addr;
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}

static int
init_ports(uint16_t num_ports)
{
	uint16_t portid, i;

	if (!cdata.num_mbuf)
		cdata.num_mbuf = 16384 * num_ports;

	struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
			/* mbufs */ cdata.num_mbuf,
			/* cache_size */ 512,
			/* priv_size */ 0,
			/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());

	RTE_ETH_FOREACH_DEV(portid)
		if (port_init(portid, mp) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
					portid);

	RTE_ETH_FOREACH_DEV(i) {
		void *userdata = (void *)(uintptr_t) i;
		fdata->tx_buf[i] =
			rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
		if (fdata->tx_buf[i] == NULL)
			rte_panic("Out of memory\n");
		rte_eth_tx_buffer_init(fdata->tx_buf[i], 32);
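		/*
		 * The default TX buffer error callback drops unsent packets;
		 * install eth_tx_buffer_retry instead so a full TX ring
		 * causes retries rather than drops.
		 */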
		rte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],
				eth_tx_buffer_retry, userdata);
	}

	return 0;
}

static void
do_capability_setup(uint8_t eventdev_id)
{
	int i;
	uint8_t mt_unsafe = 0;
	uint8_t burst = 0;

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));

		rte_eth_dev_info_get(i, &dev_info);
		/* Check if it is safe to ask the workers to do the Tx. */
		mt_unsafe |= !(dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_MT_LOCKFREE);
	}
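
	/*
	 * Prefer burst enqueue/dequeue when the event device advertises
	 * RTE_EVENT_DEV_CAP_BURST_MODE; the flag is passed to the worker
	 * setup functions below.
	 */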
	struct rte_event_dev_info eventdev_info;
	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));

	rte_event_dev_info_get(eventdev_id, &eventdev_info);
	burst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ? 1 :
		0;
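
	/*
	 * If any NIC lacks multi-thread lock-free TX, the workers cannot all
	 * transmit on a shared queue safely, so fall back to the generic
	 * pipeline with a dedicated TX stage; otherwise workers TX directly.
	 */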
	if (mt_unsafe)
		set_worker_generic_setup_data(&fdata->cap, burst);
	else
		set_worker_tx_setup_data(&fdata->cap, burst);
}

static void
signal_handler(int signum)
{
	if (fdata->done)
		rte_exit(1, "Exiting on signal %d\n", signum);
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
				signum);
		fdata->done = 1;
	}
	if (signum == SIGTSTP)
		rte_event_dev_dump(0, stdout);
}

/* Events received so far on eventdev port p (xstat "port_N_rx"). */
static inline uint64_t
port_stat(int dev_id, int32_t p)
{
	char statname[64];
	snprintf(statname, sizeof(statname), "port_%u_rx", p);
	return rte_event_dev_xstats_by_name_get(dev_id, statname, NULL);
}

int
main(int argc, char **argv)
{
	struct worker_data *worker_data;
	unsigned int num_ports;
	int lcore_id;
	int err;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGTSTP, signal_handler);
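
	/* SIGTSTP is repurposed: it dumps eventdev state to stdout at runtime
	 * (see signal_handler) instead of stopping the process.
	 */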

	err = rte_eal_init(argc, argv);
	if (err < 0)
		rte_panic("Invalid EAL arguments\n");

	argc -= err;
	argv += err;

	fdata = rte_malloc(NULL, sizeof(struct fastpath_data), 0);
	if (fdata == NULL)
		rte_panic("Out of memory\n");

	/* Parse CLI options */
	parse_app_args(argc, argv);

	num_ports = rte_eth_dev_count();
	if (num_ports == 0)
		rte_panic("No ethernet ports found\n");

	const unsigned int cores_needed = cdata.active_cores;

	if (!cdata.quiet) {
		printf("  Config:\n");
		printf("\tports: %u\n", num_ports);
		printf("\tworkers: %u\n", cdata.num_workers);
		printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
		printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
		if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
			printf("\tqid0 type: ordered\n");
		if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
			printf("\tqid0 type: atomic\n");
		printf("\tCores available: %u\n", rte_lcore_count());
		printf("\tCores used: %u\n", cores_needed);
	}

	if (rte_lcore_count() < cores_needed)
		rte_panic("Too few cores (%d < %d)\n", rte_lcore_count(),
				cores_needed);

	const unsigned int ndevs = rte_event_dev_count();
	if (ndevs == 0)
		rte_panic("No dev_id devs found. Pass in a --vdev eventdev.\n");
	if (ndevs > 1)
		fprintf(stderr, "Warning: More than one eventdev, using idx 0\n");

	do_capability_setup(0);
	fdata->cap.check_opt();

	worker_data = rte_calloc(0, cdata.num_workers,
			sizeof(worker_data[0]), 0);
	if (worker_data == NULL)
		rte_panic("rte_calloc failed\n");

	int dev_id = fdata->cap.evdev_setup(&cons_data, worker_data);
	if (dev_id < 0)
		rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");

	init_ports(num_ports);
	fdata->cap.adptr_setup(num_ports);
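
	/*
	 * Launch fdata->cap.worker on every remaining lcore that was given a
	 * role; only lcores marked as workers consume a worker_data slot
	 * (and, with it, an eventdev port).
	 */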
	int worker_idx = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (lcore_id >= MAX_NUM_CORE)
			break;

		if (!fdata->rx_core[lcore_id] &&
			!fdata->worker_core[lcore_id] &&
			!fdata->tx_core[lcore_id] &&
			!fdata->sched_core[lcore_id])
			continue;

		if (fdata->rx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Rx\n",
				__func__, lcore_id);

		if (fdata->tx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Tx, and using eventdev port %u\n",
				__func__, lcore_id, cons_data.port_id);

		if (fdata->sched_core[lcore_id])
			printf("[%s()] lcore %d executing scheduler\n",
					__func__, lcore_id);

		if (fdata->worker_core[lcore_id])
			printf(
				"[%s()] lcore %d executing worker, using eventdev port %u\n",
				__func__, lcore_id,
				worker_data[worker_idx].port_id);

		err = rte_eal_remote_launch(fdata->cap.worker,
				&worker_data[worker_idx], lcore_id);
		if (err)
			rte_panic("Failed to launch worker on core %d\n",
					lcore_id);

		if (fdata->worker_core[lcore_id])
			worker_idx++;
	}
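
	/* The main lcore takes part as well when it appears in one of the
	 * core masks, rather than sitting idle.
	 */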
	lcore_id = rte_lcore_id();

	if (core_in_use(lcore_id))
		fdata->cap.worker(&worker_data[worker_idx++]);

	rte_eal_mp_wait_lcore();

	if (cdata.dump_dev)
		rte_event_dev_dump(dev_id, stdout);

	if (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=
			(uint64_t)-ENOTSUP)) {
		printf("\nPort Workload distribution:\n");
		unsigned int i;
		uint64_t tot_pkts = 0;
		uint64_t pkts_per_wkr[RTE_MAX_LCORE] = {0};

		for (i = 0; i < cdata.num_workers; i++) {
			pkts_per_wkr[i] =
				port_stat(dev_id, worker_data[i].port_id);
			tot_pkts += pkts_per_wkr[i];
		}
		for (i = 0; i < cdata.num_workers; i++) {
			float pc = pkts_per_wkr[i] * 100 /
				(float)tot_pkts;
			printf("worker %u :\t%.1f %% (%"PRIu64" pkts)\n",
					i, pc, pkts_per_wkr[i]);
		}
	}

	return 0;
}