/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <getopt.h>
#include <stdint.h>
#include <stdio.h>
#include <signal.h>
#include <sched.h>

#include "pipeline_common.h"
struct config_data cdata = {
	.num_packets = (1L << 25), /* do ~32M packets */
	.num_fids = 512,
	.queue_type = RTE_SCHED_TYPE_ATOMIC,
	.next_qid = {-1},
	.qid = {-1},
	.num_stages = 1,
	.worker_cq_depth = 16
};
static bool
core_in_use(unsigned int lcore_id) {
	return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] ||
		fdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);
}
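
/*
 * Tx buffer error callback, registered per port in init_ports() below: it
 * retries rte_eth_tx_burst() until every packet the buffer flush could not
 * send has been transmitted.
 */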
static void
eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
			void *userdata)
{
	int port_id = (uintptr_t) userdata;
	unsigned int _sent = 0;

	do {
		/* Note: hard-coded TX queue */
		_sent += rte_eth_tx_burst(port_id, 0, &pkts[_sent],
					  unsent - _sent);
	} while (_sent != unsent);
}
/*
 * Parse the coremask given as argument (hexadecimal string) and fill
 * the global configuration (core role and core count) with the parsed
 * value.
 */
static int xdigit2val(unsigned char c)
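{
	/* Body elided in the original listing; a minimal sketch assuming the
	 * usual hex-digit mapping: '0'-'9' -> 0-9, 'a'-'f'/'A'-'F' -> 10-15.
	 */
	if (isdigit(c))
		return c - '0';
	if (isupper(c))
		return c - 'A' + 10;
	return c - 'a' + 10;
}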
static uint64_t
parse_coremask(const char *coremask)
{
	int i, j, idx = 0;
	unsigned int count = 0;
	char c;
	int val;
	uint64_t mask = 0;
	const int32_t BITS_HEX = 4;

	if (coremask == NULL)
		return -1;
	/* Remove all blank characters before and after the mask.
	 * Remove the 0x/0X prefix if it exists.
	 */
	while (isblank(*coremask))
		coremask++;
	if (coremask[0] == '0' && ((coremask[1] == 'x')
		|| (coremask[1] == 'X')))
		coremask += 2;
	i = strlen(coremask);
	while ((i > 0) && isblank(coremask[i - 1]))
		i--;
	if (i == 0)
		return -1;

	for (i = i - 1; i >= 0 && idx < MAX_NUM_CORE; i--) {
		c = coremask[i];
		if (isxdigit(c) == 0) {
			/* invalid characters */
			return -1;
		}
		val = xdigit2val(c);
		for (j = 0; j < BITS_HEX && idx < MAX_NUM_CORE; j++, idx++) {
			if ((1 << j) & val) {
				mask |= (1UL << idx);
				count++;
			}
		}
	}
	/* Any remaining higher-order digits must be zero. */
	for (; i >= 0; i--)
		if (coremask[i] != '0')
			return -1;
	if (count == 0)
		return -1;
	return mask;
}
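
/* Example: "-r 0xf0" selects lcores 4-7 for Rx; a mask with invalid
 * characters or with no bits set parses to (uint64_t)-1.
 */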
static struct option long_options[] = {
	{"workers", required_argument, 0, 'w'},
	{"packets", required_argument, 0, 'n'},
	{"atomic-flows", required_argument, 0, 'f'},
	{"num_stages", required_argument, 0, 's'},
	{"rx-mask", required_argument, 0, 'r'},
	{"tx-mask", required_argument, 0, 't'},
	{"sched-mask", required_argument, 0, 'e'},
	{"cq-depth", required_argument, 0, 'c'},
	{"work-cycles", required_argument, 0, 'W'},
	{"mempool-size", required_argument, 0, 'm'},
	{"queue-priority", no_argument, 0, 'P'},
	{"parallel", no_argument, 0, 'p'},
	{"ordered", no_argument, 0, 'o'},
	{"quiet", no_argument, 0, 'q'},
	{"use-atq", no_argument, 0, 'a'},
	{"dump", no_argument, 0, 'D'},
	{0, 0, 0, 0}
};
static void
usage(void)
{
	const char *usage_str =
		" Usage: eventdev_demo [options]\n"
		" -n, --packets=N            Send N packets (default ~32M), 0 implies no limit\n"
		" -f, --atomic-flows=N       Use N random flows from 1 to N (default 512)\n"
		" -s, --num_stages=N         Use N atomic stages (default 1)\n"
		" -r, --rx-mask=core mask    Run NIC rx on CPUs in core mask\n"
		" -w, --workers=core mask    Run worker on CPUs in core mask\n"
		" -t, --tx-mask=core mask    Run NIC tx on CPUs in core mask\n"
		" -e, --sched-mask=core mask Run scheduler on CPUs in core mask\n"
		" -c, --cq-depth=N           Worker CQ depth (default 16)\n"
		" -W, --work-cycles=N        Worker cycles (default 0)\n"
		" -P, --queue-priority       Enable scheduler queue prioritization\n"
		" -o, --ordered              Use ordered scheduling\n"
		" -p, --parallel             Use parallel scheduling\n"
		" -q, --quiet                Minimize printed output\n"
		" -a, --use-atq              Use all type queues\n"
		" -m, --mempool-size=N       Dictate the mempool size\n"
		" -D, --dump                 Print detailed statistics before exit"
		"\n";
	fprintf(stderr, "%s", usage_str);
	exit(1);
}
static void
parse_app_args(int argc, char **argv)
{
	/* Parse cli options */
	int option_index;
	int c;
	opterr = 0;
	uint64_t rx_lcore_mask = 0;
	uint64_t tx_lcore_mask = 0;
	uint64_t sched_lcore_mask = 0;
	uint64_t worker_lcore_mask = 0;
	int i;

	for (;;) {
		c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:m:paoPqDW:",
				long_options, &option_index);
		if (c == -1)
			break;
		int popcnt = 0;
		switch (c) {
		case 'n':
			cdata.num_packets = (int64_t)atol(optarg);
			if (cdata.num_packets == 0)
				cdata.num_packets = INT64_MAX;
			break;
		case 'f':
			cdata.num_fids = (unsigned int)atoi(optarg);
			break;
		case 's':
			cdata.num_stages = (unsigned int)atoi(optarg);
			break;
		case 'c':
			cdata.worker_cq_depth = (unsigned int)atoi(optarg);
			break;
		case 'W':
			cdata.worker_cycles = (unsigned int)atoi(optarg);
			break;
		case 'P':
			cdata.enable_queue_priorities = 1;
			break;
		case 'o':
			cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
			break;
		case 'p':
			cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
			break;
		case 'a':
			cdata.all_type_queues = 1;
			break;
		case 'q':
			cdata.quiet = 1;
			break;
		case 'D':
			cdata.dump_dev = 1;
			break;
		case 'w':
			worker_lcore_mask = parse_coremask(optarg);
			break;
		case 'r':
			rx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(rx_lcore_mask);
			fdata->rx_single = (popcnt == 1);
			break;
		case 't':
			tx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(tx_lcore_mask);
			fdata->tx_single = (popcnt == 1);
			break;
		case 'e':
			sched_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(sched_lcore_mask);
			fdata->sched_single = (popcnt == 1);
			break;
		case 'm':
			cdata.num_mbuf = (uint64_t)atol(optarg);
			break;
		default:
			usage();
		}
	}
	cdata.worker_lcore_mask = worker_lcore_mask;
	cdata.sched_lcore_mask = sched_lcore_mask;
	cdata.rx_lcore_mask = rx_lcore_mask;
	cdata.tx_lcore_mask = tx_lcore_mask;

	if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
		usage();

	for (i = 0; i < MAX_NUM_CORE; i++) {
		fdata->rx_core[i] = !!(rx_lcore_mask & (1UL << i));
		fdata->tx_core[i] = !!(tx_lcore_mask & (1UL << i));
		fdata->sched_core[i] = !!(sched_lcore_mask & (1UL << i));
		fdata->worker_core[i] = !!(worker_lcore_mask & (1UL << i));

		if (fdata->worker_core[i])
			cdata.num_workers++;
		if (core_in_use(i))
			cdata.active_cores++;
	}
}
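
/*
 * Illustrative invocation (assumes the software eventdev vdev and arbitrary
 * core numbers): Rx on lcore 1, Tx on lcore 2, scheduler on lcore 3 and
 * workers on lcores 4-7:
 *
 *   ./eventdev_pipeline --vdev event_sw0 -- \
 *           -r 0x2 -t 0x4 -e 0x8 -w 0xf0 -s 3 -n 0
 */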
/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = ETHER_MAX_LEN,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP |
					  ETH_RSS_TCP |
					  ETH_RSS_UDP,
			}
		}
	};
	const uint16_t rx_rings = 1, tx_rings = 1;
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	struct rte_eth_conf port_conf = port_conf_default;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;
	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf_default.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}
	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	struct ether_addr addr;
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}
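
/*
 * Create one shared mbuf pool sized for all ports (16384 mbufs per port
 * unless overridden with --mempool-size), initialize each available port,
 * and attach a 32-entry Tx buffer whose error callback retries until all
 * packets are sent.
 */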
static int
init_ports(uint16_t num_ports)
{
	uint16_t i, portid;

	if (!cdata.num_mbuf)
		cdata.num_mbuf = 16384 * num_ports;

	struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
			/* mbufs */ cdata.num_mbuf,
			/* cache_size */ 512,
			/* priv_size */ 0,
			/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());

	RTE_ETH_FOREACH_DEV(portid)
		if (port_init(portid, mp) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
					portid);

	RTE_ETH_FOREACH_DEV(i) {
		void *userdata = (void *)(uintptr_t) i;
		fdata->tx_buf[i] =
			rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
		if (fdata->tx_buf[i] == NULL)
			rte_panic("Out of memory\n");
		rte_eth_tx_buffer_init(fdata->tx_buf[i], 32);
		rte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],
				eth_tx_buffer_retry, userdata);
	}

	return 0;
}
static void
do_capability_setup(uint8_t eventdev_id)
{
	uint16_t i;
	uint8_t mt_unsafe = 0;
	uint8_t burst = 0;

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));

		rte_eth_dev_info_get(i, &dev_info);
		/* Check if it is safe to ask a worker to tx. */
		mt_unsafe |= !(dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_MT_LOCKFREE);
	}

	struct rte_event_dev_info eventdev_info;
	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));

	rte_event_dev_info_get(eventdev_id, &eventdev_info);
	burst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ? 1 :
			0;
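
	/* If any Tx port lacks DEV_TX_OFFLOAD_MT_LOCKFREE, concurrent Tx from
	 * multiple workers is unsafe, so use the generic pipeline with a
	 * dedicated Tx path; otherwise workers may transmit directly.
	 */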
	if (mt_unsafe)
		set_worker_generic_setup_data(&fdata->cap, burst);
	else
		set_worker_tx_setup_data(&fdata->cap, burst);
}
static void
signal_handler(int signum)
{
	if (fdata->done)
		rte_exit(1, "Exiting on signal %d\n", signum);
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
				signum);
		fdata->done = 1;
	}
	if (signum == SIGTSTP)
		rte_event_dev_dump(0, stdout);
}
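
/*
 * Read the eventdev "port_N_rx" xstat, i.e. the number of events received on
 * the given eventdev port; the call yields -ENOTSUP (as a uint64_t) when the
 * PMD does not support xstats, which main() checks before printing stats.
 */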
static inline uint64_t
port_stat(int dev_id, int32_t p)
{
	char statname[64];
	snprintf(statname, sizeof(statname), "port_%u_rx", p);
	return rte_event_dev_xstats_by_name_get(dev_id, statname, NULL);
}
int
main(int argc, char **argv)
{
	struct worker_data *worker_data;
	uint16_t num_ports;
	int lcore_id;
	int err;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGTSTP, signal_handler);

	err = rte_eal_init(argc, argv);
	if (err < 0)
		rte_panic("Invalid EAL arguments\n");

	argc -= err;
	argv += err;

	fdata = rte_malloc(NULL, sizeof(struct fastpath_data), 0);
	if (fdata == NULL)
		rte_panic("Out of memory\n");

	/* Parse cli options */
	parse_app_args(argc, argv);

	num_ports = rte_eth_dev_count_avail();
	if (num_ports == 0)
		rte_panic("No ethernet ports found\n");

	const unsigned int cores_needed = cdata.active_cores;
	if (!cdata.quiet) {
		printf("  Config:\n");
		printf("\tports: %u\n", num_ports);
		printf("\tworkers: %u\n", cdata.num_workers);
		printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
		printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
		if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
			printf("\tqid0 type: ordered\n");
		if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
			printf("\tqid0 type: atomic\n");
		printf("\tCores available: %u\n", rte_lcore_count());
		printf("\tCores used: %u\n", cores_needed);
	}
	if (rte_lcore_count() < cores_needed)
		rte_panic("Too few cores (%d < %d)\n", rte_lcore_count(),
				cores_needed);

	const unsigned int ndevs = rte_event_dev_count();
	if (ndevs == 0)
		rte_panic("No dev_id devs found. Pass in a --vdev eventdev.\n");
	if (ndevs > 1)
		fprintf(stderr,
			"Warning: More than one eventdev, using idx 0\n");
	do_capability_setup(0);
	fdata->cap.check_opt();

	worker_data = rte_calloc(0, cdata.num_workers,
			sizeof(worker_data[0]), 0);
	if (worker_data == NULL)
		rte_panic("rte_calloc failed\n");

	int dev_id = fdata->cap.evdev_setup(&cons_data, worker_data);
	if (dev_id < 0)
		rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");

	init_ports(num_ports);
	fdata->cap.adptr_setup(num_ports);
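
	/* Launch the pipeline on every slave lcore that has a role; the single
	 * entry point fdata->cap.worker services whichever Rx/Tx/scheduler/
	 * worker roles are set for that lcore.
	 */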
	int worker_idx = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (lcore_id >= MAX_NUM_CORE)
			break;

		if (!fdata->rx_core[lcore_id] &&
				!fdata->worker_core[lcore_id] &&
				!fdata->tx_core[lcore_id] &&
				!fdata->sched_core[lcore_id])
			continue;

		if (fdata->rx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Rx\n",
				__func__, lcore_id);

		if (fdata->tx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Tx, and using eventdev port %u\n",
				__func__, lcore_id, cons_data.port_id);

		if (fdata->sched_core[lcore_id])
			printf("[%s()] lcore %d executing scheduler\n",
					__func__, lcore_id);

		if (fdata->worker_core[lcore_id])
			printf(
				"[%s()] lcore %d executing worker, using eventdev port %u\n",
				__func__, lcore_id,
				worker_data[worker_idx].port_id);

		err = rte_eal_remote_launch(fdata->cap.worker,
				&worker_data[worker_idx], lcore_id);
		if (err)
			rte_panic("Failed to launch worker on core %d\n",
					lcore_id);

		if (fdata->worker_core[lcore_id])
			worker_idx++;
	}
	lcore_id = rte_lcore_id();

	if (core_in_use(lcore_id))
		fdata->cap.worker(&worker_data[worker_idx++]);

	rte_eal_mp_wait_lcore();
	if (cdata.dump_dev)
		rte_event_dev_dump(dev_id, stdout);
	if (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=
			(uint64_t)-ENOTSUP)) {
		printf("\nPort Workload distribution:\n");
		uint32_t i;
		uint64_t tot_pkts = 0;
		uint64_t pkts_per_wkr[RTE_MAX_LCORE] = {0};

		for (i = 0; i < cdata.num_workers; i++) {
			pkts_per_wkr[i] =
				port_stat(dev_id, worker_data[i].port_id);
			tot_pkts += pkts_per_wkr[i];
		}
		for (i = 0; i < cdata.num_workers; i++) {
			float pc = pkts_per_wkr[i] * 100 /
				((float)tot_pkts);
			printf("worker %i :\t%.1f %% (%"PRIu64" pkts)\n",
					i, pc, pkts_per_wkr[i]);
		}
	}

	return 0;
}