/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <getopt.h>
#include <inttypes.h>
#include <signal.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>

#include "pipeline_common.h"
struct config_data cdata = {
	.num_packets = (1L << 25), /* do ~32M packets */
	/* remaining defaults mirror the values quoted in usage() below */
	.num_fids = 16,
	.queue_type = RTE_SCHED_TYPE_ATOMIC,
	.num_stages = 1,
	.worker_cq_depth = 16,
};
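/*
 * The scheduling type set here is only the default: the -o/--ordered and
 * -p/--parallel flags parsed in parse_app_args() below override it with
 * RTE_SCHED_TYPE_ORDERED or RTE_SCHED_TYPE_PARALLEL respectively.
 */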
static bool
core_in_use(unsigned int lcore_id) {
	return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] ||
		fdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);
}
static void
eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
			void *userdata)
{
	int port_id = (uintptr_t) userdata;
	unsigned int _sent = 0;

	do {
		/* Note: hard-coded TX queue */
		_sent += rte_eth_tx_burst(port_id, 0, &pkts[_sent],
				unsent - _sent);
	} while (_sent != unsent);
}
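/*
 * This callback never gives up: it spins on rte_eth_tx_burst() until the
 * NIC accepts every packet. It is registered per port in init_ports()
 * below via rte_eth_tx_buffer_set_err_callback(), with the port id
 * carried through the userdata pointer.
 */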
/*
 * Parse the coremask given as argument (hexadecimal string) and fill
 * the global configuration (core role and core count) with the parsed
 * value.
 */
static int xdigit2val(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	if (isupper(c))
		return c - 'A' + 10;
	return c - 'a' + 10;
}
static uint64_t
parse_coremask(const char *coremask)
{
	int i, j, idx = 0;
	unsigned int count = 0;
	char c;
	int val;
	uint64_t mask = 0;
	const int32_t BITS_HEX = 4;

	if (coremask == NULL)
		return -1;
	/* Skip leading and trailing blanks; strip an optional 0x/0X
	 * prefix.
	 */
	while (isblank(*coremask))
		coremask++;
	if (coremask[0] == '0' && ((coremask[1] == 'x')
		|| (coremask[1] == 'X')))
		coremask += 2;
	i = strlen(coremask);
	while ((i > 0) && isblank(coremask[i - 1]))
		i--;
	if (i == 0)
		return -1;

	/* Walk the hex digits least-significant first, one mask bit per
	 * binary digit.
	 */
	for (i = i - 1; i >= 0 && idx < MAX_NUM_CORE; i--) {
		c = coremask[i];
		if (isxdigit(c) == 0) {
			/* invalid characters */
			return -1;
		}
		val = xdigit2val(c);
		for (j = 0; j < BITS_HEX && idx < MAX_NUM_CORE; j++, idx++) {
			if ((1 << j) & val) {
				mask |= (1ULL << idx);
				count++;
			}
		}
	}
	/* Any leftover digits beyond MAX_NUM_CORE bits must be zero. */
	for (; i >= 0; i--)
		if (coremask[i] != '0')
			return -1;
	if (count == 0)
		return -1;
	return mask;
}
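/*
 * Illustrative behaviour of the parser above: parse_coremask("0xf0")
 * yields 0xf0 (lcores 4-7 selected, count == 4), while an invalid string
 * such as "0xzz" yields (uint64_t)-1, the same error value produced by
 * an empty or all-zero mask.
 */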
static struct option long_options[] = {
	{"workers", required_argument, 0, 'w'},
	{"packets", required_argument, 0, 'n'},
	{"atomic-flows", required_argument, 0, 'f'},
	{"num_stages", required_argument, 0, 's'},
	{"rx-mask", required_argument, 0, 'r'},
	{"tx-mask", required_argument, 0, 't'},
	{"sched-mask", required_argument, 0, 'e'},
	{"cq-depth", required_argument, 0, 'c'},
	{"work-cycles", required_argument, 0, 'W'},
	{"queue-priority", no_argument, 0, 'P'},
	{"parallel", no_argument, 0, 'p'},
	{"ordered", no_argument, 0, 'o'},
	{"quiet", no_argument, 0, 'q'},
123 {"dump", no_argument, 0, 'D'},
static void
usage(void)
{
	const char *usage_str =
		"  Usage: eventdev_demo [options]\n"
		"  Options:\n"
		"  -n, --packets=N             Send N packets (default ~32M), 0 implies no limit\n"
		"  -f, --atomic-flows=N        Use N random flow IDs from 1 to N (default 16)\n"
		"  -s, --num_stages=N          Use N atomic stages (default 1)\n"
		"  -r, --rx-mask=core mask     Run NIC rx on CPUs in core mask\n"
		"  -w, --workers=core mask     Run workers on CPUs in core mask\n"
		"  -t, --tx-mask=core mask     Run NIC tx on CPUs in core mask\n"
		"  -e, --sched-mask=core mask  Run scheduler on CPUs in core mask\n"
		"  -c, --cq-depth=N            Worker CQ depth (default 16)\n"
		"  -W, --work-cycles=N         Worker cycles (default 0)\n"
		"  -P, --queue-priority        Enable scheduler queue prioritization\n"
		"  -o, --ordered               Use ordered scheduling\n"
		"  -p, --parallel              Use parallel scheduling\n"
		"  -q, --quiet                 Minimize printed output\n"
		"  -D, --dump                  Print detailed statistics before exit\n";
	fprintf(stderr, "%s", usage_str);
	exit(1);
}
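/*
 * Illustrative invocation (the EAL arguments and vdev name are only an
 * example): RX on lcore 0, TX on lcore 1, scheduler on lcore 2 and four
 * workers on lcores 4-7, running a 3-stage pipeline:
 *
 *   ./eventdev_demo -l 0-7 --vdev event_sw0 -- \
 *           -r 0x1 -t 0x2 -e 0x4 -w 0xf0 -s 3
 */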
static void
parse_app_args(int argc, char **argv)
{
	/* Parse cli options */
	int option_index;
	int c;
	int i;
	opterr = 0;
	uint64_t rx_lcore_mask = 0;
	uint64_t tx_lcore_mask = 0;
	uint64_t sched_lcore_mask = 0;
	uint64_t worker_lcore_mask = 0;

	for (;;) {
		c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:poPqDW:",
				long_options, &option_index);
		if (c == -1)
			break;

		int popcnt = 0;
		switch (c) {
		case 'n':
			cdata.num_packets = (int64_t)atol(optarg);
			if (cdata.num_packets == 0)
				cdata.num_packets = INT64_MAX;
			break;
		case 'f':
			cdata.num_fids = (unsigned int)atoi(optarg);
			break;
		case 's':
			cdata.num_stages = (unsigned int)atoi(optarg);
			break;
		case 'c':
			cdata.worker_cq_depth = (unsigned int)atoi(optarg);
			break;
		case 'W':
			cdata.worker_cycles = (unsigned int)atoi(optarg);
			break;
		case 'P':
			cdata.enable_queue_priorities = 1;
			break;
		case 'o':
			cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
			break;
		case 'p':
			cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
			break;
		case 'q':
			cdata.quiet = 1;
			break;
		case 'D':
			cdata.dump_dev = 1;
			break;
		case 'w':
			worker_lcore_mask = parse_coremask(optarg);
			break;
		case 'r':
			rx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(rx_lcore_mask);
			fdata->rx_single = (popcnt == 1);
			break;
		case 't':
			tx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(tx_lcore_mask);
			fdata->tx_single = (popcnt == 1);
			break;
		case 'e':
			sched_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(sched_lcore_mask);
			fdata->sched_single = (popcnt == 1);
			break;
		default:
			usage();
		}
	}

	cdata.worker_lcore_mask = worker_lcore_mask;
	cdata.sched_lcore_mask = sched_lcore_mask;
	cdata.rx_lcore_mask = rx_lcore_mask;
	cdata.tx_lcore_mask = tx_lcore_mask;

	if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
		usage();

	for (i = 0; i < MAX_NUM_CORE; i++) {
		fdata->rx_core[i] = !!(rx_lcore_mask & (1ULL << i));
		fdata->tx_core[i] = !!(tx_lcore_mask & (1ULL << i));
		fdata->sched_core[i] = !!(sched_lcore_mask & (1ULL << i));
		fdata->worker_core[i] = !!(worker_lcore_mask & (1ULL << i));

		if (fdata->worker_core[i])
			cdata.num_workers++;
		if (core_in_use(i))
			cdata.active_cores++;
	}
}
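/*
 * Worked example: "-r 0x1 -w 0x6" marks lcore 0 as the RX core and
 * lcores 1-2 as workers, so the loop above ends with
 * cdata.num_workers == 2 and cdata.active_cores == 3.
 */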
/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = ETHER_MAX_LEN,
			.ignore_offload_bitfield = 1,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP |
					  ETH_RSS_TCP |
					  ETH_RSS_UDP,
			}
		}
	};
	const uint16_t rx_rings = 1, tx_rings = 1;
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	struct rte_eth_conf port_conf = port_conf_default;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (port >= rte_eth_dev_count())
		return -1;

	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txconf.offloads = port_conf_default.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	struct ether_addr addr;
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}
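/*
 * RSS is requested above so that multiple RX queues would spread flows
 * by the IP/TCP/UDP hash; with rx_rings fixed at 1, all traffic simply
 * lands in the single queue. Errors return negative values rather than
 * exiting, leaving the policy to the caller (init_ports() below).
 */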
static int
init_ports(unsigned int num_ports)
{
	uint8_t portid;
	unsigned int i;

	struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
			/* mbufs */ 16384 * num_ports,
			/* cache_size */ 512,
			/* priv_size */ 0,
			/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());

	for (portid = 0; portid < num_ports; portid++)
		if (port_init(portid, mp) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
					portid);

	for (i = 0; i < num_ports; i++) {
		void *userdata = (void *)(uintptr_t) i;
		fdata->tx_buf[i] =
			rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
		if (fdata->tx_buf[i] == NULL)
			rte_panic("Out of memory\n");
		rte_eth_tx_buffer_init(fdata->tx_buf[i], 32);
		rte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],
						   eth_tx_buffer_retry,
						   userdata);
	}

	return 0;
}
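/*
 * Sizing note: the pool above allocates 16384 mbufs per port with a
 * 512-entry per-lcore cache, and each port gets a 32-packet TX buffer
 * whose overflow path is the retrying callback registered just above.
 */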
static void
do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
{
	int i;
	uint8_t mt_unsafe = 0;
	uint8_t burst = 0;

	for (i = 0; i < nb_ethdev; i++) {
		struct rte_eth_dev_info dev_info;
		memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));

		rte_eth_dev_info_get(i, &dev_info);
		/* Check if it is safe to ask workers to tx: every port
		 * must support multi-thread lockfree tx.
		 */
		mt_unsafe |= !(dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_MT_LOCKFREE);
	}

	struct rte_event_dev_info eventdev_info;
	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));

	rte_event_dev_info_get(eventdev_id, &eventdev_info);
	burst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ? 1 :
		0;

	if (mt_unsafe)
		set_worker_generic_setup_data(&fdata->cap, burst);
	else
		set_worker_tx_setup_data(&fdata->cap, burst);
}
static void
signal_handler(int signum)
{
	if (fdata->done)
		rte_exit(1, "Exiting on signal %d\n", signum);
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
				signum);
		fdata->done = 1;
	}
	if (signum == SIGTSTP)
		rte_event_dev_dump(0, stdout);
}
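/*
 * Runtime tip (illustrative): "kill -TSTP <pid>" takes the SIGTSTP
 * branch above and dumps eventdev state to stdout without stopping the
 * app; a second SIGINT/SIGTERM after fdata->done is set exits
 * immediately via rte_exit().
 */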
static inline uint64_t
port_stat(int dev_id, int32_t p)
{
	char statname[64];
	snprintf(statname, sizeof(statname), "port_%u_rx", p);
	return rte_event_dev_xstats_by_name_get(dev_id, statname, NULL);
}
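/*
 * The "port_%u_rx" xstat name follows the software eventdev PMD's
 * counter naming; on a PMD without that statistic the lookup yields
 * (uint64_t)-ENOTSUP, which main() checks before printing the workload
 * distribution.
 */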
int
main(int argc, char **argv)
{
	struct worker_data *worker_data;
	unsigned int num_ports;
	int lcore_id;
	int err;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGTSTP, signal_handler);

	err = rte_eal_init(argc, argv);
	if (err < 0)
		rte_panic("Invalid EAL arguments\n");

	argc -= err;
	argv += err;

	fdata = rte_malloc(NULL, sizeof(struct fastpath_data), 0);
	if (fdata == NULL)
		rte_panic("Out of memory\n");
	/* Parse cli options */
	parse_app_args(argc, argv);

	num_ports = rte_eth_dev_count();
	if (num_ports == 0)
		rte_panic("No ethernet ports found\n");

	const unsigned int cores_needed = cdata.active_cores;
449 printf(" Config:\n");
450 printf("\tports: %u\n", num_ports);
451 printf("\tworkers: %u\n", cdata.num_workers);
452 printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
453 printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
454 if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
455 printf("\tqid0 type: ordered\n");
456 if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
457 printf("\tqid0 type: atomic\n");
458 printf("\tCores available: %u\n", rte_lcore_count());
459 printf("\tCores used: %u\n", cores_needed);
	if (rte_lcore_count() < cores_needed)
		rte_panic("Too few cores (%d < %d)\n", rte_lcore_count(),
				cores_needed);

	const unsigned int ndevs = rte_event_dev_count();
	if (ndevs == 0)
		rte_panic("No dev_id devs found. Pass in a --vdev eventdev.\n");
	if (ndevs > 1)
		fprintf(stderr, "Warning: More than one eventdev, using idx 0\n");
	do_capability_setup(num_ports, 0);
	fdata->cap.check_opt();

	worker_data = rte_calloc(0, cdata.num_workers,
			sizeof(worker_data[0]), 0);
	if (worker_data == NULL)
		rte_panic("rte_calloc failed\n");

	int dev_id = fdata->cap.evdev_setup(&cons_data, worker_data);
	if (dev_id < 0)
		rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");

	init_ports(num_ports);
	fdata->cap.adptr_setup(num_ports);
	int worker_idx = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (lcore_id >= MAX_NUM_CORE)
			break;

		if (!fdata->rx_core[lcore_id] &&
			!fdata->worker_core[lcore_id] &&
			!fdata->tx_core[lcore_id] &&
			!fdata->sched_core[lcore_id])
			continue;

		if (fdata->rx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Rx\n",
				__func__, lcore_id);

		if (fdata->tx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Tx, and using eventdev port %u\n",
				__func__, lcore_id, cons_data.port_id);

		if (fdata->sched_core[lcore_id])
			printf("[%s()] lcore %d executing scheduler\n",
					__func__, lcore_id);

		if (fdata->worker_core[lcore_id])
			printf(
				"[%s()] lcore %d executing worker, using eventdev port %u\n",
				__func__, lcore_id,
				worker_data[worker_idx].port_id);

		err = rte_eal_remote_launch(fdata->cap.worker,
				&worker_data[worker_idx], lcore_id);
		if (err)
			rte_panic("Failed to launch worker on core %d\n",
					lcore_id);

		if (fdata->worker_core[lcore_id])
			worker_idx++;
	}
	lcore_id = rte_lcore_id();

	if (core_in_use(lcore_id))
		fdata->cap.worker(&worker_data[worker_idx++]);

	rte_eal_mp_wait_lcore();

	if (cdata.dump_dev)
		rte_event_dev_dump(dev_id, stdout);
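/*
 * Example of the report printed below (numbers illustrative only):
 *
 *   Port Workload distribution:
 *   worker 0 :	25.1 % (2097152 pkts)
 *   worker 1 :	24.9 % (2080768 pkts)
 */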
	if (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=
			(uint64_t)-ENOTSUP)) {
		printf("\nPort Workload distribution:\n");
		uint32_t i;
		uint64_t tot_pkts = 0;
		uint64_t pkts_per_wkr[RTE_MAX_LCORE] = {0};

		for (i = 0; i < cdata.num_workers; i++) {
			pkts_per_wkr[i] =
				port_stat(dev_id, worker_data[i].port_id);
			tot_pkts += pkts_per_wkr[i];
		}
		for (i = 0; i < cdata.num_workers; i++) {
			float pc = pkts_per_wkr[i] * 100 /
				((float)tot_pkts);
			printf("worker %i :\t%.1f %% (%"PRIu64" pkts)\n",
					i, pc, pkts_per_wkr[i]);
		}
	}

	return 0;
}