fa832ccd19fd91b5af97998ef5c7c1f7ed6fe55f
[dpdk.git] / examples / eventdev_pipeline_sw_pmd / main.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <getopt.h>
6 #include <stdint.h>
7 #include <stdio.h>
8 #include <signal.h>
9 #include <sched.h>
10 #include <stdbool.h>
11
12 #include <rte_eal.h>
13 #include <rte_mempool.h>
14 #include <rte_mbuf.h>
15 #include <rte_launch.h>
16 #include <rte_malloc.h>
17 #include <rte_random.h>
18 #include <rte_cycles.h>
19 #include <rte_ethdev.h>
20 #include <rte_eventdev.h>
21 #include <rte_service.h>
22
23 #define MAX_NUM_STAGES 8
24 #define BATCH_SIZE 16
25 #define MAX_NUM_CORE 64
26
27 struct prod_data {
28         uint8_t dev_id;
29         uint8_t port_id;
30         int32_t qid;
31         unsigned int num_nic_ports;
32 } __rte_cache_aligned;
33
34 struct cons_data {
35         uint8_t dev_id;
36         uint8_t port_id;
37         uint8_t release;
38 } __rte_cache_aligned;
39
40 static struct prod_data prod_data;
41 static struct cons_data cons_data;
42
43 struct worker_data {
44         uint8_t dev_id;
45         uint8_t port_id;
46 } __rte_cache_aligned;
47
48 struct fastpath_data {
49         volatile int done;
50         uint32_t rx_lock;
51         uint32_t tx_lock;
52         uint32_t sched_lock;
53         uint32_t evdev_service_id;
54         bool rx_single;
55         bool tx_single;
56         bool sched_single;
57         unsigned int rx_core[MAX_NUM_CORE];
58         unsigned int tx_core[MAX_NUM_CORE];
59         unsigned int sched_core[MAX_NUM_CORE];
60         unsigned int worker_core[MAX_NUM_CORE];
61         struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
62 };
63
64 static struct fastpath_data *fdata;
65
66 struct config_data {
67         unsigned int active_cores;
68         unsigned int num_workers;
69         int64_t num_packets;
70         unsigned int num_fids;
71         int queue_type;
72         int worker_cycles;
73         int enable_queue_priorities;
74         int quiet;
75         int dump_dev;
76         int dump_dev_signal;
77         unsigned int num_stages;
78         unsigned int worker_cq_depth;
79         int16_t next_qid[MAX_NUM_STAGES+2];
80         int16_t qid[MAX_NUM_STAGES];
81 };
82
83 static struct config_data cdata = {
84         .num_packets = (1L << 25), /* do ~32M packets */
85         .num_fids = 512,
86         .queue_type = RTE_SCHED_TYPE_ATOMIC,
87         .next_qid = {-1},
88         .qid = {-1},
89         .num_stages = 1,
90         .worker_cq_depth = 16
91 };
92
93 static bool
94 core_in_use(unsigned int lcore_id) {
95         return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] ||
96                 fdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);
97 }
98
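/*
 * TX-buffer error callback: keep retrying the (hard-coded) TX queue 0 of
 * the port stored in userdata until every unsent packet has gone out.
 */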
99 static void
100 eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
101                         void *userdata)
102 {
103         int port_id = (uintptr_t) userdata;
104         unsigned int _sent = 0;
105
106         do {
107                 /* Note: hard-coded TX queue */
108                 _sent += rte_eth_tx_burst(port_id, 0, &pkts[_sent],
109                                           unsent - _sent);
110         } while (_sent != unsent);
111 }
112
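/*
 * Consumer: dequeues events from the eventdev consumer port, buffers each
 * mbuf for transmission on its output ethdev port (queue 0) and, when the
 * PMD does not release events implicitly, enqueues RELEASE operations.
 * Also prints a periodic throughput summary and stops the pipeline once
 * the requested packet count has been handled.
 */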
113 static int
114 consumer(void)
115 {
116         const uint64_t freq_khz = rte_get_timer_hz() / 1000;
117         struct rte_event packets[BATCH_SIZE];
118
119         static uint64_t received;
120         static uint64_t last_pkts;
121         static uint64_t last_time;
122         static uint64_t start_time;
123         unsigned int i, j;
124         uint8_t dev_id = cons_data.dev_id;
125         uint8_t port_id = cons_data.port_id;
126
127         uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
128                         packets, RTE_DIM(packets), 0);
129
130         if (n == 0) {
131                 for (j = 0; j < rte_eth_dev_count(); j++)
132                         rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
133                 return 0;
134         }
135         if (start_time == 0)
136                 last_time = start_time = rte_get_timer_cycles();
137
138         received += n;
139         for (i = 0; i < n; i++) {
140                 uint8_t outport = packets[i].mbuf->port;
141                 rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
142                                 packets[i].mbuf);
143
144                 packets[i].op = RTE_EVENT_OP_RELEASE;
145         }
146
147         if (cons_data.release) {
148                 uint16_t nb_tx;
149
150                 nb_tx = rte_event_enqueue_burst(dev_id, port_id, packets, n);
151                 while (nb_tx < n)
152                         nb_tx += rte_event_enqueue_burst(dev_id, port_id,
153                                                          packets + nb_tx,
154                                                          n - nb_tx);
155         }
156
157         /* Print out mpps every 1<<22 packets */
158         if (!cdata.quiet && received >= last_pkts + (1<<22)) {
159                 const uint64_t now = rte_get_timer_cycles();
160                 const uint64_t total_ms = (now - start_time) / freq_khz;
161                 const uint64_t delta_ms = (now - last_time) / freq_khz;
162                 uint64_t delta_pkts = received - last_pkts;
163
164                 printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
165                         "avg %.3f mpps [current %.3f mpps]\n",
166                                 received,
167                                 total_ms,
168                                 received / (total_ms * 1000.0),
169                                 delta_pkts / (delta_ms * 1000.0));
170                 last_pkts = received;
171                 last_time = now;
172         }
173
174         cdata.num_packets -= n;
175         if (cdata.num_packets <= 0)
176                 fdata->done = 1;
177
178         return 0;
179 }
180
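/*
 * Producer: polls one NIC RX queue per call (rotating across ports),
 * wraps each received mbuf in a NEW event targeting the first pipeline
 * queue, using the RSS hash as the flow id, and enqueues the burst on the
 * producer's event port. Mbufs that cannot be enqueued are freed.
 */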
181 static int
182 producer(void)
183 {
184         static uint8_t eth_port;
185         struct rte_mbuf *mbufs[BATCH_SIZE+2];
186         struct rte_event ev[BATCH_SIZE+2];
187         uint32_t i, num_ports = prod_data.num_nic_ports;
188         int32_t qid = prod_data.qid;
189         uint8_t dev_id = prod_data.dev_id;
190         uint8_t port_id = prod_data.port_id;
191         uint32_t prio_idx = 0;
192
193         const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BATCH_SIZE);
194         if (++eth_port == num_ports)
195                 eth_port = 0;
196         if (nb_rx == 0) {
197                 rte_pause();
198                 return 0;
199         }
200
201         for (i = 0; i < nb_rx; i++) {
202                 ev[i].flow_id = mbufs[i]->hash.rss;
203                 ev[i].op = RTE_EVENT_OP_NEW;
204                 ev[i].sched_type = cdata.queue_type;
205                 ev[i].queue_id = qid;
206                 ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
207                 ev[i].sub_event_type = 0;
208                 ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
209                 ev[i].mbuf = mbufs[i];
210                 RTE_SET_USED(prio_idx);
211         }
212
213         const int nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);
214         if (nb_tx != nb_rx) {
215                 for (i = nb_tx; i < nb_rx; i++)
216                         rte_pktmbuf_free(mbufs[i]);
217         }
218
219         return 0;
220 }
221
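/*
 * Multiplex the RX, scheduler and TX roles onto whichever lcores were
 * assigned to them: when a role spans several cores, a compare-and-set
 * lock ensures only one core runs it at a time, while a single-core role
 * skips the lock entirely.
 */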
222 static inline void
223 schedule_devices(unsigned int lcore_id)
224 {
225         if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
226             rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
227                 producer();
228                 rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
229         }
230
231         if (fdata->sched_core[lcore_id] && (fdata->sched_single ||
232             rte_atomic32_cmpset(&(fdata->sched_lock), 0, 1))) {
233                 rte_service_run_iter_on_app_lcore(fdata->evdev_service_id, 1);
234                 if (cdata.dump_dev_signal) {
235                         rte_event_dev_dump(0, stdout);
236                         cdata.dump_dev_signal = 0;
237                 }
238                 rte_atomic32_clear((rte_atomic32_t *)&(fdata->sched_lock));
239         }
240
241         if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
242             rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
243                 consumer();
244                 rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
245         }
246 }
247
248 static inline void
249 work(struct rte_mbuf *m)
250 {
251         struct ether_hdr *eth;
252         struct ether_addr addr;
253
254         /* change mac addresses on packet (to use mbuf data) */
255         /*
256          * FIXME Swap mac address properly and also handle the
257          * case for both odd and even number of stages that the
258          * addresses end up the same at the end of the pipeline
259          */
260         eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
261         ether_addr_copy(&eth->d_addr, &addr);
262         ether_addr_copy(&addr, &eth->d_addr);
263
264         /* do a number of cycles of work per packet */
265         volatile uint64_t start_tsc = rte_rdtsc();
266         while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
267                 rte_pause();
268 }
269
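/*
 * Worker loop: besides helping with RX/TX/scheduling via
 * schedule_devices(), a worker core dequeues a burst of events, applies
 * the per-packet work, retargets each event to the next stage's queue
 * (classifying flows on the first stage) and forwards the burst back into
 * the eventdev until the pipeline is done.
 */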
270 static int
271 worker(void *arg)
272 {
273         struct rte_event events[BATCH_SIZE];
274
275         struct worker_data *data = (struct worker_data *)arg;
276         uint8_t dev_id = data->dev_id;
277         uint8_t port_id = data->port_id;
278         size_t sent = 0, received = 0;
279         unsigned int lcore_id = rte_lcore_id();
280
281         while (!fdata->done) {
282                 uint16_t i;
283
284                 schedule_devices(lcore_id);
285
286                 if (!fdata->worker_core[lcore_id]) {
287                         rte_pause();
288                         continue;
289                 }
290
291                 const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
292                                 events, RTE_DIM(events), 0);
293
294                 if (nb_rx == 0) {
295                         rte_pause();
296                         continue;
297                 }
298                 received += nb_rx;
299
300                 for (i = 0; i < nb_rx; i++) {
301
302                         /* The first worker stage does classification */
303                         if (events[i].queue_id == cdata.qid[0])
304                                 events[i].flow_id = events[i].mbuf->hash.rss
305                                                         % cdata.num_fids;
306
307                         events[i].queue_id = cdata.next_qid[events[i].queue_id];
308                         events[i].op = RTE_EVENT_OP_FORWARD;
309                         events[i].sched_type = cdata.queue_type;
310
311                         work(events[i].mbuf);
312                 }
313                 uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
314                                 events, nb_rx);
315                 while (nb_tx < nb_rx && !fdata->done)
316                         nb_tx += rte_event_enqueue_burst(dev_id, port_id,
317                                                         events + nb_tx,
318                                                         nb_rx - nb_tx);
319                 sent += nb_tx;
320         }
321
322         if (!cdata.quiet)
323                 printf("  worker %u thread done. RX=%zu TX=%zu\n",
324                                 rte_lcore_id(), received, sent);
325
326         return 0;
327 }
328
329 /*
330  * Parse the coremask given as argument (hexadecimal string) and fill
331  * the global configuration (core role and core count) with the parsed
332  * value.
333  */
334 static int xdigit2val(unsigned char c)
335 {
336         int val;
337
338         if (isdigit(c))
339                 val = c - '0';
340         else if (isupper(c))
341                 val = c - 'A' + 10;
342         else
343                 val = c - 'a' + 10;
344         return val;
345 }
346
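/*
 * Convert a hexadecimal core-mask string (optionally prefixed with 0x/0X)
 * into a 64-bit lcore bitmask. Returns (uint64_t)-1 on an empty or
 * invalid mask.
 */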
347 static uint64_t
348 parse_coremask(const char *coremask)
349 {
350         int i, j, idx = 0;
351         unsigned int count = 0;
352         char c;
353         int val;
354         uint64_t mask = 0;
355         const int32_t BITS_HEX = 4;
356
357         if (coremask == NULL)
358                 return -1;
359         /* Remove leading and trailing blank characters.
360          * Remove the 0x/0X prefix if it exists.
361          */
362         while (isblank(*coremask))
363                 coremask++;
364         if (coremask[0] == '0' && ((coremask[1] == 'x')
365                 || (coremask[1] == 'X')))
366                 coremask += 2;
367         i = strlen(coremask);
368         while ((i > 0) && isblank(coremask[i - 1]))
369                 i--;
370         if (i == 0)
371                 return -1;
372
373         for (i = i - 1; i >= 0 && idx < MAX_NUM_CORE; i--) {
374                 c = coremask[i];
375                 if (isxdigit(c) == 0) {
376                         /* invalid characters */
377                         return -1;
378                 }
379                 val = xdigit2val(c);
380                 for (j = 0; j < BITS_HEX && idx < MAX_NUM_CORE; j++, idx++) {
381                         if ((1 << j) & val) {
382                                 mask |= (1UL << idx);
383                                 count++;
384                         }
385                 }
386         }
387         for (; i >= 0; i--)
388                 if (coremask[i] != '0')
389                         return -1;
390         if (count == 0)
391                 return -1;
392         return mask;
393 }
394
395 static struct option long_options[] = {
396         {"workers", required_argument, 0, 'w'},
397         {"packets", required_argument, 0, 'n'},
398         {"atomic-flows", required_argument, 0, 'f'},
399         {"num_stages", required_argument, 0, 's'},
400         {"rx-mask", required_argument, 0, 'r'},
401         {"tx-mask", required_argument, 0, 't'},
402         {"sched-mask", required_argument, 0, 'e'},
403         {"cq-depth", required_argument, 0, 'c'},
404         {"work-cycles", required_argument, 0, 'W'},
405         {"queue-priority", no_argument, 0, 'P'},
406         {"parallel", no_argument, 0, 'p'},
407         {"ordered", no_argument, 0, 'o'},
408         {"quiet", no_argument, 0, 'q'},
409         {"dump", no_argument, 0, 'D'},
410         {0, 0, 0, 0}
411 };
412
413 static void
414 usage(void)
415 {
416         const char *usage_str =
417                 "  Usage: eventdev_pipeline_sw_pmd [options]\n"
418                 "  Options:\n"
419                 "  -n, --packets=N              Send N packets (default ~32M), 0 implies no limit\n"
420                 "  -f, --atomic-flows=N         Use N random flows from 1 to N (default 512)\n"
421                 "  -s, --num_stages=N           Use N atomic stages (default 1)\n"
422                 "  -r, --rx-mask=core mask      Run NIC rx on CPUs in core mask\n"
423                 "  -w, --workers=core mask      Run worker on CPUs in core mask\n"
424                 "  -t, --tx-mask=core mask      Run NIC tx on CPUs in core mask\n"
425                 "  -e  --sched-mask=core mask   Run scheduler on CPUs in core mask\n"
426                 "  -c  --cq-depth=N             Worker CQ depth (default 16)\n"
427                 "  -W  --work-cycles=N          Worker cycles (default 0)\n"
428                 "  -P  --queue-priority         Enable scheduler queue prioritization\n"
429                 "  -o, --ordered                Use ordered scheduling\n"
430                 "  -p, --parallel               Use parallel scheduling\n"
431                 "  -q, --quiet                  Minimize printed output\n"
432                 "  -D, --dump                   Print detailed statistics before exit"
433                 "\n";
434         fprintf(stderr, "%s", usage_str);
435         exit(1);
436 }
437
438 static void
439 parse_app_args(int argc, char **argv)
440 {
441         /* Parse CLI options */
442         int option_index;
443         int c;
444         opterr = 0;
445         uint64_t rx_lcore_mask = 0;
446         uint64_t tx_lcore_mask = 0;
447         uint64_t sched_lcore_mask = 0;
448         uint64_t worker_lcore_mask = 0;
449         int i;
450
451         for (;;) {
452                 c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:poPqDW:",
453                                 long_options, &option_index);
454                 if (c == -1)
455                         break;
456
457                 int popcnt = 0;
458                 switch (c) {
459                 case 'n':
460                         cdata.num_packets = (int64_t)atol(optarg);
461                         if (cdata.num_packets == 0)
462                                 cdata.num_packets = INT64_MAX;
463                         break;
464                 case 'f':
465                         cdata.num_fids = (unsigned int)atoi(optarg);
466                         break;
467                 case 's':
468                         cdata.num_stages = (unsigned int)atoi(optarg);
469                         break;
470                 case 'c':
471                         cdata.worker_cq_depth = (unsigned int)atoi(optarg);
472                         break;
473                 case 'W':
474                         cdata.worker_cycles = (unsigned int)atoi(optarg);
475                         break;
476                 case 'P':
477                         cdata.enable_queue_priorities = 1;
478                         break;
479                 case 'o':
480                         cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
481                         break;
482                 case 'p':
483                         cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
484                         break;
485                 case 'q':
486                         cdata.quiet = 1;
487                         break;
488                 case 'D':
489                         cdata.dump_dev = 1;
490                         break;
491                 case 'w':
492                         worker_lcore_mask = parse_coremask(optarg);
493                         break;
494                 case 'r':
495                         rx_lcore_mask = parse_coremask(optarg);
496                         popcnt = __builtin_popcountll(rx_lcore_mask);
497                         fdata->rx_single = (popcnt == 1);
498                         break;
499                 case 't':
500                         tx_lcore_mask = parse_coremask(optarg);
501                         popcnt = __builtin_popcountll(tx_lcore_mask);
502                         fdata->tx_single = (popcnt == 1);
503                         break;
504                 case 'e':
505                         sched_lcore_mask = parse_coremask(optarg);
506                         popcnt = __builtin_popcountll(sched_lcore_mask);
507                         fdata->sched_single = (popcnt == 1);
508                         break;
509                 default:
510                         usage();
511                 }
512         }
513
514         if (worker_lcore_mask == 0 || rx_lcore_mask == 0 ||
515             sched_lcore_mask == 0 || tx_lcore_mask == 0) {
516                 printf("A part of the pipeline was not assigned any cores. "
517                         "This will stall the pipeline; please check the core masks "
518                         "(use -h for details on setting core masks):\n"
519                         "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
520                         "\n\tworkers: %"PRIu64"\n",
521                         rx_lcore_mask, tx_lcore_mask, sched_lcore_mask,
522                         worker_lcore_mask);
523                 rte_exit(-1, "Fix core masks\n");
524         }
525         if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
526                 usage();
527
528         for (i = 0; i < MAX_NUM_CORE; i++) {
529                 fdata->rx_core[i] = !!(rx_lcore_mask & (1UL << i));
530                 fdata->tx_core[i] = !!(tx_lcore_mask & (1UL << i));
531                 fdata->sched_core[i] = !!(sched_lcore_mask & (1UL << i));
532                 fdata->worker_core[i] = !!(worker_lcore_mask & (1UL << i));
533
534                 if (fdata->worker_core[i])
535                         cdata.num_workers++;
536                 if (core_in_use(i))
537                         cdata.active_cores++;
538         }
539 }
540
541 /*
542  * Initializes a given port using global settings and with the RX buffers
543  * coming from the mbuf_pool passed as a parameter.
544  */
545 static inline int
546 port_init(uint8_t port, struct rte_mempool *mbuf_pool)
547 {
548         static const struct rte_eth_conf port_conf_default = {
549                 .rxmode = {
550                         .mq_mode = ETH_MQ_RX_RSS,
551                         .max_rx_pkt_len = ETHER_MAX_LEN,
552                         .ignore_offload_bitfield = 1,
553                 },
554                 .rx_adv_conf = {
555                         .rss_conf = {
556                                 .rss_hf = ETH_RSS_IP |
557                                           ETH_RSS_TCP |
558                                           ETH_RSS_UDP,
559                         }
560                 }
561         };
562         const uint16_t rx_rings = 1, tx_rings = 1;
563         const uint16_t rx_ring_size = 512, tx_ring_size = 512;
564         struct rte_eth_conf port_conf = port_conf_default;
565         int retval;
566         uint16_t q;
567         struct rte_eth_dev_info dev_info;
568         struct rte_eth_txconf txconf;
569
570         if (port >= rte_eth_dev_count())
571                 return -1;
572
573         rte_eth_dev_info_get(port, &dev_info);
574         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
575                 port_conf.txmode.offloads |=
576                         DEV_TX_OFFLOAD_MBUF_FAST_FREE;
577
578         /* Configure the Ethernet device. */
579         retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
580         if (retval != 0)
581                 return retval;
582
583         /* Allocate and set up 1 RX queue per Ethernet port. */
584         for (q = 0; q < rx_rings; q++) {
585                 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
586                                 rte_eth_dev_socket_id(port), NULL, mbuf_pool);
587                 if (retval < 0)
588                         return retval;
589         }
590
591         txconf = dev_info.default_txconf;
592         txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
593         txconf.offloads = port_conf_default.txmode.offloads;
594         /* Allocate and set up 1 TX queue per Ethernet port. */
595         for (q = 0; q < tx_rings; q++) {
596                 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
597                                 rte_eth_dev_socket_id(port), &txconf);
598                 if (retval < 0)
599                         return retval;
600         }
601
602         /* Start the Ethernet port. */
603         retval = rte_eth_dev_start(port);
604         if (retval < 0)
605                 return retval;
606
607         /* Display the port MAC address. */
608         struct ether_addr addr;
609         rte_eth_macaddr_get(port, &addr);
610         printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
611                            " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
612                         (unsigned int)port,
613                         addr.addr_bytes[0], addr.addr_bytes[1],
614                         addr.addr_bytes[2], addr.addr_bytes[3],
615                         addr.addr_bytes[4], addr.addr_bytes[5]);
616
617         /* Enable RX in promiscuous mode for the Ethernet device. */
618         rte_eth_promiscuous_enable(port);
619
620         return 0;
621 }
622
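/*
 * Create a shared mbuf pool and initialize each ethdev port, giving every
 * port a TX buffer whose error callback retries transmission until all
 * packets are sent.
 */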
623 static int
624 init_ports(unsigned int num_ports)
625 {
626         uint8_t portid;
627         unsigned int i;
628
629         struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
630                         /* mbufs */ 16384 * num_ports,
631                         /* cache_size */ 512,
632                         /* priv_size*/ 0,
633                         /* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
634                         rte_socket_id());
635
636         for (portid = 0; portid < num_ports; portid++)
637                 if (port_init(portid, mp) != 0)
638                         rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
639                                         portid);
640
641         for (i = 0; i < num_ports; i++) {
642                 void *userdata = (void *)(uintptr_t) i;
643                 fdata->tx_buf[i] =
644                         rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
645                 if (fdata->tx_buf[i] == NULL)
646                         rte_panic("Out of memory\n");
647                 rte_eth_tx_buffer_init(fdata->tx_buf[i], 32);
648                 rte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],
649                                                    eth_tx_buffer_retry,
650                                                    userdata);
651         }
652
653         return 0;
654 }
655
656 struct port_link {
657         uint8_t queue_id;
658         uint8_t priority;
659 };
660
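/*
 * Configure the eventdev: one load-balanced queue per worker stage plus a
 * high-priority SINGLE_LINK queue feeding the TX core, one event port per
 * worker (linked to every stage queue), one port for the consumer (linked
 * to the TX queue) and one unlinked port for the producer. The eventdev's
 * scheduling service is set running and allowed to be invoked directly
 * from application lcores (see schedule_devices()) before the device is
 * started.
 */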
661 static int
662 setup_eventdev(struct prod_data *prod_data,
663                 struct cons_data *cons_data,
664                 struct worker_data *worker_data)
665 {
666         const uint8_t dev_id = 0;
667         /* the +1 queue is for a SINGLE_LINK TX stage */
668         const uint8_t nb_queues = cdata.num_stages + 1;
669         /* + 2 is one port for producer and one for consumer */
670         const uint8_t nb_ports = cdata.num_workers + 2;
671         struct rte_event_dev_config config = {
672                         .nb_event_queues = nb_queues,
673                         .nb_event_ports = nb_ports,
674                         .nb_events_limit  = 4096,
675                         .nb_event_queue_flows = 1024,
676                         .nb_event_port_dequeue_depth = 128,
677                         .nb_event_port_enqueue_depth = 128,
678         };
679         struct rte_event_port_conf wkr_p_conf = {
680                         .dequeue_depth = cdata.worker_cq_depth,
681                         .enqueue_depth = 64,
682                         .new_event_threshold = 4096,
683         };
684         struct rte_event_queue_conf wkr_q_conf = {
685                         .schedule_type = cdata.queue_type,
686                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
687                         .nb_atomic_flows = 1024,
688                         .nb_atomic_order_sequences = 1024,
689         };
690         struct rte_event_port_conf tx_p_conf = {
691                         .dequeue_depth = 128,
692                         .enqueue_depth = 128,
693                         .new_event_threshold = 4096,
694         };
695         const struct rte_event_queue_conf tx_q_conf = {
696                         .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
697                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
698         };
699
700         struct port_link worker_queues[MAX_NUM_STAGES];
701         uint8_t disable_implicit_release;
702         struct port_link tx_queue;
703         unsigned int i;
704
705         int ret, ndev = rte_event_dev_count();
706         if (ndev < 1) {
707                 printf("%d: No Eventdev Devices Found\n", __LINE__);
708                 return -1;
709         }
710
711         struct rte_event_dev_info dev_info;
712         ret = rte_event_dev_info_get(dev_id, &dev_info);
713         printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
714
715         disable_implicit_release = (dev_info.event_dev_cap &
716                                     RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
717
718         wkr_p_conf.disable_implicit_release = disable_implicit_release;
719         tx_p_conf.disable_implicit_release = disable_implicit_release;
720
721         if (dev_info.max_event_port_dequeue_depth <
722                         config.nb_event_port_dequeue_depth)
723                 config.nb_event_port_dequeue_depth =
724                                 dev_info.max_event_port_dequeue_depth;
725         if (dev_info.max_event_port_enqueue_depth <
726                         config.nb_event_port_enqueue_depth)
727                 config.nb_event_port_enqueue_depth =
728                                 dev_info.max_event_port_enqueue_depth;
729
730         ret = rte_event_dev_configure(dev_id, &config);
731         if (ret < 0) {
732                 printf("%d: Error configuring device\n", __LINE__);
733                 return -1;
734         }
735
736         /* Queue creation - one load-balanced queue per pipeline stage */
737         printf("  Stages:\n");
738         for (i = 0; i < cdata.num_stages; i++) {
739                 if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
740                         printf("%d: error creating qid %d\n", __LINE__, i);
741                         return -1;
742                 }
743                 cdata.qid[i] = i;
744                 cdata.next_qid[i] = i+1;
745                 worker_queues[i].queue_id = i;
746                 if (cdata.enable_queue_priorities) {
747                         /* calculate priority stepping for each stage, leaving
748                          * headroom of 1 for the SINGLE_LINK TX below
749                          */
750                         const uint32_t prio_delta =
751                                 (RTE_EVENT_DEV_PRIORITY_LOWEST-1) /  nb_queues;
752
753                         /* higher priority for queues closer to tx */
754                         wkr_q_conf.priority =
755                                 RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
756                 }
757
758                 const char *type_str = "Atomic";
759                 switch (wkr_q_conf.schedule_type) {
760                 case RTE_SCHED_TYPE_ORDERED:
761                         type_str = "Ordered";
762                         break;
763                 case RTE_SCHED_TYPE_PARALLEL:
764                         type_str = "Parallel";
765                         break;
766                 }
767                 printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
768                                 wkr_q_conf.priority);
769         }
770         printf("\n");
771
772         /* final queue for sending to TX core */
773         if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
774                 printf("%d: error creating qid %d\n", __LINE__, i);
775                 return -1;
776         }
777         tx_queue.queue_id = i;
778         tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
779
780         if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
781                 wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
782         if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
783                 wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
784
785         /* set up one port per worker, linking to all stage queues */
786         for (i = 0; i < cdata.num_workers; i++) {
787                 struct worker_data *w = &worker_data[i];
788                 w->dev_id = dev_id;
789                 if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
790                         printf("Error setting up port %d\n", i);
791                         return -1;
792                 }
793
794                 uint32_t s;
795                 for (s = 0; s < cdata.num_stages; s++) {
796                         if (rte_event_port_link(dev_id, i,
797                                                 &worker_queues[s].queue_id,
798                                                 &worker_queues[s].priority,
799                                                 1) != 1) {
800                                 printf("%d: error creating link for port %d\n",
801                                                 __LINE__, i);
802                                 return -1;
803                         }
804                 }
805                 w->port_id = i;
806         }
807
808         if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
809                 tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
810         if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
811                 tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
812
813         /* port for consumer, linked to TX queue */
814         if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
815                 printf("Error setting up port %d\n", i);
816                 return -1;
817         }
818         if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
819                                 &tx_queue.priority, 1) != 1) {
820                 printf("%d: error creating link for port %d\n",
821                                 __LINE__, i);
822                 return -1;
823         }
824         /* port for producer, no links */
825         struct rte_event_port_conf rx_p_conf = {
826                         .dequeue_depth = 8,
827                         .enqueue_depth = 8,
828                         .new_event_threshold = 1200,
829                         .disable_implicit_release = disable_implicit_release,
830         };
831
832         if (rx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
833                 rx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
834         if (rx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
835                 rx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
836
837         if (rte_event_port_setup(dev_id, i + 1, &rx_p_conf) < 0) {
838                 printf("Error setting up port %d\n", i);
839                 return -1;
840         }
841
842         *prod_data = (struct prod_data){.dev_id = dev_id,
843                                         .port_id = i + 1,
844                                         .qid = cdata.qid[0] };
845         *cons_data = (struct cons_data){.dev_id = dev_id,
846                                         .port_id = i,
847                                         .release = disable_implicit_release };
848
849         ret = rte_event_dev_service_id_get(dev_id,
850                                 &fdata->evdev_service_id);
851         if (ret != -ESRCH && ret != 0) {
852                 printf("Error getting the service ID for sw eventdev\n");
853                 return -1;
854         }
855         rte_service_runstate_set(fdata->evdev_service_id, 1);
856         rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
857         if (rte_event_dev_start(dev_id) < 0) {
858                 printf("Error starting eventdev\n");
859                 return -1;
860         }
861
862         return dev_id;
863 }
864
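/*
 * SIGINT/SIGTERM request a graceful stop; once a stop has already been
 * requested, any further signal exits immediately. SIGTSTP dumps eventdev
 * statistics to stdout.
 */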
865 static void
866 signal_handler(int signum)
867 {
868         if (fdata->done)
869                 rte_exit(1, "Exiting on signal %d\n", signum);
870         if (signum == SIGINT || signum == SIGTERM) {
871                 printf("\n\nSignal %d received, preparing to exit...\n",
872                                 signum);
873                 fdata->done = 1;
874         }
875         if (signum == SIGTSTP)
876                 rte_event_dev_dump(0, stdout);
877 }
878
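/*
 * Read a port's "port_<id>_rx" extended statistic from the event device
 * (events received on that port); main() uses it to report how the
 * workload was spread across workers.
 */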
879 static inline uint64_t
880 port_stat(int dev_id, int32_t p)
881 {
882         char statname[64];
883         snprintf(statname, sizeof(statname), "port_%u_rx", p);
884         return rte_event_dev_xstats_by_name_get(dev_id, statname, NULL);
885 }
886
887 int
888 main(int argc, char **argv)
889 {
890         struct worker_data *worker_data;
891         unsigned int num_ports;
892         int lcore_id;
893         int err;
894
895         signal(SIGINT, signal_handler);
896         signal(SIGTERM, signal_handler);
897         signal(SIGTSTP, signal_handler);
898
899         err = rte_eal_init(argc, argv);
900         if (err < 0)
901                 rte_panic("Invalid EAL arguments\n");
902
903         argc -= err;
904         argv += err;
905
906         fdata = rte_malloc(NULL, sizeof(struct fastpath_data), 0);
907         if (fdata == NULL)
908                 rte_panic("Out of memory\n");
909
910         /* Parse CLI options */
911         parse_app_args(argc, argv);
912
913         num_ports = rte_eth_dev_count();
914         if (num_ports == 0)
915                 rte_panic("No ethernet ports found\n");
916
917         const unsigned int cores_needed = cdata.active_cores;
918
919         if (!cdata.quiet) {
920                 printf("  Config:\n");
921                 printf("\tports: %u\n", num_ports);
922                 printf("\tworkers: %u\n", cdata.num_workers);
923                 printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
924                 printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
925                 if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
926                         printf("\tqid0 type: ordered\n");
927                 if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
928                         printf("\tqid0 type: atomic\n");
929                 printf("\tCores available: %u\n", rte_lcore_count());
930                 printf("\tCores used: %u\n", cores_needed);
931         }
932
933         if (rte_lcore_count() < cores_needed)
934                 rte_panic("Too few cores (%d < %d)\n", rte_lcore_count(),
935                                 cores_needed);
936
937         const unsigned int ndevs = rte_event_dev_count();
938         if (ndevs == 0)
939                 rte_panic("No eventdev devices found. Pass in a --vdev eventdev.\n");
940         if (ndevs > 1)
941                 fprintf(stderr, "Warning: More than one eventdev, using idx 0\n");
942
943         worker_data = rte_calloc(0, cdata.num_workers,
944                         sizeof(worker_data[0]), 0);
945         if (worker_data == NULL)
946                 rte_panic("rte_calloc failed\n");
947
948         int dev_id = setup_eventdev(&prod_data, &cons_data, worker_data);
949         if (dev_id < 0)
950                 rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");
951
952         prod_data.num_nic_ports = num_ports;
953         init_ports(num_ports);
954
955         int worker_idx = 0;
956         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
957                 if (lcore_id >= MAX_NUM_CORE)
958                         break;
959
960                 if (!fdata->rx_core[lcore_id] &&
961                         !fdata->worker_core[lcore_id] &&
962                         !fdata->tx_core[lcore_id] &&
963                         !fdata->sched_core[lcore_id])
964                         continue;
965
966                 if (fdata->rx_core[lcore_id])
967                         printf(
968                                 "[%s()] lcore %d executing NIC Rx, and using eventdev port %u\n",
969                                 __func__, lcore_id, prod_data.port_id);
970
971                 if (fdata->tx_core[lcore_id])
972                         printf(
973                                 "[%s()] lcore %d executing NIC Tx, and using eventdev port %u\n",
974                                 __func__, lcore_id, cons_data.port_id);
975
976                 if (fdata->sched_core[lcore_id])
977                         printf("[%s()] lcore %d executing scheduler\n",
978                                         __func__, lcore_id);
979
980                 if (fdata->worker_core[lcore_id])
981                         printf(
982                                 "[%s()] lcore %d executing worker, using eventdev port %u\n",
983                                 __func__, lcore_id,
984                                 worker_data[worker_idx].port_id);
985
986                 err = rte_eal_remote_launch(worker, &worker_data[worker_idx],
987                                             lcore_id);
988                 if (err) {
989                         rte_panic("Failed to launch worker on core %d\n",
990                                         lcore_id);
991                         continue;
992                 }
993                 if (fdata->worker_core[lcore_id])
994                         worker_idx++;
995         }
996
997         lcore_id = rte_lcore_id();
998
999         if (core_in_use(lcore_id))
1000                 worker(&worker_data[worker_idx++]);
1001
1002         rte_eal_mp_wait_lcore();
1003
1004         if (cdata.dump_dev)
1005                 rte_event_dev_dump(dev_id, stdout);
1006
1007         if (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=
1008                         (uint64_t)-ENOTSUP)) {
1009                 printf("\nPort Workload distribution:\n");
1010                 uint32_t i;
1011                 uint64_t tot_pkts = 0;
1012                 uint64_t pkts_per_wkr[RTE_MAX_LCORE] = {0};
1013                 for (i = 0; i < cdata.num_workers; i++) {
1014                         pkts_per_wkr[i] =
1015                                 port_stat(dev_id, worker_data[i].port_id);
1016                         tot_pkts += pkts_per_wkr[i];
1017                 }
1018                 for (i = 0; i < cdata.num_workers; i++) {
1019                         float pc = pkts_per_wkr[i]  * 100 /
1020                                 ((float)tot_pkts);
1021                         printf("worker %i :\t%.1f %% (%"PRIu64" pkts)\n",
1022                                         i, pc, pkts_per_wkr[i]);
1023                 }
1024
1025         }
1026
1027         return 0;
1028 }