/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_string_fns.h>

#include "main.h"

struct app_params app;

static const char usage[] =
"    load_balancer <EAL PARAMS> -- <APP PARAMS>\n"
"\n"
"Application mandatory parameters:\n"
"    --rx \"(PORT, QUEUE, LCORE), ...\" : List of NIC RX ports and queues\n"
"        handled by the I/O RX lcores\n"
"    --tx \"(PORT, LCORE), ...\" : List of NIC TX ports handled by the I/O TX\n"
"        lcores\n"
"    --w \"LCORE, ...\" : List of the worker lcores\n"
"    --lpm \"IP / PREFIX => PORT; ...\" : List of LPM rules used by the worker\n"
"        lcores for packet forwarding\n"
"\n"
"Application optional parameters:\n"
"    --rsz \"A, B, C, D\" : Ring sizes\n"
"        A = Size (in number of buffer descriptors) of each of the NIC RX\n"
"            rings read by the I/O RX lcores (default value is %u)\n"
"        B = Size (in number of elements) of each of the SW rings used by the\n"
"            I/O RX lcores to send packets to worker lcores (default value is\n"
"            %u)\n"
"        C = Size (in number of elements) of each of the SW rings used by the\n"
"            worker lcores to send packets to I/O TX lcores (default value is\n"
"            %u)\n"
"        D = Size (in number of buffer descriptors) of each of the NIC TX\n"
"            rings written by I/O TX lcores (default value is %u)\n"
"    --bsz \"(A, B), (C, D), (E, F)\" : Burst sizes\n"
"        A = I/O RX lcore read burst size from NIC RX (default value is %u)\n"
"        B = I/O RX lcore write burst size to output SW rings (default value\n"
"            is %u)\n"
"        C = Worker lcore read burst size from input SW rings (default value\n"
"            is %u)\n"
"        D = Worker lcore write burst size to output SW rings (default value\n"
"            is %u)\n"
"        E = I/O TX lcore read burst size from input SW rings (default value\n"
"            is %u)\n"
"        F = I/O TX lcore write burst size to NIC TX (default value is %u)\n"
"    --pos-lb POS : Position of the 1-byte field within the input packet used by\n"
"        the I/O RX lcores to identify the worker lcore for the current\n"
"        packet (default value is %u)\n";
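
/*
 * Illustrative invocation (core, port and queue numbers below are made up,
 * and only meant to make the parameter syntax above concrete):
 *
 *   ./load_balancer <EAL PARAMS> -- \
 *       --rx "(0,0,1),(1,0,1)" --tx "(0,1),(1,1)" --w "2,3" \
 *       --lpm "1.0.0.0/24=>0; 1.0.1.0/24=>1;" --pos-lb 29
 */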
static void
app_print_usage(void)
{
	printf(usage,
		APP_DEFAULT_NIC_RX_RING_SIZE,
		APP_DEFAULT_RING_RX_SIZE,
		APP_DEFAULT_RING_TX_SIZE,
		APP_DEFAULT_NIC_TX_RING_SIZE,
		APP_DEFAULT_BURST_SIZE_IO_RX_READ,
		APP_DEFAULT_BURST_SIZE_IO_RX_WRITE,
		APP_DEFAULT_BURST_SIZE_WORKER_READ,
		APP_DEFAULT_BURST_SIZE_WORKER_WRITE,
		APP_DEFAULT_BURST_SIZE_IO_TX_READ,
		APP_DEFAULT_BURST_SIZE_IO_TX_WRITE,
		APP_DEFAULT_IO_RX_LB_POS);
}

#ifndef APP_ARG_RX_MAX_CHARS
#define APP_ARG_RX_MAX_CHARS     4096
#endif

#ifndef APP_ARG_RX_MAX_TUPLES
#define APP_ARG_RX_MAX_TUPLES    128
#endif
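
/*
 * Helpers for parsing lists of unsigned values. As a rough sketch of the
 * intended behavior (not a comment from the original source):
 * str_to_unsigned_vals("0,1,2", 5, ',', 3, &a, &b, &c) is expected to return
 * 3 and store a = 0, b = 1, c = 2; callers treat any return value different
 * from the number of requested values as a parse error.
 */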
str_to_unsigned_array(
	const char *s, size_t sbuflen,

	char *splits[num_vals];
	int i, num_splits = 0;

	/* copy s so we don't modify original string */
	snprintf(str, sizeof(str), "%s", s);
	num_splits = rte_strsplit(str, sizeof(str), splits, num_vals, separator);

	for (i = 0; i < num_splits; i++) {
		vals[i] = strtoul(splits[i], &endptr, 0);
		if (errno != 0 || *endptr != '\0')

str_to_unsigned_vals(
	unsigned num_vals, ...)

	unsigned i, vals[num_vals];

	num_vals = str_to_unsigned_array(s, sbuflen, separator, num_vals, vals);

	va_start(ap, num_vals);
	for (i = 0; i < num_vals; i++) {
		unsigned *u = va_arg(ap, unsigned *);
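
/*
 * --rx parser: every "(PORT,QUEUE,LCORE)" tuple enables RX queue QUEUE of NIC
 * port PORT and assigns it to I/O lcore LCORE. For example (illustrative),
 * --rx "(0,0,1),(0,1,2)" maps queue 0 of port 0 to lcore 1 and queue 1 of
 * port 0 to lcore 2.
 */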
parse_arg_rx(const char *arg)
	const char *p0 = arg, *p = arg;

	if (strnlen(arg, APP_ARG_RX_MAX_CHARS + 1) == APP_ARG_RX_MAX_CHARS + 1) {

	while ((p = strchr(p0, '(')) != NULL) {
		struct app_lcore_params *lp;
		uint32_t port, queue, lcore, i;

		p0 = strchr(p++, ')');
		    (str_to_unsigned_vals(p, p0 - p, ',', 3, &port, &queue, &lcore) != 3)) {

		/* Enable port and queue for later initialization */
		if ((port >= APP_MAX_NIC_PORTS) || (queue >= APP_MAX_RX_QUEUES_PER_NIC_PORT)) {
		if (app.nic_rx_queue_mask[port][queue] != 0) {
		app.nic_rx_queue_mask[port][queue] = 1;

		/* Check and assign (port, queue) to I/O lcore */
		if (rte_lcore_is_enabled(lcore) == 0) {
		if (lcore >= APP_MAX_LCORES) {

		lp = &app.lcore_params[lcore];
		if (lp->type == e_APP_LCORE_WORKER) {
		lp->type = e_APP_LCORE_IO;

		const size_t n_queues = RTE_MIN(lp->io.rx.n_nic_queues,
				RTE_DIM(lp->io.rx.nic_queues));
		for (i = 0; i < n_queues; i ++) {
			if ((lp->io.rx.nic_queues[i].port == port) &&
			    (lp->io.rx.nic_queues[i].queue == queue)) {

		if (lp->io.rx.n_nic_queues >= APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE) {
		lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].port = (uint8_t) port;
		lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].queue = (uint8_t) queue;
		lp->io.rx.n_nic_queues ++;

	if (n_tuples > APP_ARG_RX_MAX_TUPLES) {

#ifndef APP_ARG_TX_MAX_CHARS
#define APP_ARG_TX_MAX_CHARS     4096
#endif

#ifndef APP_ARG_TX_MAX_TUPLES
#define APP_ARG_TX_MAX_TUPLES    128
#endif
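
/*
 * --tx parser: every "(PORT,LCORE)" tuple enables NIC TX port PORT and
 * assigns it to I/O lcore LCORE, e.g. (illustrative) --tx "(0,1),(1,1)"
 * makes lcore 1 the I/O TX lcore for ports 0 and 1.
 */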
parse_arg_tx(const char *arg)
	const char *p0 = arg, *p = arg;

	if (strnlen(arg, APP_ARG_TX_MAX_CHARS + 1) == APP_ARG_TX_MAX_CHARS + 1) {

	while ((p = strchr(p0, '(')) != NULL) {
		struct app_lcore_params *lp;
		uint32_t port, lcore, i;

		p0 = strchr(p++, ')');
		    (str_to_unsigned_vals(p, p0 - p, ',', 2, &port, &lcore) != 2)) {

		/* Enable port and queue for later initialization */
		if (port >= APP_MAX_NIC_PORTS) {
		if (app.nic_tx_port_mask[port] != 0) {
		app.nic_tx_port_mask[port] = 1;

		/* Check and assign (port, queue) to I/O lcore */
		if (rte_lcore_is_enabled(lcore) == 0) {
		if (lcore >= APP_MAX_LCORES) {

		lp = &app.lcore_params[lcore];
		if (lp->type == e_APP_LCORE_WORKER) {
		lp->type = e_APP_LCORE_IO;

		const size_t n_ports = RTE_MIN(lp->io.tx.n_nic_ports,
				RTE_DIM(lp->io.tx.nic_ports));
		for (i = 0; i < n_ports; i ++) {
			if (lp->io.tx.nic_ports[i] == port) {

		if (lp->io.tx.n_nic_ports >= APP_MAX_NIC_TX_PORTS_PER_IO_LCORE) {
		lp->io.tx.nic_ports[lp->io.tx.n_nic_ports] = (uint8_t) port;
		lp->io.tx.n_nic_ports ++;

	if (n_tuples > APP_ARG_TX_MAX_TUPLES) {

#ifndef APP_ARG_W_MAX_CHARS
#define APP_ARG_W_MAX_CHARS     4096
#endif

#ifndef APP_ARG_W_MAX_TUPLES
#define APP_ARG_W_MAX_TUPLES    APP_MAX_WORKER_LCORES
#endif
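
/*
 * --w parser: the argument is a plain list of worker lcores, e.g.
 * (illustrative) --w "2,3,4,5". The worker count is required to be a power
 * of two (checked below), presumably so the load-balancing byte can be
 * reduced to a worker index with a simple bit mask.
 */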
parse_arg_w(const char *arg)
	if (strnlen(arg, APP_ARG_W_MAX_CHARS + 1) == APP_ARG_W_MAX_CHARS + 1) {

		struct app_lcore_params *lp;

		lcore = strtoul(p, NULL, 0);

		/* Check and enable worker lcore */
		if (rte_lcore_is_enabled(lcore) == 0) {
		if (lcore >= APP_MAX_LCORES) {

		lp = &app.lcore_params[lcore];
		if (lp->type == e_APP_LCORE_IO) {
		lp->type = e_APP_LCORE_WORKER;

	if (n_tuples > APP_ARG_W_MAX_TUPLES) {

	if ((n_tuples & (n_tuples - 1)) != 0) {

#ifndef APP_ARG_LPM_MAX_CHARS
#define APP_ARG_LPM_MAX_CHARS     4096
#endif
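
/*
 * --lpm parser: rules follow the "IP/PREFIX=>PORT;" syntax from the usage
 * text, e.g. (illustrative) --lpm "1.0.0.0/24=>0; 1.0.1.0/24=>1;" sends
 * 1.0.0.x to port 0 and 1.0.1.x to port 1.
 */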
parse_arg_lpm(const char *arg)
	const char *p = arg, *p0;

	if (strnlen(arg, APP_ARG_LPM_MAX_CHARS + 1) == APP_ARG_LPM_MAX_CHARS + 1) {

		uint32_t ip_a, ip_b, ip_c, ip_d, ip, depth, if_out;

		    (str_to_unsigned_vals(p, p0 - p, '.', 4, &ip_a, &ip_b, &ip_c, &ip_d) != 4)) {

		depth = strtoul(p, &endptr, 0);
		if (errno != 0 || *endptr != '=') {

		if_out = strtoul(++p, &endptr, 0);
		if (errno != 0 || (*endptr != '\0' && *endptr != ';')) {

		if ((ip_a >= 256) || (ip_b >= 256) || (ip_c >= 256) || (ip_d >= 256) ||
		    (depth == 0) || (depth >= 32) ||
		    (if_out >= APP_MAX_NIC_PORTS)) {

		ip = (ip_a << 24) | (ip_b << 16) | (ip_c << 8) | ip_d;

		if (app.n_lpm_rules >= APP_MAX_LPM_RULES) {

		app.lpm_rules[app.n_lpm_rules].ip = ip;
		app.lpm_rules[app.n_lpm_rules].depth = (uint8_t) depth;
		app.lpm_rules[app.n_lpm_rules].if_out = (uint8_t) if_out;

	if (app.n_lpm_rules == 0) {

app_check_lpm_table(void)
	/* For each rule, check that the output I/F is enabled */
	for (rule = 0; rule < app.n_lpm_rules; rule ++)
		uint32_t port = app.lpm_rules[rule].if_out;

		if (app.nic_tx_port_mask[port] == 0) {

app_check_every_rx_port_is_tx_enabled(void)
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		if ((app_get_nic_rx_queues_per_port(port) > 0) && (app.nic_tx_port_mask[port] == 0)) {

#ifndef APP_ARG_RSZ_CHARS
#define APP_ARG_RSZ_CHARS 63
#endif
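
/*
 * --rsz parser: expects exactly four sizes "A, B, C, D" in the order given
 * in the usage text (NIC RX ring, I/O RX -> worker SW ring, worker -> I/O TX
 * SW ring, NIC TX ring), e.g. (illustrative) --rsz "1024, 1024, 1024, 1024".
 */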
parse_arg_rsz(const char *arg)
	if (strnlen(arg, APP_ARG_RSZ_CHARS + 1) == APP_ARG_RSZ_CHARS + 1) {

	if (str_to_unsigned_vals(arg, APP_ARG_RSZ_CHARS, ',', 4,
			&app.nic_rx_ring_size,
			&app.nic_tx_ring_size) != 4)

	if ((app.nic_rx_ring_size == 0) ||
	    (app.nic_tx_ring_size == 0) ||
	    (app.ring_rx_size == 0) ||
	    (app.ring_tx_size == 0)) {

#ifndef APP_ARG_BSZ_CHARS
#define APP_ARG_BSZ_CHARS 63
#endif
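
/*
 * --bsz parser: expects three "(read, write)" pairs, one per pipeline stage
 * (I/O RX, worker, I/O TX), e.g. (illustrative)
 * --bsz "(144, 144), (144, 144), (144, 144)". All values must be non-zero
 * and must fit within APP_MBUF_ARRAY_SIZE.
 */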
parse_arg_bsz(const char *arg)
	const char *p = arg, *p0;

	if (strnlen(arg, APP_ARG_BSZ_CHARS + 1) == APP_ARG_BSZ_CHARS + 1) {

	p0 = strchr(p++, ')');
	    (str_to_unsigned_vals(p, p0 - p, ',', 2, &app.burst_size_io_rx_read, &app.burst_size_io_rx_write) != 2)) {

	p0 = strchr(p++, ')');
	    (str_to_unsigned_vals(p, p0 - p, ',', 2, &app.burst_size_worker_read, &app.burst_size_worker_write) != 2)) {

	p0 = strchr(p++, ')');
	    (str_to_unsigned_vals(p, p0 - p, ',', 2, &app.burst_size_io_tx_read, &app.burst_size_io_tx_write) != 2)) {

	if ((app.burst_size_io_rx_read == 0) ||
	    (app.burst_size_io_rx_write == 0) ||
	    (app.burst_size_worker_read == 0) ||
	    (app.burst_size_worker_write == 0) ||
	    (app.burst_size_io_tx_read == 0) ||
	    (app.burst_size_io_tx_write == 0)) {

	if ((app.burst_size_io_rx_read > APP_MBUF_ARRAY_SIZE) ||
	    (app.burst_size_io_rx_write > APP_MBUF_ARRAY_SIZE) ||
	    (app.burst_size_worker_read > APP_MBUF_ARRAY_SIZE) ||
	    (app.burst_size_worker_write > APP_MBUF_ARRAY_SIZE) ||
	    ((2 * app.burst_size_io_tx_read) > APP_MBUF_ARRAY_SIZE) ||
	    (app.burst_size_io_tx_write > APP_MBUF_ARRAY_SIZE)) {

#ifndef APP_ARG_NUMERICAL_SIZE_CHARS
#define APP_ARG_NUMERICAL_SIZE_CHARS 15
#endif
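
/*
 * --pos-lb parser: POS is the offset of the packet byte used to select the
 * worker lcore. For instance (illustrative), with a plain 14-byte Ethernet
 * header, offset 29 (14 + 15) lands on the last byte of the IPv4 source
 * address.
 */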
parse_arg_pos_lb(const char *arg)
	if (strnlen(arg, APP_ARG_NUMERICAL_SIZE_CHARS + 1) == APP_ARG_NUMERICAL_SIZE_CHARS + 1) {

	x = strtoul(arg, &endpt, 10);
	if (errno != 0 || endpt == arg || *endpt != '\0') {

	app.pos_lb = (uint8_t) x;

/* Parse the argument given in the command line of the application */
app_parse_args(int argc, char **argv)
	char *prgname = argv[0];
	static struct option lgopts[] = {

	uint32_t arg_lpm = 0;
	uint32_t arg_rsz = 0;
	uint32_t arg_bsz = 0;
	uint32_t arg_pos_lb = 0;

	while ((opt = getopt_long(argc, argvopt, "",
			lgopts, &option_index)) != EOF) {

			if (!strcmp(lgopts[option_index].name, "rx")) {
				ret = parse_arg_rx(optarg);
					printf("Incorrect value for --rx argument (%d)\n", ret);

			if (!strcmp(lgopts[option_index].name, "tx")) {
				ret = parse_arg_tx(optarg);
					printf("Incorrect value for --tx argument (%d)\n", ret);

			if (!strcmp(lgopts[option_index].name, "w")) {
				ret = parse_arg_w(optarg);
					printf("Incorrect value for --w argument (%d)\n", ret);

			if (!strcmp(lgopts[option_index].name, "lpm")) {
				ret = parse_arg_lpm(optarg);
					printf("Incorrect value for --lpm argument (%d)\n", ret);

			if (!strcmp(lgopts[option_index].name, "rsz")) {
				ret = parse_arg_rsz(optarg);
					printf("Incorrect value for --rsz argument (%d)\n", ret);

			if (!strcmp(lgopts[option_index].name, "bsz")) {
				ret = parse_arg_bsz(optarg);
					printf("Incorrect value for --bsz argument (%d)\n", ret);

			if (!strcmp(lgopts[option_index].name, "pos-lb")) {
				ret = parse_arg_pos_lb(optarg);
					printf("Incorrect value for --pos-lb argument (%d)\n", ret);

	/* Check that all mandatory arguments are provided */
	if ((arg_rx == 0) || (arg_tx == 0) || (arg_w == 0) || (arg_lpm == 0)) {
		printf("Not all mandatory arguments are present\n");

	/* Assign default values for the optional arguments not provided */
	app.nic_rx_ring_size = APP_DEFAULT_NIC_RX_RING_SIZE;
	app.nic_tx_ring_size = APP_DEFAULT_NIC_TX_RING_SIZE;
	app.ring_rx_size = APP_DEFAULT_RING_RX_SIZE;
	app.ring_tx_size = APP_DEFAULT_RING_TX_SIZE;

	app.burst_size_io_rx_read = APP_DEFAULT_BURST_SIZE_IO_RX_READ;
	app.burst_size_io_rx_write = APP_DEFAULT_BURST_SIZE_IO_RX_WRITE;
	app.burst_size_io_tx_read = APP_DEFAULT_BURST_SIZE_IO_TX_READ;
	app.burst_size_io_tx_write = APP_DEFAULT_BURST_SIZE_IO_TX_WRITE;
	app.burst_size_worker_read = APP_DEFAULT_BURST_SIZE_WORKER_READ;
	app.burst_size_worker_write = APP_DEFAULT_BURST_SIZE_WORKER_WRITE;

	if (arg_pos_lb == 0) {
		app.pos_lb = APP_DEFAULT_IO_RX_LB_POS;

	/* Check cross-consistency of arguments */
	if ((ret = app_check_lpm_table()) < 0) {
		printf("At least one LPM rule is inconsistent (%d)\n", ret);

	if (app_check_every_rx_port_is_tx_enabled() < 0) {
		printf("On LPM lookup miss, packet is sent back on the input port.\n");
		printf("At least one RX port is not enabled for TX.\n");

	argv[optind - 1] = prgname;

	optind = 0; /* reset getopt lib */
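
/*
 * Counts how many RX queues were enabled (via --rx) on the given NIC port by
 * scanning app.nic_rx_queue_mask.
 */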
app_get_nic_rx_queues_per_port(uint8_t port)
	if (port >= APP_MAX_NIC_PORTS) {

	for (i = 0; i < APP_MAX_RX_QUEUES_PER_NIC_PORT; i ++) {
		if (app.nic_rx_queue_mask[port][i] == 1) {
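
/*
 * Looks up which I/O lcore the given (port, queue) pair was assigned to via
 * --rx and, when found, reports it through *lcore_out.
 */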
app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out)
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;

		if (app.lcore_params[lcore].type != e_APP_LCORE_IO) {

		const size_t n_queues = RTE_MIN(lp->rx.n_nic_queues,
				RTE_DIM(lp->rx.nic_queues));
		for (i = 0; i < n_queues; i ++) {
			if ((lp->rx.nic_queues[i].port == port) &&
			    (lp->rx.nic_queues[i].queue == queue)) {

app_get_lcore_for_nic_tx(uint8_t port, uint32_t *lcore_out)
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;

		if (app.lcore_params[lcore].type != e_APP_LCORE_IO) {

		const size_t n_ports = RTE_MIN(lp->tx.n_nic_ports,
				RTE_DIM(lp->tx.nic_ports));
		for (i = 0; i < n_ports; i ++) {
			if (lp->tx.nic_ports[i] == port) {
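
/*
 * Reports whether any enabled I/O or worker lcore runs on the given CPU
 * socket (presumably used so that per-socket resources are only created
 * where needed).
 */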
app_is_socket_used(uint32_t socket)
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		if (app.lcore_params[lcore].type == e_APP_LCORE_DISABLED) {

		if (socket == rte_lcore_to_socket_id(lcore)) {

app_get_lcores_io_rx(void)
	uint32_t lcore, count;

	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp_io->rx.n_nic_queues == 0)) {

app_get_lcores_worker(void)
	uint32_t lcore, count;

	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {

	if (count > APP_MAX_WORKER_LCORES) {
		rte_panic("Algorithmic error (too many worker lcores)\n");
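
/*
 * Dumps the resolved configuration (NIC RX/TX queues, lcore roles, rings,
 * LPM rules, ring and burst sizes) to stdout once argument parsing is done.
 */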
app_print_params(void)
	unsigned port, queue, lcore, rule, i, j;

	/* Print NIC RX configuration */
	printf("NIC RX ports: ");
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		uint32_t n_rx_queues = app_get_nic_rx_queues_per_port((uint8_t) port);

		if (n_rx_queues == 0) {

		printf("%u (", port);
		for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
			if (app.nic_rx_queue_mask[port][queue] == 1) {
				printf("%u ", queue);

	/* Print I/O lcore RX params */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp->rx.n_nic_queues == 0)) {

		printf("I/O lcore %u (socket %u): ", lcore, rte_lcore_to_socket_id(lcore));

		for (i = 0; i < lp->rx.n_nic_queues; i ++) {
				(unsigned) lp->rx.nic_queues[i].port,
				(unsigned) lp->rx.nic_queues[i].queue);

		printf("Output rings ");
		for (i = 0; i < lp->rx.n_rings; i ++) {
			printf("%p ", lp->rx.rings[i]);

	/* Print worker lcore RX params */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {

		printf("Worker lcore %u (socket %u) ID %u: ",
			rte_lcore_to_socket_id(lcore),
			(unsigned) lp->worker_id);

		printf("Input rings ");
		for (i = 0; i < lp->n_rings_in; i ++) {
			printf("%p ", lp->rings_in[i]);

	/* Print NIC TX configuration */
	printf("NIC TX ports: ");
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		if (app.nic_tx_port_mask[port] == 1) {

	/* Print I/O TX lcore params */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;
		uint32_t n_workers = app_get_lcores_worker();

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp->tx.n_nic_ports == 0)) {

		printf("I/O lcore %u (socket %u): ", lcore, rte_lcore_to_socket_id(lcore));

		printf("Input rings per TX port ");
		for (i = 0; i < lp->tx.n_nic_ports; i ++) {
			port = lp->tx.nic_ports[i];

			printf("%u (", port);
			for (j = 0; j < n_workers; j ++) {
				printf("%p ", lp->tx.rings[port][j]);

	/* Print worker lcore TX params */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {

		printf("Worker lcore %u (socket %u) ID %u: \n",
			rte_lcore_to_socket_id(lcore),
			(unsigned) lp->worker_id);

		printf("Output rings per TX port ");
		for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
			if (lp->rings_out[port] != NULL) {
				printf("%u (%p) ", port, lp->rings_out[port]);

	/* Print LPM rules */
	printf("LPM rules: \n");
	for (rule = 0; rule < app.n_lpm_rules; rule ++) {
		uint32_t ip = app.lpm_rules[rule].ip;
		uint8_t depth = app.lpm_rules[rule].depth;
		uint8_t if_out = app.lpm_rules[rule].if_out;

		printf("\t%u: %u.%u.%u.%u/%u => %u;\n",
			(unsigned) (ip & 0xFF000000) >> 24,
			(unsigned) (ip & 0x00FF0000) >> 16,
			(unsigned) (ip & 0x0000FF00) >> 8,
			(unsigned) ip & 0x000000FF,

	printf("Ring sizes: NIC RX = %u; Worker in = %u; Worker out = %u; NIC TX = %u;\n",
		(unsigned) app.nic_rx_ring_size,
		(unsigned) app.ring_rx_size,
		(unsigned) app.ring_tx_size,
		(unsigned) app.nic_tx_ring_size);

	printf("Burst sizes: I/O RX (rd = %u, wr = %u); Worker (rd = %u, wr = %u); I/O TX (rd = %u, wr = %u)\n",
		(unsigned) app.burst_size_io_rx_read,
		(unsigned) app.burst_size_io_rx_write,
		(unsigned) app.burst_size_worker_read,
		(unsigned) app.burst_size_worker_write,
		(unsigned) app.burst_size_io_tx_read,
		(unsigned) app.burst_size_io_tx_write);