app/testpmd: support extended RSS offload types
dpdk.git: app/test-pmd/config.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation.
3  * Copyright 2013-2014 6WIND S.A.
4  */
5
6 #include <stdarg.h>
7 #include <errno.h>
8 #include <stdio.h>
9 #include <string.h>
10 #include <stdint.h>
11 #include <inttypes.h>
12
13 #include <sys/queue.h>
14 #include <sys/types.h>
15 #include <sys/stat.h>
16 #include <fcntl.h>
17 #include <unistd.h>
18
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
22 #include <rte_log.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
33 #include <rte_mbuf.h>
34 #include <rte_interrupts.h>
35 #include <rte_pci.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
40 #include <rte_flow.h>
41 #include <rte_errno.h>
42 #ifdef RTE_LIBRTE_IXGBE_PMD
43 #include <rte_pmd_ixgbe.h>
44 #endif
45 #ifdef RTE_LIBRTE_I40E_PMD
46 #include <rte_pmd_i40e.h>
47 #endif
48 #ifdef RTE_LIBRTE_BNXT_PMD
49 #include <rte_pmd_bnxt.h>
50 #endif
51 #include <rte_gro.h>
52 #include <rte_config.h>
53
54 #include "testpmd.h"
55
56 static char *flowtype_to_str(uint16_t flow_type);
57
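/*
 * Names of the TX packet segment split modes, as accepted by the
 * "set txsplit (off|on|rand)" command.
 */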
58 static const struct {
59         enum tx_pkt_split split;
60         const char *name;
61 } tx_split_name[] = {
62         {
63                 .split = TX_PKT_SPLIT_OFF,
64                 .name = "off",
65         },
66         {
67                 .split = TX_PKT_SPLIT_ON,
68                 .name = "on",
69         },
70         {
71                 .split = TX_PKT_SPLIT_RND,
72                 .name = "rand",
73         },
74 };
75
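/*
 * Map between the RSS type names accepted on the command line
 * (e.g. "port config all rss ipv4-tcp") and the ETH_RSS_* hash flags.
 * The table is terminated by a NULL name so it can be scanned linearly.
 */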
76 const struct rss_type_info rss_type_table[] = {
77         { "all", ETH_RSS_IP | ETH_RSS_TCP |
78                         ETH_RSS_UDP | ETH_RSS_SCTP |
79                         ETH_RSS_L2_PAYLOAD },
80         { "none", 0 },
81         { "ipv4", ETH_RSS_IPV4 },
82         { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
83         { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
84         { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
85         { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
86         { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
87         { "ipv6", ETH_RSS_IPV6 },
88         { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
89         { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
90         { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
91         { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
92         { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
93         { "l2-payload", ETH_RSS_L2_PAYLOAD },
94         { "ipv6-ex", ETH_RSS_IPV6_EX },
95         { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
96         { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
97         { "port", ETH_RSS_PORT },
98         { "vxlan", ETH_RSS_VXLAN },
99         { "geneve", ETH_RSS_GENEVE },
100         { "nvgre", ETH_RSS_NVGRE },
101         { "ip", ETH_RSS_IP },
102         { "udp", ETH_RSS_UDP },
103         { "tcp", ETH_RSS_TCP },
104         { "sctp", ETH_RSS_SCTP },
105         { "tunnel", ETH_RSS_TUNNEL },
106         { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
107         { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
108         { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
109         { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
110         { NULL, 0 },
111 };
112
113 static void
114 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
115 {
116         char buf[RTE_ETHER_ADDR_FMT_SIZE];
117         rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
118         printf("%s%s", name, buf);
119 }
120
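/*
 * Display the basic statistics of a port (plus the per-register queue
 * counters when queue statistics mapping is enabled), followed by the
 * RX/TX packet and bit rates measured since the previous call.
 */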
121 void
122 nic_stats_display(portid_t port_id)
123 {
124         static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
125         static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
126         static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
127         static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
128         static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
129         uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
130                                                                 diff_cycles;
131         uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
132         struct rte_eth_stats stats;
133         struct rte_port *port = &ports[port_id];
134         uint8_t i;
135
136         static const char *nic_stats_border = "########################";
137
138         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
139                 print_valid_ports();
140                 return;
141         }
142         rte_eth_stats_get(port_id, &stats);
143         printf("\n  %s NIC statistics for port %-2d %s\n",
144                nic_stats_border, port_id, nic_stats_border);
145
146         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
147                 printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
148                        "%-"PRIu64"\n",
149                        stats.ipackets, stats.imissed, stats.ibytes);
150                 printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
151                 printf("  RX-nombuf:  %-10"PRIu64"\n",
152                        stats.rx_nombuf);
153                 printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
154                        "%-"PRIu64"\n",
155                        stats.opackets, stats.oerrors, stats.obytes);
156         }
157         else {
158                 printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
159                        "    RX-bytes: %10"PRIu64"\n",
160                        stats.ipackets, stats.ierrors, stats.ibytes);
161                 printf("  RX-errors:  %10"PRIu64"\n", stats.ierrors);
162                 printf("  RX-nombuf:               %10"PRIu64"\n",
163                        stats.rx_nombuf);
164                 printf("  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
165                        "    TX-bytes: %10"PRIu64"\n",
166                        stats.opackets, stats.oerrors, stats.obytes);
167         }
168
169         if (port->rx_queue_stats_mapping_enabled) {
170                 printf("\n");
171                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
172                         printf("  Stats reg %2d RX-packets: %10"PRIu64
173                                "    RX-errors: %10"PRIu64
174                                "    RX-bytes: %10"PRIu64"\n",
175                                i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
176                 }
177         }
178         if (port->tx_queue_stats_mapping_enabled) {
179                 printf("\n");
180                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
181                         printf("  Stats reg %2d TX-packets: %10"PRIu64
182                                "                             TX-bytes: %10"PRIu64"\n",
183                                i, stats.q_opackets[i], stats.q_obytes[i]);
184                 }
185         }
186
187         diff_cycles = prev_cycles[port_id];
188         prev_cycles[port_id] = rte_rdtsc();
189         if (diff_cycles > 0)
190                 diff_cycles = prev_cycles[port_id] - diff_cycles;
191
192         diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
193                 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
194         diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
195                 (stats.opackets - prev_pkts_tx[port_id]) : 0;
196         prev_pkts_rx[port_id] = stats.ipackets;
197         prev_pkts_tx[port_id] = stats.opackets;
198         mpps_rx = diff_cycles > 0 ?
199                 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
200         mpps_tx = diff_cycles > 0 ?
201                 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
202
203         diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
204                 (stats.ibytes - prev_bytes_rx[port_id]) : 0;
205         diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
206                 (stats.obytes - prev_bytes_tx[port_id]) : 0;
207         prev_bytes_rx[port_id] = stats.ibytes;
208         prev_bytes_tx[port_id] = stats.obytes;
209         mbps_rx = diff_cycles > 0 ?
210                 diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0;
211         mbps_tx = diff_cycles > 0 ?
212                 diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0;
213
214         printf("\n  Throughput (since last show)\n");
215         printf("  Rx-pps: %12"PRIu64"          Rx-bps: %12"PRIu64"\n  Tx-pps: %12"
216                PRIu64"          Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
217                mpps_tx, mbps_tx * 8);
218
219         printf("  %s############################%s\n",
220                nic_stats_border, nic_stats_border);
221 }
222
223 void
224 nic_stats_clear(portid_t port_id)
225 {
226         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
227                 print_valid_ports();
228                 return;
229         }
230         rte_eth_stats_reset(port_id);
231         printf("\n  NIC statistics for port %d cleared\n", port_id);
232 }
233
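/*
 * Retrieve and print all extended statistics of a port by name;
 * zero-valued counters are skipped when xstats_hide_zero is set.
 */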
234 void
235 nic_xstats_display(portid_t port_id)
236 {
237         struct rte_eth_xstat *xstats;
238         int cnt_xstats, idx_xstat;
239         struct rte_eth_xstat_name *xstats_names;
240
241         printf("###### NIC extended statistics for port %-2d\n", port_id);
242         if (!rte_eth_dev_is_valid_port(port_id)) {
243                 printf("Error: Invalid port number %i\n", port_id);
244                 return;
245         }
246
247         /* Get count */
248         cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
249         if (cnt_xstats < 0) {
250                 printf("Error: Cannot get count of xstats\n");
251                 return;
252         }
253
254         /* Get id-name lookup table */
255         xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
256         if (xstats_names == NULL) {
257                 printf("Cannot allocate memory for xstats lookup\n");
258                 return;
259         }
260         if (cnt_xstats != rte_eth_xstats_get_names(
261                         port_id, xstats_names, cnt_xstats)) {
262                 printf("Error: Cannot get xstats lookup\n");
263                 free(xstats_names);
264                 return;
265         }
266
267         /* Get stats themselves */
268         xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
269         if (xstats == NULL) {
270                 printf("Cannot allocate memory for xstats\n");
271                 free(xstats_names);
272                 return;
273         }
274         if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
275                 printf("Error: Unable to get xstats\n");
276                 free(xstats_names);
277                 free(xstats);
278                 return;
279         }
280
281         /* Display xstats */
282         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
283                 if (xstats_hide_zero && !xstats[idx_xstat].value)
284                         continue;
285                 printf("%s: %"PRIu64"\n",
286                         xstats_names[idx_xstat].name,
287                         xstats[idx_xstat].value);
288         }
289         free(xstats_names);
290         free(xstats);
291 }
292
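/* Reset the extended statistics of a port. */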
293 void
294 nic_xstats_clear(portid_t port_id)
295 {
296         int ret;
297
298         ret = rte_eth_xstats_reset(port_id);
299         if (ret != 0) {
300                 printf("%s: Error: failed to reset xstats (port %u): %s\n",
301                        __func__, port_id, strerror(-ret));
302         }
303 }
304
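/*
 * Show which RX/TX queues of a port are mapped to which statistics
 * registers, when queue statistics mapping has been configured.
 */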
305 void
306 nic_stats_mapping_display(portid_t port_id)
307 {
308         struct rte_port *port = &ports[port_id];
309         uint16_t i;
310
311         static const char *nic_stats_mapping_border = "########################";
312
313         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
314                 print_valid_ports();
315                 return;
316         }
317
318         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
319                 printf("Port id %d - either does not support queue statistic mapping or"
320                        " no queue statistic mapping set\n", port_id);
321                 return;
322         }
323
324         printf("\n  %s NIC statistics mapping for port %-2d %s\n",
325                nic_stats_mapping_border, port_id, nic_stats_mapping_border);
326
327         if (port->rx_queue_stats_mapping_enabled) {
328                 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
329                         if (rx_queue_stats_mappings[i].port_id == port_id) {
330                                 printf("  RX-queue %2d mapped to Stats Reg %2d\n",
331                                        rx_queue_stats_mappings[i].queue_id,
332                                        rx_queue_stats_mappings[i].stats_counter_id);
333                         }
334                 }
335                 printf("\n");
336         }
337
338
339         if (port->tx_queue_stats_mapping_enabled) {
340                 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
341                         if (tx_queue_stats_mappings[i].port_id == port_id) {
342                                 printf("  TX-queue %2d mapped to Stats Reg %2d\n",
343                                        tx_queue_stats_mappings[i].queue_id,
344                                        tx_queue_stats_mappings[i].stats_counter_id);
345                         }
346                 }
347         }
348
349         printf("  %s####################################%s\n",
350                nic_stats_mapping_border, nic_stats_mapping_border);
351 }
352
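/* Print the name of every option flag set in a burst mode bitmask. */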
353 static void
354 burst_mode_options_display(uint64_t options)
355 {
356         int offset;
357
358         while (options != 0) {
359                 offset = rte_bsf64(options);
360
361                 printf(" %s",
362                        rte_eth_burst_mode_option_name(1ULL << offset));
363
364                 options &= ~(1ULL << offset);
365         }
366 }
367
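/*
 * Print the configuration and status of one RX queue of a port,
 * including its burst mode when the driver reports one.
 */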
368 void
369 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
370 {
371         struct rte_eth_burst_mode mode;
372         struct rte_eth_rxq_info qinfo;
373         int32_t rc;
374         static const char *info_border = "*********************";
375
376         rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
377         if (rc != 0) {
378                 printf("Failed to retrieve information for port: %u, "
379                         "RX queue: %hu\nerror desc: %s(%d)\n",
380                         port_id, queue_id, strerror(-rc), rc);
381                 return;
382         }
383
384         printf("\n%s Infos for port %-2u, RX queue %-2u %s",
385                info_border, port_id, queue_id, info_border);
386
387         printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
388         printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
389         printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
390         printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
391         printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
392         printf("\nRX drop packets: %s",
393                 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
394         printf("\nRX deferred start: %s",
395                 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
396         printf("\nRX scattered packets: %s",
397                 (qinfo.scattered_rx != 0) ? "on" : "off");
398         printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
399
400         if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) {
401                 printf("\nBurst mode:");
402                 burst_mode_options_display(mode.options);
403         }
404
405         printf("\n");
406 }
407
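/*
 * Print the configuration and status of one TX queue of a port,
 * including its burst mode when the driver reports one.
 */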
408 void
409 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
410 {
411         struct rte_eth_burst_mode mode;
412         struct rte_eth_txq_info qinfo;
413         int32_t rc;
414         static const char *info_border = "*********************";
415
416         rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
417         if (rc != 0) {
418                 printf("Failed to retrieve information for port: %u, "
419                         "TX queue: %hu\nerror desc: %s(%d)\n",
420                         port_id, queue_id, strerror(-rc), rc);
421                 return;
422         }
423
424         printf("\n%s Infos for port %-2u, TX queue %-2u %s",
425                info_border, port_id, queue_id, info_border);
426
427         printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
428         printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
429         printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
430         printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
431         printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
432         printf("\nTX deferred start: %s",
433                 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
434         printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
435
436         if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) {
437                 printf("\nBurst mode:");
438                 burst_mode_options_display(mode.options);
439         }
440
441         printf("\n");
442 }
443
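/*
 * Comparison callback for rte_bus_find(): returning 0 means "match",
 * so this makes the lookup visit every registered bus.
 */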
444 static int bus_match_all(const struct rte_bus *bus, const void *data)
445 {
446         RTE_SET_USED(bus);
447         RTE_SET_USED(data);
448         return 0;
449 }
450
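/*
 * List bus, driver and devargs information for every probed device
 * (optionally restricted to the device matching "identifier"),
 * together with the ethdev ports attached to it.
 */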
451 void
452 device_infos_display(const char *identifier)
453 {
454         static const char *info_border = "*********************";
455         struct rte_bus *start = NULL, *next;
456         struct rte_dev_iterator dev_iter;
457         char name[RTE_ETH_NAME_MAX_LEN];
458         struct rte_ether_addr mac_addr;
459         struct rte_device *dev;
460         struct rte_devargs da;
461         portid_t port_id;
462         char devstr[128];
463
464         memset(&da, 0, sizeof(da));
465         if (!identifier)
466                 goto skip_parse;
467
468         if (rte_devargs_parsef(&da, "%s", identifier)) {
469                 printf("cannot parse identifier\n");
470                 if (da.args)
471                         free(da.args);
472                 return;
473         }
474
475 skip_parse:
476         while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
477
478                 start = next;
479                 if (identifier && da.bus != next)
480                         continue;
481
482                 /* Skip buses that don't have iterate method */
483                 if (!next->dev_iterate)
484                         continue;
485
486                 snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
487                 RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
488
489                         if (!dev->driver)
490                                 continue;
491                         /* Check for matching device if identifier is present */
492                         if (identifier &&
493                             strncmp(da.name, dev->name, strlen(dev->name)))
494                                 continue;
495                         printf("\n%s Infos for device %s %s\n",
496                                info_border, dev->name, info_border);
497                         printf("Bus name: %s", dev->bus->name);
498                         printf("\nDriver name: %s", dev->driver->name);
499                         printf("\nDevargs: %s",
500                                dev->devargs ? dev->devargs->args : "");
501                         printf("\nConnect to socket: %d", dev->numa_node);
502                         printf("\n");
503
504                         /* List ports with matching device name */
505                         RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
506                                 printf("\n\tPort id: %-2d", port_id);
507                                 if (eth_macaddr_get_print_err(port_id,
508                                                               &mac_addr) == 0)
509                                         print_ethaddr("\n\tMAC address: ",
510                                                       &mac_addr);
511                                 rte_eth_dev_get_name_by_port(port_id, name);
512                                 printf("\n\tDevice name: %s", name);
513                                 printf("\n");
514                         }
515                 }
516         };
517 }
518
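/*
 * Print a detailed report for one port: MAC address, link status, MTU,
 * VLAN offload state, supported RSS offload types, queue and descriptor
 * limits, and switch information when available.
 */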
519 void
520 port_infos_display(portid_t port_id)
521 {
522         struct rte_port *port;
523         struct rte_ether_addr mac_addr;
524         struct rte_eth_link link;
525         struct rte_eth_dev_info dev_info;
526         int vlan_offload;
527         struct rte_mempool * mp;
528         static const char *info_border = "*********************";
529         uint16_t mtu;
530         char name[RTE_ETH_NAME_MAX_LEN];
531         int ret;
532
533         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
534                 print_valid_ports();
535                 return;
536         }
537         port = &ports[port_id];
538         ret = eth_link_get_nowait_print_err(port_id, &link);
539         if (ret < 0)
540                 return;
541
542         ret = eth_dev_info_get_print_err(port_id, &dev_info);
543         if (ret != 0)
544                 return;
545
546         printf("\n%s Infos for port %-2d %s\n",
547                info_border, port_id, info_border);
548         if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
549                 print_ethaddr("MAC address: ", &mac_addr);
550         rte_eth_dev_get_name_by_port(port_id, name);
551         printf("\nDevice name: %s", name);
552         printf("\nDriver name: %s", dev_info.driver_name);
553         if (dev_info.device->devargs && dev_info.device->devargs->args)
554                 printf("\nDevargs: %s", dev_info.device->devargs->args);
555         printf("\nConnect to socket: %u", port->socket_id);
556
557         if (port_numa[port_id] != NUMA_NO_CONFIG) {
558                 mp = mbuf_pool_find(port_numa[port_id]);
559                 if (mp)
560                         printf("\nmemory allocation on the socket: %d",
561                                                         port_numa[port_id]);
562         } else
563                 printf("\nmemory allocation on the socket: %u",port->socket_id);
564
565         printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
566         printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
567         printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
568                ("full-duplex") : ("half-duplex"));
569
570         if (!rte_eth_dev_get_mtu(port_id, &mtu))
571                 printf("MTU: %u\n", mtu);
572
573         printf("Promiscuous mode: %s\n",
574                rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
575         printf("Allmulticast mode: %s\n",
576                rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
577         printf("Maximum number of MAC addresses: %u\n",
578                (unsigned int)(port->dev_info.max_mac_addrs));
579         printf("Maximum number of MAC addresses of hash filtering: %u\n",
580                (unsigned int)(port->dev_info.max_hash_mac_addrs));
581
582         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
583         if (vlan_offload >= 0){
584                 printf("VLAN offload: \n");
585                 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
586                         printf("  strip on, ");
587                 else
588                         printf("  strip off, ");
589
590                 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
591                         printf("filter on, ");
592                 else
593                         printf("filter off, ");
594
595                 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
596                         printf("extend on, ");
597                 else
598                         printf("extend off, ");
599
600                 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
601                         printf("qinq strip on\n");
602                 else
603                         printf("qinq strip off\n");
604         }
605
606         if (dev_info.hash_key_size > 0)
607                 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
608         if (dev_info.reta_size > 0)
609                 printf("Redirection table size: %u\n", dev_info.reta_size);
610         if (!dev_info.flow_type_rss_offloads)
611                 printf("No RSS offload flow type is supported.\n");
612         else {
613                 uint16_t i;
614                 char *p;
615
616                 printf("Supported RSS offload flow types:\n");
617                 for (i = RTE_ETH_FLOW_UNKNOWN + 1;
618                      i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
619                         if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
620                                 continue;
621                         p = flowtype_to_str(i);
622                         if (p)
623                                 printf("  %s\n", p);
624                         else
625                                 printf("  user defined %d\n", i);
626                 }
627         }
628
629         printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
630         printf("Maximum configurable length of RX packet: %u\n",
631                 dev_info.max_rx_pktlen);
632         if (dev_info.max_vfs)
633                 printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
634         if (dev_info.max_vmdq_pools)
635                 printf("Maximum number of VMDq pools: %u\n",
636                         dev_info.max_vmdq_pools);
637
638         printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
639         printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
640         printf("Max possible number of RXDs per queue: %hu\n",
641                 dev_info.rx_desc_lim.nb_max);
642         printf("Min possible number of RXDs per queue: %hu\n",
643                 dev_info.rx_desc_lim.nb_min);
644         printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
645
646         printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
647         printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
648         printf("Max possible number of TXDs per queue: %hu\n",
649                 dev_info.tx_desc_lim.nb_max);
650         printf("Min possible number of TXDs per queue: %hu\n",
651                 dev_info.tx_desc_lim.nb_min);
652         printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
653         printf("Max segment number per packet: %hu\n",
654                 dev_info.tx_desc_lim.nb_seg_max);
655         printf("Max segment number per MTU/TSO: %hu\n",
656                 dev_info.tx_desc_lim.nb_mtu_seg_max);
657
658         /* Show switch info only if valid switch domain and port id is set */
659         if (dev_info.switch_info.domain_id !=
660                 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
661                 if (dev_info.switch_info.name)
662                         printf("Switch name: %s\n", dev_info.switch_info.name);
663
664                 printf("Switch domain Id: %u\n",
665                         dev_info.switch_info.domain_id);
666                 printf("Switch Port Id: %u\n",
667                         dev_info.switch_info.port_id);
668         }
669 }
670
671 void
672 port_summary_header_display(void)
673 {
674         uint16_t port_number;
675
676         port_number = rte_eth_dev_count_avail();
677         printf("Number of available ports: %i\n", port_number);
678         printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
679                         "Driver", "Status", "Link");
680 }
681
682 void
683 port_summary_display(portid_t port_id)
684 {
685         struct rte_ether_addr mac_addr;
686         struct rte_eth_link link;
687         struct rte_eth_dev_info dev_info;
688         char name[RTE_ETH_NAME_MAX_LEN];
689         int ret;
690
691         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
692                 print_valid_ports();
693                 return;
694         }
695
696         ret = eth_link_get_nowait_print_err(port_id, &link);
697         if (ret < 0)
698                 return;
699
700         ret = eth_dev_info_get_print_err(port_id, &dev_info);
701         if (ret != 0)
702                 return;
703
704         rte_eth_dev_get_name_by_port(port_id, name);
705         ret = eth_macaddr_get_print_err(port_id, &mac_addr);
706         if (ret != 0)
707                 return;
708
709         printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
710                 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
711                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
712                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
713                 dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
714                 (unsigned int) link.link_speed);
715 }
716
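/*
 * For each RX/TX offload capability reported by the driver, print
 * whether it is currently enabled in the port configuration.
 */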
717 void
718 port_offload_cap_display(portid_t port_id)
719 {
720         struct rte_eth_dev_info dev_info;
721         static const char *info_border = "************";
722         int ret;
723
724         if (port_id_is_invalid(port_id, ENABLED_WARN))
725                 return;
726
727         ret = eth_dev_info_get_print_err(port_id, &dev_info);
728         if (ret != 0)
729                 return;
730
731         printf("\n%s Port %d supported offload features: %s\n",
732                 info_border, port_id, info_border);
733
734         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
735                 printf("VLAN stripped:                 ");
736                 if (ports[port_id].dev_conf.rxmode.offloads &
737                     DEV_RX_OFFLOAD_VLAN_STRIP)
738                         printf("on\n");
739                 else
740                         printf("off\n");
741         }
742
743         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
744                 printf("Double VLANs stripped:         ");
745                 if (ports[port_id].dev_conf.rxmode.offloads &
746                     DEV_RX_OFFLOAD_QINQ_STRIP)
747                         printf("on\n");
748                 else
749                         printf("off\n");
750         }
751
752         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
753                 printf("RX IPv4 checksum:              ");
754                 if (ports[port_id].dev_conf.rxmode.offloads &
755                     DEV_RX_OFFLOAD_IPV4_CKSUM)
756                         printf("on\n");
757                 else
758                         printf("off\n");
759         }
760
761         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
762                 printf("RX UDP checksum:               ");
763                 if (ports[port_id].dev_conf.rxmode.offloads &
764                     DEV_RX_OFFLOAD_UDP_CKSUM)
765                         printf("on\n");
766                 else
767                         printf("off\n");
768         }
769
770         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
771                 printf("RX TCP checksum:               ");
772                 if (ports[port_id].dev_conf.rxmode.offloads &
773                     DEV_RX_OFFLOAD_TCP_CKSUM)
774                         printf("on\n");
775                 else
776                         printf("off\n");
777         }
778
779         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
780                 printf("RX SCTP checksum:              ");
781                 if (ports[port_id].dev_conf.rxmode.offloads &
782                     DEV_RX_OFFLOAD_SCTP_CKSUM)
783                         printf("on\n");
784                 else
785                         printf("off\n");
786         }
787
788         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
789                 printf("RX Outer IPv4 checksum:        ");
790                 if (ports[port_id].dev_conf.rxmode.offloads &
791                     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
792                         printf("on\n");
793                 else
794                         printf("off\n");
795         }
796
797         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
798                 printf("RX Outer UDP checksum:         ");
799                 if (ports[port_id].dev_conf.rxmode.offloads &
800                     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
801                         printf("on\n");
802                 else
803                         printf("off\n");
804         }
805
806         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
807                 printf("Large receive offload:         ");
808                 if (ports[port_id].dev_conf.rxmode.offloads &
809                     DEV_RX_OFFLOAD_TCP_LRO)
810                         printf("on\n");
811                 else
812                         printf("off\n");
813         }
814
815         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
816                 printf("HW timestamp:                  ");
817                 if (ports[port_id].dev_conf.rxmode.offloads &
818                     DEV_RX_OFFLOAD_TIMESTAMP)
819                         printf("on\n");
820                 else
821                         printf("off\n");
822         }
823
824         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
825                 printf("Rx Keep CRC:                   ");
826                 if (ports[port_id].dev_conf.rxmode.offloads &
827                     DEV_RX_OFFLOAD_KEEP_CRC)
828                         printf("on\n");
829                 else
830                         printf("off\n");
831         }
832
833         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
834                 printf("RX offload security:           ");
835                 if (ports[port_id].dev_conf.rxmode.offloads &
836                     DEV_RX_OFFLOAD_SECURITY)
837                         printf("on\n");
838                 else
839                         printf("off\n");
840         }
841
842         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
843                 printf("VLAN insert:                   ");
844                 if (ports[port_id].dev_conf.txmode.offloads &
845                     DEV_TX_OFFLOAD_VLAN_INSERT)
846                         printf("on\n");
847                 else
848                         printf("off\n");
849         }
850
851         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
852                 printf("Double VLANs insert:           ");
853                 if (ports[port_id].dev_conf.txmode.offloads &
854                     DEV_TX_OFFLOAD_QINQ_INSERT)
855                         printf("on\n");
856                 else
857                         printf("off\n");
858         }
859
860         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
861                 printf("TX IPv4 checksum:              ");
862                 if (ports[port_id].dev_conf.txmode.offloads &
863                     DEV_TX_OFFLOAD_IPV4_CKSUM)
864                         printf("on\n");
865                 else
866                         printf("off\n");
867         }
868
869         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
870                 printf("TX UDP checksum:               ");
871                 if (ports[port_id].dev_conf.txmode.offloads &
872                     DEV_TX_OFFLOAD_UDP_CKSUM)
873                         printf("on\n");
874                 else
875                         printf("off\n");
876         }
877
878         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
879                 printf("TX TCP checksum:               ");
880                 if (ports[port_id].dev_conf.txmode.offloads &
881                     DEV_TX_OFFLOAD_TCP_CKSUM)
882                         printf("on\n");
883                 else
884                         printf("off\n");
885         }
886
887         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
888                 printf("TX SCTP checksum:              ");
889                 if (ports[port_id].dev_conf.txmode.offloads &
890                     DEV_TX_OFFLOAD_SCTP_CKSUM)
891                         printf("on\n");
892                 else
893                         printf("off\n");
894         }
895
896         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
897                 printf("TX Outer IPv4 checksum:        ");
898                 if (ports[port_id].dev_conf.txmode.offloads &
899                     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
900                         printf("on\n");
901                 else
902                         printf("off\n");
903         }
904
905         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
906                 printf("TX TCP segmentation:           ");
907                 if (ports[port_id].dev_conf.txmode.offloads &
908                     DEV_TX_OFFLOAD_TCP_TSO)
909                         printf("on\n");
910                 else
911                         printf("off\n");
912         }
913
914         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
915                 printf("TX UDP segmentation:           ");
916                 if (ports[port_id].dev_conf.txmode.offloads &
917                     DEV_TX_OFFLOAD_UDP_TSO)
918                         printf("on\n");
919                 else
920                         printf("off\n");
921         }
922
923         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
924                 printf("TSO for VXLAN tunnel packet:   ");
925                 if (ports[port_id].dev_conf.txmode.offloads &
926                     DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
927                         printf("on\n");
928                 else
929                         printf("off\n");
930         }
931
932         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
933                 printf("TSO for GRE tunnel packet:     ");
934                 if (ports[port_id].dev_conf.txmode.offloads &
935                     DEV_TX_OFFLOAD_GRE_TNL_TSO)
936                         printf("on\n");
937                 else
938                         printf("off\n");
939         }
940
941         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
942                 printf("TSO for IPIP tunnel packet:    ");
943                 if (ports[port_id].dev_conf.txmode.offloads &
944                     DEV_TX_OFFLOAD_IPIP_TNL_TSO)
945                         printf("on\n");
946                 else
947                         printf("off\n");
948         }
949
950         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
951                 printf("TSO for GENEVE tunnel packet:  ");
952                 if (ports[port_id].dev_conf.txmode.offloads &
953                     DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
954                         printf("on\n");
955                 else
956                         printf("off\n");
957         }
958
959         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
960                 printf("IP tunnel TSO:  ");
961                 if (ports[port_id].dev_conf.txmode.offloads &
962                     DEV_TX_OFFLOAD_IP_TNL_TSO)
963                         printf("on\n");
964                 else
965                         printf("off\n");
966         }
967
968         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
969                 printf("UDP tunnel TSO:  ");
970                 if (ports[port_id].dev_conf.txmode.offloads &
971                     DEV_TX_OFFLOAD_UDP_TNL_TSO)
972                         printf("on\n");
973                 else
974                         printf("off\n");
975         }
976
977         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
978                 printf("TX Outer UDP checksum:         ");
979                 if (ports[port_id].dev_conf.txmode.offloads &
980                     DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
981                         printf("on\n");
982                 else
983                         printf("off\n");
984         }
985
986 }
987
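/*
 * Return 0 if port_id is RTE_PORT_ALL or a valid ethdev port,
 * 1 otherwise (printing a warning when requested).
 */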
988 int
989 port_id_is_invalid(portid_t port_id, enum print_warning warning)
990 {
991         uint16_t pid;
992
993         if (port_id == (portid_t)RTE_PORT_ALL)
994                 return 0;
995
996         RTE_ETH_FOREACH_DEV(pid)
997                 if (port_id == pid)
998                         return 0;
999
1000         if (warning == ENABLED_WARN)
1001                 printf("Invalid port %d\n", port_id);
1002
1003         return 1;
1004 }
1005
1006 void print_valid_ports(void)
1007 {
1008         portid_t pid;
1009
1010         printf("The valid ports array is [");
1011         RTE_ETH_FOREACH_DEV(pid) {
1012                 printf(" %d", pid);
1013         }
1014         printf(" ]\n");
1015 }
1016
1017 static int
1018 vlan_id_is_invalid(uint16_t vlan_id)
1019 {
1020         if (vlan_id < 4096)
1021                 return 0;
1022         printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
1023         return 1;
1024 }
1025
1026 static int
1027 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
1028 {
1029         const struct rte_pci_device *pci_dev;
1030         const struct rte_bus *bus;
1031         uint64_t pci_len;
1032
1033         if (reg_off & 0x3) {
1034                 printf("Port register offset 0x%X not aligned on a 4-byte "
1035                        "boundary\n",
1036                        (unsigned)reg_off);
1037                 return 1;
1038         }
1039
1040         if (!ports[port_id].dev_info.device) {
1041                 printf("Invalid device\n");
1042                 return 1;
1043         }
1044
1045         bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
1046         if (bus && !strcmp(bus->name, "pci")) {
1047                 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
1048         } else {
1049                 printf("Not a PCI device\n");
1050                 return 1;
1051         }
1052
1053         pci_len = pci_dev->mem_resource[0].len;
1054         if (reg_off >= pci_len) {
1055                 printf("Port %d: register offset %u (0x%X) out of port PCI "
1056                        "resource (length=%"PRIu64")\n",
1057                        port_id, (unsigned)reg_off, (unsigned)reg_off,  pci_len);
1058                 return 1;
1059         }
1060         return 0;
1061 }
1062
1063 static int
1064 reg_bit_pos_is_invalid(uint8_t bit_pos)
1065 {
1066         if (bit_pos <= 31)
1067                 return 0;
1068         printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
1069         return 1;
1070 }
1071
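/*
 * Helpers below read and modify 32-bit registers within PCI resource 0
 * of a port and print the resulting values.
 */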
1072 #define display_port_and_reg_off(port_id, reg_off) \
1073         printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
1074
1075 static inline void
1076 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1077 {
1078         display_port_and_reg_off(port_id, (unsigned)reg_off);
1079         printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
1080 }
1081
1082 void
1083 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
1084 {
1085         uint32_t reg_v;
1086
1087
1088         if (port_id_is_invalid(port_id, ENABLED_WARN))
1089                 return;
1090         if (port_reg_off_is_invalid(port_id, reg_off))
1091                 return;
1092         if (reg_bit_pos_is_invalid(bit_x))
1093                 return;
1094         reg_v = port_id_pci_reg_read(port_id, reg_off);
1095         display_port_and_reg_off(port_id, (unsigned)reg_off);
1096         printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
1097 }
1098
1099 void
1100 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
1101                            uint8_t bit1_pos, uint8_t bit2_pos)
1102 {
1103         uint32_t reg_v;
1104         uint8_t  l_bit;
1105         uint8_t  h_bit;
1106
1107         if (port_id_is_invalid(port_id, ENABLED_WARN))
1108                 return;
1109         if (port_reg_off_is_invalid(port_id, reg_off))
1110                 return;
1111         if (reg_bit_pos_is_invalid(bit1_pos))
1112                 return;
1113         if (reg_bit_pos_is_invalid(bit2_pos))
1114                 return;
1115         if (bit1_pos > bit2_pos)
1116                 l_bit = bit2_pos, h_bit = bit1_pos;
1117         else
1118                 l_bit = bit1_pos, h_bit = bit2_pos;
1119
1120         reg_v = port_id_pci_reg_read(port_id, reg_off);
1121         reg_v >>= l_bit;
1122         if (h_bit < 31)
1123                 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
1124         display_port_and_reg_off(port_id, (unsigned)reg_off);
1125         printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
1126                ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
1127 }
1128
1129 void
1130 port_reg_display(portid_t port_id, uint32_t reg_off)
1131 {
1132         uint32_t reg_v;
1133
1134         if (port_id_is_invalid(port_id, ENABLED_WARN))
1135                 return;
1136         if (port_reg_off_is_invalid(port_id, reg_off))
1137                 return;
1138         reg_v = port_id_pci_reg_read(port_id, reg_off);
1139         display_port_reg_value(port_id, reg_off, reg_v);
1140 }
1141
1142 void
1143 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
1144                  uint8_t bit_v)
1145 {
1146         uint32_t reg_v;
1147
1148         if (port_id_is_invalid(port_id, ENABLED_WARN))
1149                 return;
1150         if (port_reg_off_is_invalid(port_id, reg_off))
1151                 return;
1152         if (reg_bit_pos_is_invalid(bit_pos))
1153                 return;
1154         if (bit_v > 1) {
1155                 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
1156                 return;
1157         }
1158         reg_v = port_id_pci_reg_read(port_id, reg_off);
1159         if (bit_v == 0)
1160                 reg_v &= ~(1 << bit_pos);
1161         else
1162                 reg_v |= (1 << bit_pos);
1163         port_id_pci_reg_write(port_id, reg_off, reg_v);
1164         display_port_reg_value(port_id, reg_off, reg_v);
1165 }
1166
1167 void
1168 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
1169                        uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
1170 {
1171         uint32_t max_v;
1172         uint32_t reg_v;
1173         uint8_t  l_bit;
1174         uint8_t  h_bit;
1175
1176         if (port_id_is_invalid(port_id, ENABLED_WARN))
1177                 return;
1178         if (port_reg_off_is_invalid(port_id, reg_off))
1179                 return;
1180         if (reg_bit_pos_is_invalid(bit1_pos))
1181                 return;
1182         if (reg_bit_pos_is_invalid(bit2_pos))
1183                 return;
1184         if (bit1_pos > bit2_pos)
1185                 l_bit = bit2_pos, h_bit = bit1_pos;
1186         else
1187                 l_bit = bit1_pos, h_bit = bit2_pos;
1188
1189         if ((h_bit - l_bit) < 31)
1190                 max_v = (1 << (h_bit - l_bit + 1)) - 1;
1191         else
1192                 max_v = 0xFFFFFFFF;
1193
1194         if (value > max_v) {
1195                 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
1196                                 (unsigned)value, (unsigned)value,
1197                                 (unsigned)max_v, (unsigned)max_v);
1198                 return;
1199         }
1200         reg_v = port_id_pci_reg_read(port_id, reg_off);
1201         reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1202         reg_v |= (value << l_bit); /* Set changed bits */
1203         port_id_pci_reg_write(port_id, reg_off, reg_v);
1204         display_port_reg_value(port_id, reg_off, reg_v);
1205 }
1206
1207 void
1208 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1209 {
1210         if (port_id_is_invalid(port_id, ENABLED_WARN))
1211                 return;
1212         if (port_reg_off_is_invalid(port_id, reg_off))
1213                 return;
1214         port_id_pci_reg_write(port_id, reg_off, reg_v);
1215         display_port_reg_value(port_id, reg_off, reg_v);
1216 }
1217
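/*
 * Change the MTU of a port after checking the requested value against
 * the minimum and maximum MTU reported by the driver.
 */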
1218 void
1219 port_mtu_set(portid_t port_id, uint16_t mtu)
1220 {
1221         int diag;
1222         struct rte_eth_dev_info dev_info;
1223         int ret;
1224
1225         if (port_id_is_invalid(port_id, ENABLED_WARN))
1226                 return;
1227
1228         ret = eth_dev_info_get_print_err(port_id, &dev_info);
1229         if (ret != 0)
1230                 return;
1231
1232         if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
1233                 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
1234                         mtu, dev_info.min_mtu, dev_info.max_mtu);
1235                 return;
1236         }
1237         diag = rte_eth_dev_set_mtu(port_id, mtu);
1238         if (diag == 0)
1239                 return;
1240         printf("Set MTU failed. diag=%d\n", diag);
1241 }
1242
1243 /* Generic flow management functions. */
1244
1245 /** Generate a port_flow entry from attributes/pattern/actions. */
1246 static struct port_flow *
1247 port_flow_new(const struct rte_flow_attr *attr,
1248               const struct rte_flow_item *pattern,
1249               const struct rte_flow_action *actions,
1250               struct rte_flow_error *error)
1251 {
1252         const struct rte_flow_conv_rule rule = {
1253                 .attr_ro = attr,
1254                 .pattern_ro = pattern,
1255                 .actions_ro = actions,
1256         };
1257         struct port_flow *pf;
1258         int ret;
1259
1260         ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1261         if (ret < 0)
1262                 return NULL;
1263         pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1264         if (!pf) {
1265                 rte_flow_error_set
1266                         (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1267                          "calloc() failed");
1268                 return NULL;
1269         }
1270         if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1271                           error) >= 0)
1272                 return pf;
1273         free(pf);
1274         return NULL;
1275 }
1276
1277 /** Print a message out of a flow error. */
1278 static int
1279 port_flow_complain(struct rte_flow_error *error)
1280 {
1281         static const char *const errstrlist[] = {
1282                 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1283                 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1284                 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1285                 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1286                 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1287                 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1288                 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1289                 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1290                 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1291                 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1292                 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1293                 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1294                 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1295                 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1296                 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1297                 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1298                 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1299         };
1300         const char *errstr;
1301         char buf[32];
1302         int err = rte_errno;
1303
1304         if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1305             !errstrlist[error->type])
1306                 errstr = "unknown type";
1307         else
1308                 errstr = errstrlist[error->type];
1309         printf("Caught error type %d (%s): %s%s: %s\n",
1310                error->type, errstr,
1311                error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1312                                         error->cause), buf) : "",
1313                error->message ? error->message : "(no stated reason)",
1314                rte_strerror(err));
1315         return -err;
1316 }
1317
1318 /** Validate flow rule. */
1319 int
1320 port_flow_validate(portid_t port_id,
1321                    const struct rte_flow_attr *attr,
1322                    const struct rte_flow_item *pattern,
1323                    const struct rte_flow_action *actions)
1324 {
1325         struct rte_flow_error error;
1326
1327         /* Poisoning to make sure PMDs update it in case of error. */
1328         memset(&error, 0x11, sizeof(error));
1329         if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1330                 return port_flow_complain(&error);
1331         printf("Flow rule validated\n");
1332         return 0;
1333 }
1334
1335 /** Create flow rule. */
1336 int
1337 port_flow_create(portid_t port_id,
1338                  const struct rte_flow_attr *attr,
1339                  const struct rte_flow_item *pattern,
1340                  const struct rte_flow_action *actions)
1341 {
1342         struct rte_flow *flow;
1343         struct rte_port *port;
1344         struct port_flow *pf;
1345         uint32_t id;
1346         struct rte_flow_error error;
1347
1348         /* Poisoning to make sure PMDs update it in case of error. */
1349         memset(&error, 0x22, sizeof(error));
1350         flow = rte_flow_create(port_id, attr, pattern, actions, &error);
1351         if (!flow)
1352                 return port_flow_complain(&error);
1353         port = &ports[port_id];
1354         if (port->flow_list) {
1355                 if (port->flow_list->id == UINT32_MAX) {
1356                         printf("Highest rule ID is already assigned, delete"
1357                                " it first");
1358                         rte_flow_destroy(port_id, flow, NULL);
1359                         return -ENOMEM;
1360                 }
1361                 id = port->flow_list->id + 1;
1362         } else
1363                 id = 0;
1364         pf = port_flow_new(attr, pattern, actions, &error);
1365         if (!pf) {
1366                 rte_flow_destroy(port_id, flow, NULL);
1367                 return port_flow_complain(&error);
1368         }
1369         pf->next = port->flow_list;
1370         pf->id = id;
1371         pf->flow = flow;
1372         port->flow_list = pf;
1373         printf("Flow rule #%u created\n", pf->id);
1374         return 0;
1375 }
1376
1377 /** Destroy a number of flow rules. */
1378 int
1379 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
1380 {
1381         struct rte_port *port;
1382         struct port_flow **tmp;
1383         uint32_t c = 0;
1384         int ret = 0;
1385
1386         if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1387             port_id == (portid_t)RTE_PORT_ALL)
1388                 return -EINVAL;
1389         port = &ports[port_id];
1390         tmp = &port->flow_list;
1391         while (*tmp) {
1392                 uint32_t i;
1393
1394                 for (i = 0; i != n; ++i) {
1395                         struct rte_flow_error error;
1396                         struct port_flow *pf = *tmp;
1397
1398                         if (rule[i] != pf->id)
1399                                 continue;
1400                         /*
1401                          * Poisoning to make sure PMDs update it in case
1402                          * of error.
1403                          */
1404                         memset(&error, 0x33, sizeof(error));
1405                         if (rte_flow_destroy(port_id, pf->flow, &error)) {
1406                                 ret = port_flow_complain(&error);
1407                                 continue;
1408                         }
1409                         printf("Flow rule #%u destroyed\n", pf->id);
1410                         *tmp = pf->next;
1411                         free(pf);
1412                         break;
1413                 }
1414                 if (i == n)
1415                         tmp = &(*tmp)->next;
1416                 ++c;
1417         }
1418         return ret;
1419 }
1420
1421 /** Remove all flow rules. */
1422 int
1423 port_flow_flush(portid_t port_id)
1424 {
1425         struct rte_flow_error error;
1426         struct rte_port *port;
1427         int ret = 0;
1428
1429         /* Poisoning to make sure PMDs update it in case of error. */
1430         memset(&error, 0x44, sizeof(error));
1431         if (rte_flow_flush(port_id, &error)) {
1432                 ret = port_flow_complain(&error);
1433                 if (port_id_is_invalid(port_id, DISABLED_WARN) ||
1434                     port_id == (portid_t)RTE_PORT_ALL)
1435                         return ret;
1436         }
1437         port = &ports[port_id];
1438         while (port->flow_list) {
1439                 struct port_flow *pf = port->flow_list->next;
1440
1441                 free(port->flow_list);
1442                 port->flow_list = pf;
1443         }
1444         return ret;
1445 }
1446
1447 /** Query a flow rule. */
1448 int
1449 port_flow_query(portid_t port_id, uint32_t rule,
1450                 const struct rte_flow_action *action)
1451 {
1452         struct rte_flow_error error;
1453         struct rte_port *port;
1454         struct port_flow *pf;
1455         const char *name;
1456         union {
1457                 struct rte_flow_query_count count;
1458         } query;
1459         int ret;
1460
1461         if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1462             port_id == (portid_t)RTE_PORT_ALL)
1463                 return -EINVAL;
1464         port = &ports[port_id];
1465         for (pf = port->flow_list; pf; pf = pf->next)
1466                 if (pf->id == rule)
1467                         break;
1468         if (!pf) {
1469                 printf("Flow rule #%u not found\n", rule);
1470                 return -ENOENT;
1471         }
1472         ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1473                             &name, sizeof(name),
1474                             (void *)(uintptr_t)action->type, &error);
1475         if (ret < 0)
1476                 return port_flow_complain(&error);
1477         switch (action->type) {
1478         case RTE_FLOW_ACTION_TYPE_COUNT:
1479                 break;
1480         default:
1481                 printf("Cannot query action type %d (%s)\n",
1482                         action->type, name);
1483                 return -ENOTSUP;
1484         }
1485         /* Poisoning to make sure PMDs update it in case of error. */
1486         memset(&error, 0x55, sizeof(error));
1487         memset(&query, 0, sizeof(query));
1488         if (rte_flow_query(port_id, pf->flow, action, &query, &error))
1489                 return port_flow_complain(&error);
1490         switch (action->type) {
1491         case RTE_FLOW_ACTION_TYPE_COUNT:
1492                 printf("%s:\n"
1493                        " hits_set: %u\n"
1494                        " bytes_set: %u\n"
1495                        " hits: %" PRIu64 "\n"
1496                        " bytes: %" PRIu64 "\n",
1497                        name,
1498                        query.count.hits_set,
1499                        query.count.bytes_set,
1500                        query.count.hits,
1501                        query.count.bytes);
1502                 break;
1503         default:
1504                 printf("Cannot display result for action type %d (%s)\n",
1505                        action->type, name);
1506                 break;
1507         }
1508         return 0;
1509 }
1510
1511 /** List flow rules. */
1512 void
1513 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
1514 {
1515         struct rte_port *port;
1516         struct port_flow *pf;
1517         struct port_flow *list = NULL;
1518         uint32_t i;
1519
1520         if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1521             port_id == (portid_t)RTE_PORT_ALL)
1522                 return;
1523         port = &ports[port_id];
1524         if (!port->flow_list)
1525                 return;
1526         /* Sort flows by group, priority and ID. */
1527         for (pf = port->flow_list; pf != NULL; pf = pf->next) {
1528                 struct port_flow **tmp;
1529                 const struct rte_flow_attr *curr = pf->rule.attr;
1530
1531                 if (n) {
1532                         /* Filter out unwanted groups. */
1533                         for (i = 0; i != n; ++i)
1534                                 if (curr->group == group[i])
1535                                         break;
1536                         if (i == n)
1537                                 continue;
1538                 }
1539                 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
1540                         const struct rte_flow_attr *comp = (*tmp)->rule.attr;
1541
1542                         if (curr->group > comp->group ||
1543                             (curr->group == comp->group &&
1544                              curr->priority > comp->priority) ||
1545                             (curr->group == comp->group &&
1546                              curr->priority == comp->priority &&
1547                              pf->id > (*tmp)->id))
1548                                 continue;
1549                         break;
1550                 }
1551                 pf->tmp = *tmp;
1552                 *tmp = pf;
1553         }
1554         printf("ID\tGroup\tPrio\tAttr\tRule\n");
1555         for (pf = list; pf != NULL; pf = pf->tmp) {
1556                 const struct rte_flow_item *item = pf->rule.pattern;
1557                 const struct rte_flow_action *action = pf->rule.actions;
1558                 const char *name;
1559
1560                 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
1561                        pf->id,
1562                        pf->rule.attr->group,
1563                        pf->rule.attr->priority,
1564                        pf->rule.attr->ingress ? 'i' : '-',
1565                        pf->rule.attr->egress ? 'e' : '-',
1566                        pf->rule.attr->transfer ? 't' : '-');
1567                 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1568                         if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
1569                                           &name, sizeof(name),
1570                                           (void *)(uintptr_t)item->type,
1571                                           NULL) <= 0)
1572                                 name = "[UNKNOWN]";
1573                         if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
1574                                 printf("%s ", name);
1575                         ++item;
1576                 }
1577                 printf("=>");
1578                 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
1579                         if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1580                                           &name, sizeof(name),
1581                                           (void *)(uintptr_t)action->type,
1582                                           NULL) <= 0)
1583                                 name = "[UNKNOWN]";
1584                         if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
1585                                 printf(" %s", name);
1586                         ++action;
1587                 }
1588                 printf("\n");
1589         }
1590 }
1591
1592 /** Restrict ingress traffic to the defined flow rules. */
1593 int
1594 port_flow_isolate(portid_t port_id, int set)
1595 {
1596         struct rte_flow_error error;
1597
1598         /* Poisoning to make sure PMDs update it in case of error. */
1599         memset(&error, 0x66, sizeof(error));
1600         if (rte_flow_isolate(port_id, set, &error))
1601                 return port_flow_complain(&error);
1602         printf("Ingress traffic on port %u is %s to the defined flow rules\n",
1603                port_id,
1604                set ? "now restricted" : "not restricted anymore");
1605         return 0;
1606 }
1607
1608 /*
1609  * RX/TX ring descriptors display functions.
1610  */
1611 int
1612 rx_queue_id_is_invalid(queueid_t rxq_id)
1613 {
1614         if (rxq_id < nb_rxq)
1615                 return 0;
1616         printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
1617         return 1;
1618 }
1619
1620 int
1621 tx_queue_id_is_invalid(queueid_t txq_id)
1622 {
1623         if (txq_id < nb_txq)
1624                 return 0;
1625         printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
1626         return 1;
1627 }
1628
1629 static int
1630 rx_desc_id_is_invalid(uint16_t rxdesc_id)
1631 {
1632         if (rxdesc_id < nb_rxd)
1633                 return 0;
1634         printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
1635                rxdesc_id, nb_rxd);
1636         return 1;
1637 }
1638
1639 static int
1640 tx_desc_id_is_invalid(uint16_t txdesc_id)
1641 {
1642         if (txdesc_id < nb_txd)
1643                 return 0;
1644         printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
1645                txdesc_id, nb_txd);
1646         return 1;
1647 }
1648
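/*
 * Look up the memzone registered for the descriptor ring of the given
 * port/queue. The zone name follows the "eth_p<port>_q<queue>_<ring>"
 * convention used below when formatting mz_name.
 */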
1649 static const struct rte_memzone *
1650 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
1651 {
1652         char mz_name[RTE_MEMZONE_NAMESIZE];
1653         const struct rte_memzone *mz;
1654
1655         snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
1656                         port_id, q_id, ring_name);
1657         mz = rte_memzone_lookup(mz_name);
1658         if (mz == NULL)
1659                 printf("%s ring memory zone of (port %d, queue %d) not"
1660                        " found (zone name = %s)\n",
1661                        ring_name, port_id, q_id, mz_name);
1662         return mz;
1663 }
1664
1665 union igb_ring_dword {
1666         uint64_t dword;
1667         struct {
1668 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1669                 uint32_t lo;
1670                 uint32_t hi;
1671 #else
1672                 uint32_t hi;
1673                 uint32_t lo;
1674 #endif
1675         } words;
1676 };
1677
1678 struct igb_ring_desc_32_bytes {
1679         union igb_ring_dword lo_dword;
1680         union igb_ring_dword hi_dword;
1681         union igb_ring_dword resv1;
1682         union igb_ring_dword resv2;
1683 };
1684
1685 struct igb_ring_desc_16_bytes {
1686         union igb_ring_dword lo_dword;
1687         union igb_ring_dword hi_dword;
1688 };
1689
1690 static void
1691 ring_rxd_display_dword(union igb_ring_dword dword)
1692 {
1693         printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
1694                                         (unsigned)dword.words.hi);
1695 }
1696
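/*
 * Display one RX descriptor from the ring memzone. Descriptors are dumped
 * as 16-byte entries, except on i40e ports (detected from the driver name)
 * where the full 32-byte descriptor layout is shown, unless 16-byte i40e
 * descriptors were selected at build time.
 */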
1697 static void
1698 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
1699 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1700                            portid_t port_id,
1701 #else
1702                            __rte_unused portid_t port_id,
1703 #endif
1704                            uint16_t desc_id)
1705 {
1706         struct igb_ring_desc_16_bytes *ring =
1707                 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1708 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1709         int ret;
1710         struct rte_eth_dev_info dev_info;
1711
1712         ret = eth_dev_info_get_print_err(port_id, &dev_info);
1713         if (ret != 0)
1714                 return;
1715
1716         if (strstr(dev_info.driver_name, "i40e") != NULL) {
1717                 /* 32 bytes RX descriptor, i40e only */
1718                 struct igb_ring_desc_32_bytes *ring =
1719                         (struct igb_ring_desc_32_bytes *)ring_mz->addr;
1720                 ring[desc_id].lo_dword.dword =
1721                         rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1722                 ring_rxd_display_dword(ring[desc_id].lo_dword);
1723                 ring[desc_id].hi_dword.dword =
1724                         rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1725                 ring_rxd_display_dword(ring[desc_id].hi_dword);
1726                 ring[desc_id].resv1.dword =
1727                         rte_le_to_cpu_64(ring[desc_id].resv1.dword);
1728                 ring_rxd_display_dword(ring[desc_id].resv1);
1729                 ring[desc_id].resv2.dword =
1730                         rte_le_to_cpu_64(ring[desc_id].resv2.dword);
1731                 ring_rxd_display_dword(ring[desc_id].resv2);
1732
1733                 return;
1734         }
1735 #endif
1736         /* 16 bytes RX descriptor */
1737         ring[desc_id].lo_dword.dword =
1738                 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1739         ring_rxd_display_dword(ring[desc_id].lo_dword);
1740         ring[desc_id].hi_dword.dword =
1741                 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1742         ring_rxd_display_dword(ring[desc_id].hi_dword);
1743 }
1744
1745 static void
1746 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
1747 {
1748         struct igb_ring_desc_16_bytes *ring;
1749         struct igb_ring_desc_16_bytes txd;
1750
1751         ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1752         txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1753         txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1754         printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
1755                         (unsigned)txd.lo_dword.words.lo,
1756                         (unsigned)txd.lo_dword.words.hi,
1757                         (unsigned)txd.hi_dword.words.lo,
1758                         (unsigned)txd.hi_dword.words.hi);
1759 }
1760
1761 void
1762 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
1763 {
1764         const struct rte_memzone *rx_mz;
1765
1766         if (port_id_is_invalid(port_id, ENABLED_WARN))
1767                 return;
1768         if (rx_queue_id_is_invalid(rxq_id))
1769                 return;
1770         if (rx_desc_id_is_invalid(rxd_id))
1771                 return;
1772         rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
1773         if (rx_mz == NULL)
1774                 return;
1775         ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
1776 }
1777
1778 void
1779 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
1780 {
1781         const struct rte_memzone *tx_mz;
1782
1783         if (port_id_is_invalid(port_id, ENABLED_WARN))
1784                 return;
1785         if (tx_queue_id_is_invalid(txq_id))
1786                 return;
1787         if (tx_desc_id_is_invalid(txd_id))
1788                 return;
1789         tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
1790         if (tx_mz == NULL)
1791                 return;
1792         ring_tx_descriptor_display(tx_mz, txd_id);
1793 }
1794
1795 void
1796 fwd_lcores_config_display(void)
1797 {
1798         lcoreid_t lc_id;
1799
1800         printf("List of forwarding lcores:");
1801         for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
1802                 printf(" %2u", fwd_lcores_cpuids[lc_id]);
1803         printf("\n");
1804 }
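/*
 * Display the current forwarding and RX/TX queue configuration of every
 * port: offload flags, descriptor counts and threshold registers. Only the
 * first queue of each port is shown to keep the output short.
 */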
1805 void
1806 rxtx_config_display(void)
1807 {
1808         portid_t pid;
1809         queueid_t qid;
1810
1811         printf("  %s packet forwarding%s packets/burst=%d\n",
1812                cur_fwd_eng->fwd_mode_name,
1813                retry_enabled == 0 ? "" : " with retry",
1814                nb_pkt_per_burst);
1815
1816         if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
1817                 printf("  packet len=%u - nb packet segments=%d\n",
1818                                 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
1819
1820         printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
1821                nb_fwd_lcores, nb_fwd_ports);
1822
1823         RTE_ETH_FOREACH_DEV(pid) {
1824                 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
1825                 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
1826                 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
1827                 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
1828                 uint16_t nb_rx_desc_tmp;
1829                 uint16_t nb_tx_desc_tmp;
1830                 struct rte_eth_rxq_info rx_qinfo;
1831                 struct rte_eth_txq_info tx_qinfo;
1832                 int32_t rc;
1833
1834                 /* per port config */
1835                 printf("  port %d: RX queue number: %d TX queue number: %d\n",
1836                                 (unsigned int)pid, nb_rxq, nb_txq);
1837
1838                 printf("    Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
1839                                 ports[pid].dev_conf.rxmode.offloads,
1840                                 ports[pid].dev_conf.txmode.offloads);
1841
1842                 /* per rx queue config only for first queue to be less verbose */
1843                 for (qid = 0; qid < 1; qid++) {
1844                         rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
1845                         if (rc)
1846                                 nb_rx_desc_tmp = nb_rx_desc[qid];
1847                         else
1848                                 nb_rx_desc_tmp = rx_qinfo.nb_desc;
1849
1850                         printf("    RX queue: %d\n", qid);
1851                         printf("      RX desc=%d - RX free threshold=%d\n",
1852                                 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
1853                         printf("      RX threshold registers: pthresh=%d hthresh=%d"
1854                                 " wthresh=%d\n",
1855                                 rx_conf[qid].rx_thresh.pthresh,
1856                                 rx_conf[qid].rx_thresh.hthresh,
1857                                 rx_conf[qid].rx_thresh.wthresh);
1858                         printf("      RX Offloads=0x%"PRIx64"\n",
1859                                 rx_conf[qid].offloads);
1860                 }
1861
1862                 /* per tx queue config only for first queue to be less verbose */
1863                 for (qid = 0; qid < 1; qid++) {
1864                         rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
1865                         if (rc)
1866                                 nb_tx_desc_tmp = nb_tx_desc[qid];
1867                         else
1868                                 nb_tx_desc_tmp = tx_qinfo.nb_desc;
1869
1870                         printf("    TX queue: %d\n", qid);
1871                         printf("      TX desc=%d - TX free threshold=%d\n",
1872                                 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
1873                         printf("      TX threshold registers: pthresh=%d hthresh=%d"
1874                                 " wthresh=%d\n",
1875                                 tx_conf[qid].tx_thresh.pthresh,
1876                                 tx_conf[qid].tx_thresh.hthresh,
1877                                 tx_conf[qid].tx_thresh.wthresh);
1878                         printf("      TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
1879                                 tx_conf[qid].offloads, tx_conf->tx_rs_thresh);
1880                 }
1881         }
1882 }
1883
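/*
 * Query and display the RSS redirection table (RETA) of a port: for each
 * entry selected in reta_conf[].mask, print the hash index and the RX
 * queue it is redirected to.
 */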
1884 void
1885 port_rss_reta_info(portid_t port_id,
1886                    struct rte_eth_rss_reta_entry64 *reta_conf,
1887                    uint16_t nb_entries)
1888 {
1889         uint16_t i, idx, shift;
1890         int ret;
1891
1892         if (port_id_is_invalid(port_id, ENABLED_WARN))
1893                 return;
1894
1895         ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
1896         if (ret != 0) {
1897                 printf("Failed to get RSS RETA info, return code = %d\n", ret);
1898                 return;
1899         }
1900
1901         for (i = 0; i < nb_entries; i++) {
1902                 idx = i / RTE_RETA_GROUP_SIZE;
1903                 shift = i % RTE_RETA_GROUP_SIZE;
1904                 if (!(reta_conf[idx].mask & (1ULL << shift)))
1905                         continue;
1906                 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
1907                                         i, reta_conf[idx].reta[shift]);
1908         }
1909 }
1910
1911 /*
1912  * Displays the RSS hash functions of a port and, optionally, the RSS hash
1913  * key of the port.
1914  */
1915 void
1916 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
1917 {
1918         struct rte_eth_rss_conf rss_conf = {0};
1919         uint8_t rss_key[RSS_HASH_KEY_LENGTH];
1920         uint64_t rss_hf;
1921         uint8_t i;
1922         int diag;
1923         struct rte_eth_dev_info dev_info;
1924         uint8_t hash_key_size;
1925         int ret;
1926
1927         if (port_id_is_invalid(port_id, ENABLED_WARN))
1928                 return;
1929
1930         ret = eth_dev_info_get_print_err(port_id, &dev_info);
1931         if (ret != 0)
1932                 return;
1933
1934         if (dev_info.hash_key_size > 0 &&
1935                         dev_info.hash_key_size <= sizeof(rss_key))
1936                 hash_key_size = dev_info.hash_key_size;
1937         else {
1938                 printf("dev_info did not provide a valid hash key size\n");
1939                 return;
1940         }
1941
1942         /* Get RSS hash key if asked to display it */
1943         rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
1944         rss_conf.rss_key_len = hash_key_size;
1945         diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1946         if (diag != 0) {
1947                 switch (diag) {
1948                 case -ENODEV:
1949                         printf("port index %d invalid\n", port_id);
1950                         break;
1951                 case -ENOTSUP:
1952                         printf("operation not supported by device\n");
1953                         break;
1954                 default:
1955                         printf("operation failed - diag=%d\n", diag);
1956                         break;
1957                 }
1958                 return;
1959         }
1960         rss_hf = rss_conf.rss_hf;
1961         if (rss_hf == 0) {
1962                 printf("RSS disabled\n");
1963                 return;
1964         }
1965         printf("RSS functions:\n ");
1966         for (i = 0; rss_type_table[i].str; i++) {
1967                 if (rss_hf & rss_type_table[i].rss_type)
1968                         printf("%s ", rss_type_table[i].str);
1969         }
1970         printf("\n");
1971         if (!show_rss_key)
1972                 return;
1973         printf("RSS key:\n");
1974         for (i = 0; i < hash_key_size; i++)
1975                 printf("%02X", rss_key[i]);
1976         printf("\n");
1977 }
1978
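/*
 * Update the RSS hash key of a port: the current RSS configuration is
 * retrieved with rte_eth_dev_rss_hash_conf_get(), the new key is plugged
 * into it and the result is applied with rte_eth_dev_rss_hash_update().
 */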
1979 void
1980 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
1981                          uint hash_key_len)
1982 {
1983         struct rte_eth_rss_conf rss_conf;
1984         int diag;
1985         unsigned int i;
1986
1987         rss_conf.rss_key = NULL;
1988         rss_conf.rss_key_len = hash_key_len;
1989         rss_conf.rss_hf = 0;
1990         for (i = 0; rss_type_table[i].str; i++) {
1991                 if (!strcmp(rss_type_table[i].str, rss_type))
1992                         rss_conf.rss_hf = rss_type_table[i].rss_type;
1993         }
1994         diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1995         if (diag == 0) {
1996                 rss_conf.rss_key = hash_key;
1997                 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
1998         }
1999         if (diag == 0)
2000                 return;
2001
2002         switch (diag) {
2003         case -ENODEV:
2004                 printf("port index %d invalid\n", port_id);
2005                 break;
2006         case -ENOTSUP:
2007                 printf("operation not supported by device\n");
2008                 break;
2009         default:
2010                 printf("operation failed - diag=%d\n", diag);
2011                 break;
2012         }
2013 }
2014
2015 /*
2016  * Setup forwarding configuration for each logical core.
2017  */
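/*
 * Streams are spread as evenly as possible: each lcore receives
 * nb_fwd_streams / nb_fwd_lcores streams, and the last
 * (nb_fwd_streams % nb_fwd_lcores) lcores receive one extra stream.
 * For example, 10 streams over 4 lcores gives 2, 2, 3 and 3 streams.
 */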
2018 static void
2019 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
2020 {
2021         streamid_t nb_fs_per_lcore;
2022         streamid_t nb_fs;
2023         streamid_t sm_id;
2024         lcoreid_t  nb_extra;
2025         lcoreid_t  nb_fc;
2026         lcoreid_t  nb_lc;
2027         lcoreid_t  lc_id;
2028
2029         nb_fs = cfg->nb_fwd_streams;
2030         nb_fc = cfg->nb_fwd_lcores;
2031         if (nb_fs <= nb_fc) {
2032                 nb_fs_per_lcore = 1;
2033                 nb_extra = 0;
2034         } else {
2035                 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
2036                 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
2037         }
2038
2039         nb_lc = (lcoreid_t) (nb_fc - nb_extra);
2040         sm_id = 0;
2041         for (lc_id = 0; lc_id < nb_lc; lc_id++) {
2042                 fwd_lcores[lc_id]->stream_idx = sm_id;
2043                 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
2044                 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2045         }
2046
2047         /*
2048          * Assign extra remaining streams, if any.
2049          */
2050         nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
2051         for (lc_id = 0; lc_id < nb_extra; lc_id++) {
2052                 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
2053                 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
2054                 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2055         }
2056 }
2057
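/*
 * Return the TX port paired with a given RX port according to the
 * configured port topology:
 *   - paired:  ports are coupled two by two (0<->1, 2<->3, ...); with an
 *              odd number of ports the last one pairs with itself;
 *   - chained: each port transmits to the next one, the last wraps to 0;
 *   - loop:    each port transmits back to itself.
 */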
2058 static portid_t
2059 fwd_topology_tx_port_get(portid_t rxp)
2060 {
2061         static int warning_once = 1;
2062
2063         RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
2064
2065         switch (port_topology) {
2066         default:
2067         case PORT_TOPOLOGY_PAIRED:
2068                 if ((rxp & 0x1) == 0) {
2069                         if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2070                                 return rxp + 1;
2071                         if (warning_once) {
2072                                 printf("\nWarning! port-topology=paired"
2073                                        " and an odd number of forwarding"
2074                                        " ports; the last port will pair"
2075                                        " with itself.\n\n");
2076                                 warning_once = 0;
2077                         }
2078                         return rxp;
2079                 }
2080                 return rxp - 1;
2081         case PORT_TOPOLOGY_CHAINED:
2082                 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2083         case PORT_TOPOLOGY_LOOP:
2084                 return rxp;
2085         }
2086 }
2087
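/*
 * Forwarding configuration for the "simple" case: one stream per
 * forwarding port, using RX/TX queue 0 only, with the peer port chosen
 * by fwd_topology_tx_port_get().
 */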
2088 static void
2089 simple_fwd_config_setup(void)
2090 {
2091         portid_t i;
2092
2093         cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2094         cur_fwd_config.nb_fwd_streams =
2095                 (streamid_t) cur_fwd_config.nb_fwd_ports;
2096
2097         /* reinitialize forwarding streams */
2098         init_fwd_streams();
2099
2100         /*
2101          * In the simple forwarding test, the number of forwarding cores
2102          * must be lower than or equal to the number of forwarding ports.
2103          */
2104         cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2105         if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2106                 cur_fwd_config.nb_fwd_lcores =
2107                         (lcoreid_t) cur_fwd_config.nb_fwd_ports;
2108         setup_fwd_config_of_each_lcore(&cur_fwd_config);
2109
2110         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2111                 fwd_streams[i]->rx_port   = fwd_ports_ids[i];
2112                 fwd_streams[i]->rx_queue  = 0;
2113                 fwd_streams[i]->tx_port   =
2114                                 fwd_ports_ids[fwd_topology_tx_port_get(i)];
2115                 fwd_streams[i]->tx_queue  = 0;
2116                 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2117                 fwd_streams[i]->retry_enabled = retry_enabled;
2118         }
2119 }
2120
2121 /**
2122  * For the RSS forwarding test, all streams are distributed over lcores. Each
2123  * stream is composed of an RX queue to poll on an RX port for input messages,
2124  * associated with a TX queue of a TX port to which forwarded packets are sent.
2125  */
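/*
 * Streams are created port-first: stream 0 handles queue 0 of the first
 * forwarding port, stream 1 handles queue 0 of the second port, and so on;
 * once all ports are covered the queue index is incremented.
 */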
2126 static void
2127 rss_fwd_config_setup(void)
2128 {
2129         portid_t   rxp;
2130         portid_t   txp;
2131         queueid_t  rxq;
2132         queueid_t  nb_q;
2133         streamid_t  sm_id;
2134
2135         nb_q = nb_rxq;
2136         if (nb_q > nb_txq)
2137                 nb_q = nb_txq;
2138         cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2139         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2140         cur_fwd_config.nb_fwd_streams =
2141                 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2142
2143         if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2144                 cur_fwd_config.nb_fwd_lcores =
2145                         (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2146
2147         /* reinitialize forwarding streams */
2148         init_fwd_streams();
2149
2150         setup_fwd_config_of_each_lcore(&cur_fwd_config);
2151         rxp = 0; rxq = 0;
2152         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2153                 struct fwd_stream *fs;
2154
2155                 fs = fwd_streams[sm_id];
2156                 txp = fwd_topology_tx_port_get(rxp);
2157                 fs->rx_port = fwd_ports_ids[rxp];
2158                 fs->rx_queue = rxq;
2159                 fs->tx_port = fwd_ports_ids[txp];
2160                 fs->tx_queue = rxq;
2161                 fs->peer_addr = fs->tx_port;
2162                 fs->retry_enabled = retry_enabled;
2163                 rxp++;
2164                 if (rxp < nb_fwd_ports)
2165                         continue;
2166                 rxp = 0;
2167                 rxq++;
2168         }
2169 }
2170
2171 /**
2172  * For the DCB forwarding test, each core is assigned to one traffic class.
2173  *
2174  * Each core is assigned a group of streams, each stream being composed of
2175  * an RX queue to poll on an RX port for input messages, associated with
2176  * a TX queue of a TX port to which forwarded packets are sent. All RX and
2177  * TX queues of a stream map to the same traffic class.
2178  * If VMDQ and DCB co-exist, a given traffic class on different pools is
2179  * handled by the same core.
2180  */
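/*
 * Each lcore handles one traffic class of one RX/TX port pair: the TC
 * index advances with every lcore and, once all TCs of the current pair
 * have been covered, the configuration moves on to the next port pair.
 */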
2181 static void
2182 dcb_fwd_config_setup(void)
2183 {
2184         struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2185         portid_t txp, rxp = 0;
2186         queueid_t txq, rxq = 0;
2187         lcoreid_t  lc_id;
2188         uint16_t nb_rx_queue, nb_tx_queue;
2189         uint16_t i, j, k, sm_id = 0;
2190         uint8_t tc = 0;
2191
2192         cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2193         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2194         cur_fwd_config.nb_fwd_streams =
2195                 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2196
2197         /* reinitialize forwarding streams */
2198         init_fwd_streams();
2199         sm_id = 0;
2200         txp = 1;
2201         /* get the dcb info on the first RX and TX ports */
2202         (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2203         (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2204
2205         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2206                 fwd_lcores[lc_id]->stream_nb = 0;
2207                 fwd_lcores[lc_id]->stream_idx = sm_id;
2208                 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2209                         /* if nb_queue is zero, this TC is not enabled
2210                          * on this pool
2211                          */
2212                         if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2213                                 break;
2214                         k = fwd_lcores[lc_id]->stream_nb +
2215                                 fwd_lcores[lc_id]->stream_idx;
2216                         rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2217                         txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2218                         nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2219                         nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2220                         for (j = 0; j < nb_rx_queue; j++) {
2221                                 struct fwd_stream *fs;
2222
2223                                 fs = fwd_streams[k + j];
2224                                 fs->rx_port = fwd_ports_ids[rxp];
2225                                 fs->rx_queue = rxq + j;
2226                                 fs->tx_port = fwd_ports_ids[txp];
2227                                 fs->tx_queue = txq + j % nb_tx_queue;
2228                                 fs->peer_addr = fs->tx_port;
2229                                 fs->retry_enabled = retry_enabled;
2230                         }
2231                         fwd_lcores[lc_id]->stream_nb +=
2232                                 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2233                 }
2234                 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2235
2236                 tc++;
2237                 if (tc < rxp_dcb_info.nb_tcs)
2238                         continue;
2239                 /* Restart from TC 0 on next RX port */
2240                 tc = 0;
2241                 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2242                         rxp = (portid_t)
2243                                 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2244                 else
2245                         rxp++;
2246                 if (rxp >= nb_fwd_ports)
2247                         return;
2248                 /* get the dcb information on next RX and TX ports */
2249                 if ((rxp & 0x1) == 0)
2250                         txp = (portid_t) (rxp + 1);
2251                 else
2252                         txp = (portid_t) (rxp - 1);
2253                 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2254                 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2255         }
2256 }
2257
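/*
 * Forwarding configuration for the icmpecho engine: one stream per RX
 * queue of every forwarding port, each stream echoing packets back on
 * the port/queue it was received on.
 */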
2258 static void
2259 icmp_echo_config_setup(void)
2260 {
2261         portid_t  rxp;
2262         queueid_t rxq;
2263         lcoreid_t lc_id;
2264         uint16_t  sm_id;
2265
2266         if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2267                 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2268                         (nb_txq * nb_fwd_ports);
2269         else
2270                 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2271         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2272         cur_fwd_config.nb_fwd_streams =
2273                 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2274         if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2275                 cur_fwd_config.nb_fwd_lcores =
2276                         (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2277         if (verbose_level > 0) {
2278                 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2279                        __func__,
2280                        cur_fwd_config.nb_fwd_lcores,
2281                        cur_fwd_config.nb_fwd_ports,
2282                        cur_fwd_config.nb_fwd_streams);
2283         }
2284
2285         /* reinitialize forwarding streams */
2286         init_fwd_streams();
2287         setup_fwd_config_of_each_lcore(&cur_fwd_config);
2288         rxp = 0; rxq = 0;
2289         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2290                 if (verbose_level > 0)
2291                         printf("  core=%d: \n", lc_id);
2292                 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2293                         struct fwd_stream *fs;
2294                         fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2295                         fs->rx_port = fwd_ports_ids[rxp];
2296                         fs->rx_queue = rxq;
2297                         fs->tx_port = fs->rx_port;
2298                         fs->tx_queue = rxq;
2299                         fs->peer_addr = fs->tx_port;
2300                         fs->retry_enabled = retry_enabled;
2301                         if (verbose_level > 0)
2302                                 printf("  stream=%d port=%d rxq=%d txq=%d\n",
2303                                        sm_id, fs->rx_port, fs->rx_queue,
2304                                        fs->tx_queue);
2305                         rxq = (queueid_t) (rxq + 1);
2306                         if (rxq == nb_rxq) {
2307                                 rxq = 0;
2308                                 rxp = (portid_t) (rxp + 1);
2309                         }
2310                 }
2311         }
2312 }
2313
2314 #if defined RTE_LIBRTE_PMD_SOFTNIC
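/*
 * Forwarding configuration for the softnic engine: a single forwarding
 * lcore polls the net_softnic port, with one stream per RX queue looping
 * packets back to the same port/queue.
 */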
2315 static void
2316 softnic_fwd_config_setup(void)
2317 {
2318         struct rte_port *port;
2319         portid_t pid, softnic_portid;
2320         queueid_t i;
2321         uint8_t softnic_enable = 0;
2322
2323         RTE_ETH_FOREACH_DEV(pid) {
2324                         port = &ports[pid];
2325                         const char *driver = port->dev_info.driver_name;
2326
2327                         if (strcmp(driver, "net_softnic") == 0) {
2328                                 softnic_portid = pid;
2329                                 softnic_enable = 1;
2330                                 break;
2331                         }
2332         }
2333
2334         if (softnic_enable == 0) {
2335                 printf("Softnic mode not configured (%s)!\n", __func__);
2336                 return;
2337         }
2338
2339         cur_fwd_config.nb_fwd_ports = 1;
2340         cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;
2341
2342         /* Re-initialize forwarding streams */
2343         init_fwd_streams();
2344
2345         /*
2346          * In the softnic forwarding test, the number of forwarding cores
2347          * is set to one and the remaining cores are used for softnic packet processing.
2348          */
2349         cur_fwd_config.nb_fwd_lcores = 1;
2350         setup_fwd_config_of_each_lcore(&cur_fwd_config);
2351
2352         for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
2353                 fwd_streams[i]->rx_port   = softnic_portid;
2354                 fwd_streams[i]->rx_queue  = i;
2355                 fwd_streams[i]->tx_port   = softnic_portid;
2356                 fwd_streams[i]->tx_queue  = i;
2357                 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2358                 fwd_streams[i]->retry_enabled = retry_enabled;
2359         }
2360 }
2361 #endif
2362
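/*
 * Select the forwarding stream layout for the current engine: icmpecho
 * and softnic have dedicated setups; otherwise multi-queue ports use the
 * DCB or RSS layout and single-queue ports use the simple layout.
 */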
2363 void
2364 fwd_config_setup(void)
2365 {
2366         cur_fwd_config.fwd_eng = cur_fwd_eng;
2367         if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2368                 icmp_echo_config_setup();
2369                 return;
2370         }
2371
2372 #if defined RTE_LIBRTE_PMD_SOFTNIC
2373         if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
2374                 softnic_fwd_config_setup();
2375                 return;
2376         }
2377 #endif
2378
2379         if ((nb_rxq > 1) && (nb_txq > 1)){
2380                 if (dcb_config)
2381                         dcb_fwd_config_setup();
2382                 else
2383                         rss_fwd_config_setup();
2384         }
2385         else
2386                 simple_fwd_config_setup();
2387 }
2388
2389 static const char *
2390 mp_alloc_to_str(uint8_t mode)
2391 {
2392         switch (mode) {
2393         case MP_ALLOC_NATIVE:
2394                 return "native";
2395         case MP_ALLOC_ANON:
2396                 return "anon";
2397         case MP_ALLOC_XMEM:
2398                 return "xmem";
2399         case MP_ALLOC_XMEM_HUGE:
2400                 return "xmemhuge";
2401         default:
2402                 return "invalid";
2403         }
2404 }
2405
2406 void
2407 pkt_fwd_config_display(struct fwd_config *cfg)
2408 {
2409         struct fwd_stream *fs;
2410         lcoreid_t  lc_id;
2411         streamid_t sm_id;
2412
2413         printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2414                 "NUMA support %s, MP allocation mode: %s\n",
2415                 cfg->fwd_eng->fwd_mode_name,
2416                 retry_enabled == 0 ? "" : " with retry",
2417                 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2418                 numa_support == 1 ? "enabled" : "disabled",
2419                 mp_alloc_to_str(mp_alloc_type));
2420
2421         if (retry_enabled)
2422                 printf("TX retry num: %u, delay between TX retries: %uus\n",
2423                         burst_tx_retry_num, burst_tx_delay_time);
2424         for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2425                 printf("Logical Core %u (socket %u) forwards packets on "
2426                        "%d streams:",
2427                        fwd_lcores_cpuids[lc_id],
2428                        rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2429                        fwd_lcores[lc_id]->stream_nb);
2430                 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2431                         fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2432                         printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
2433                                "P=%d/Q=%d (socket %u) ",
2434                                fs->rx_port, fs->rx_queue,
2435                                ports[fs->rx_port].socket_id,
2436                                fs->tx_port, fs->tx_queue,
2437                                ports[fs->tx_port].socket_id);
2438                         print_ethaddr("peer=",
2439                                       &peer_eth_addrs[fs->peer_addr]);
2440                 }
2441                 printf("\n");
2442         }
2443         printf("\n");
2444 }
2445
2446 void
2447 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2448 {
2449         struct rte_ether_addr new_peer_addr;
2450         if (!rte_eth_dev_is_valid_port(port_id)) {
2451                 printf("Error: Invalid port number %i\n", port_id);
2452                 return;
2453         }
2454         if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
2455                 printf("Error: Invalid ethernet address: %s\n", peer_addr);
2456                 return;
2457         }
2458         peer_eth_addrs[port_id] = new_peer_addr;
2459 }
2460
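/*
 * Record the list of forwarding lcores. The list is walked twice: a first
 * pass only validates every lcore id (enabled and not the master lcore),
 * a second pass actually records them, so that an invalid entry leaves
 * the previous configuration untouched.
 */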
2461 int
2462 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2463 {
2464         unsigned int i;
2465         unsigned int lcore_cpuid;
2466         int record_now;
2467
2468         record_now = 0;
2469  again:
2470         for (i = 0; i < nb_lc; i++) {
2471                 lcore_cpuid = lcorelist[i];
2472                 if (! rte_lcore_is_enabled(lcore_cpuid)) {
2473                         printf("lcore %u not enabled\n", lcore_cpuid);
2474                         return -1;
2475                 }
2476                 if (lcore_cpuid == rte_get_master_lcore()) {
2477                         printf("lcore %u cannot be used for packet "
2478                                "forwarding: it is the master lcore, "
2479                                "reserved for command line parsing only\n",
2480                                lcore_cpuid);
2481                         return -1;
2482                 }
2483                 if (record_now)
2484                         fwd_lcores_cpuids[i] = lcore_cpuid;
2485         }
2486         if (record_now == 0) {
2487                 record_now = 1;
2488                 goto again;
2489         }
2490         nb_cfg_lcores = (lcoreid_t) nb_lc;
2491         if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2492                 printf("previous number of forwarding cores %u - changed to "
2493                        "number of configured cores %u\n",
2494                        (unsigned int) nb_fwd_lcores, nb_lc);
2495                 nb_fwd_lcores = (lcoreid_t) nb_lc;
2496         }
2497
2498         return 0;
2499 }
2500
2501 int
2502 set_fwd_lcores_mask(uint64_t lcoremask)
2503 {
2504         unsigned int lcorelist[64];
2505         unsigned int nb_lc;
2506         unsigned int i;
2507
2508         if (lcoremask == 0) {
2509                 printf("Invalid NULL mask of cores\n");
2510                 return -1;
2511         }
2512         nb_lc = 0;
2513         for (i = 0; i < 64; i++) {
2514                 if (! ((uint64_t)(1ULL << i) & lcoremask))
2515                         continue;
2516                 lcorelist[nb_lc++] = i;
2517         }
2518         return set_fwd_lcores_list(lcorelist, nb_lc);
2519 }
2520
2521 void
2522 set_fwd_lcores_number(uint16_t nb_lc)
2523 {
2524         if (nb_lc > nb_cfg_lcores) {
2525                 printf("nb fwd cores %u > %u (max. number of configured "
2526                        "lcores) - ignored\n",
2527                        (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2528                 return;
2529         }
2530         nb_fwd_lcores = (lcoreid_t) nb_lc;
2531         printf("Number of forwarding cores set to %u\n",
2532                (unsigned int) nb_fwd_lcores);
2533 }
2534
2535 void
2536 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2537 {
2538         unsigned int i;
2539         portid_t port_id;
2540         int record_now;
2541
2542         record_now = 0;
2543  again:
2544         for (i = 0; i < nb_pt; i++) {
2545                 port_id = (portid_t) portlist[i];
2546                 if (port_id_is_invalid(port_id, ENABLED_WARN))
2547                         return;
2548                 if (record_now)
2549                         fwd_ports_ids[i] = port_id;
2550         }
2551         if (record_now == 0) {
2552                 record_now = 1;
2553                 goto again;
2554         }
2555         nb_cfg_ports = (portid_t) nb_pt;
2556         if (nb_fwd_ports != (portid_t) nb_pt) {
2557                 printf("previous number of forwarding ports %u - changed to "
2558                        "number of configured ports %u\n",
2559                        (unsigned int) nb_fwd_ports, nb_pt);
2560                 nb_fwd_ports = (portid_t) nb_pt;
2561         }
2562 }
2563
2564 void
2565 set_fwd_ports_mask(uint64_t portmask)
2566 {
2567         unsigned int portlist[64];
2568         unsigned int nb_pt;
2569         unsigned int i;
2570
2571         if (portmask == 0) {
2572                 printf("Invalid NULL mask of ports\n");
2573                 return;
2574         }
2575         nb_pt = 0;
2576         RTE_ETH_FOREACH_DEV(i) {
2577                 if (! ((uint64_t)(1ULL << i) & portmask))
2578                         continue;
2579                 portlist[nb_pt++] = i;
2580         }
2581         set_fwd_ports_list(portlist, nb_pt);
2582 }
2583
2584 void
2585 set_fwd_ports_number(uint16_t nb_pt)
2586 {
2587         if (nb_pt > nb_cfg_ports) {
2588                 printf("nb fwd ports %u > %u (number of configured "
2589                        "ports) - ignored\n",
2590                        (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2591                 return;
2592         }
2593         nb_fwd_ports = (portid_t) nb_pt;
2594         printf("Number of forwarding ports set to %u\n",
2595                (unsigned int) nb_fwd_ports);
2596 }
2597
2598 int
2599 port_is_forwarding(portid_t port_id)
2600 {
2601         unsigned int i;
2602
2603         if (port_id_is_invalid(port_id, ENABLED_WARN))
2604                 return -1;
2605
2606         for (i = 0; i < nb_fwd_ports; i++) {
2607                 if (fwd_ports_ids[i] == port_id)
2608                         return 1;
2609         }
2610
2611         return 0;
2612 }
2613
2614 void
2615 set_nb_pkt_per_burst(uint16_t nb)
2616 {
2617         if (nb > MAX_PKT_BURST) {
2618                 printf("nb pkt per burst: %u > %u (maximum packets per "
2619                        "burst) - ignored\n",
2620                        (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2621                 return;
2622         }
2623         nb_pkt_per_burst = nb;
2624         printf("Number of packets per burst set to %u\n",
2625                (unsigned int) nb_pkt_per_burst);
2626 }
2627
2628 static const char *
2629 tx_split_get_name(enum tx_pkt_split split)
2630 {
2631         uint32_t i;
2632
2633         for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2634                 if (tx_split_name[i].split == split)
2635                         return tx_split_name[i].name;
2636         }
2637         return NULL;
2638 }
2639
2640 void
2641 set_tx_pkt_split(const char *name)
2642 {
2643         uint32_t i;
2644
2645         for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2646                 if (strcmp(tx_split_name[i].name, name) == 0) {
2647                         tx_pkt_split = tx_split_name[i].split;
2648                         return;
2649                 }
2650         }
2651         printf("unknown value: \"%s\"\n", name);
2652 }
2653
2654 void
2655 show_tx_pkt_segments(void)
2656 {
2657         uint32_t i, n;
2658         const char *split;
2659
2660         n = tx_pkt_nb_segs;
2661         split = tx_split_get_name(tx_pkt_split);
2662
2663         printf("Number of segments: %u\n", n);
2664         printf("Segment sizes: ");
2665         for (i = 0; i != n - 1; i++)
2666                 printf("%hu,", tx_pkt_seg_lengths[i]);
2667         printf("%hu\n", tx_pkt_seg_lengths[i]);
2668         printf("Split packet: %s\n", split);
2669 }
2670
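/*
 * Set the segment lengths of TX-only packets. The segments are accepted
 * only if there are fewer of them than TX descriptors, each one fits in
 * an mbuf data buffer and the resulting packet is at least as large as
 * an empty UDP/IPv4 packet (Ethernet header + 20 + 8 bytes).
 */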
2671 void
2672 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2673 {
2674         uint16_t tx_pkt_len;
2675         unsigned i;
2676
2677         if (nb_segs >= (unsigned) nb_txd) {
2678                 printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n",
2679                        nb_segs, (unsigned int) nb_txd);
2680                 return;
2681         }
2682
2683         /*
2684          * Check that each segment length is greater than or equal to
2685          * the mbuf data size.
2686          * Check also that the total packet length is greater than or equal to
2687          * the size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
2688          * 20 + 8).
2689          */
2690         tx_pkt_len = 0;
2691         for (i = 0; i < nb_segs; i++) {
2692                 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2693                         printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2694                                i, seg_lengths[i], (unsigned) mbuf_data_size);
2695                         return;
2696                 }
2697                 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2698         }
2699         if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
2700                 printf("total packet length=%u < %d - give up\n",
2701                                 (unsigned) tx_pkt_len,
2702                                 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
2703                 return;
2704         }
2705
2706         for (i = 0; i < nb_segs; i++)
2707                 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2708
2709         tx_pkt_length  = tx_pkt_len;
2710         tx_pkt_nb_segs = (uint8_t) nb_segs;
2711 }
2712
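/*
 * Enable or disable GRO on a port. Forwarding must be stopped first; when
 * enabling, default TCP/IPv4 GRO parameters are applied unless a
 * non-default flush cycle count has already been configured.
 */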
2713 void
2714 setup_gro(const char *onoff, portid_t port_id)
2715 {
2716         if (!rte_eth_dev_is_valid_port(port_id)) {
2717                 printf("invalid port id %u\n", port_id);
2718                 return;
2719         }
2720         if (test_done == 0) {
2721                 printf("Before enabling/disabling GRO,"
2722                                 " please stop forwarding first\n");
2723                 return;
2724         }
2725         if (strcmp(onoff, "on") == 0) {
2726                 if (gro_ports[port_id].enable != 0) {
2727                         printf("GRO is already enabled on port %u. Please"
2728                                         " disable it first\n", port_id);
2729                         return;
2730                 }
2731                 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2732                         gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
2733                         gro_ports[port_id].param.max_flow_num =
2734                                 GRO_DEFAULT_FLOW_NUM;
2735                         gro_ports[port_id].param.max_item_per_flow =
2736                                 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
2737                 }
2738                 gro_ports[port_id].enable = 1;
2739         } else {
2740                 if (gro_ports[port_id].enable == 0) {
2741                         printf("GRO is not enabled on port %u\n", port_id);
2742                         return;
2743                 }
2744                 gro_ports[port_id].enable = 0;
2745         }
2746 }
2747
2748 void
2749 setup_gro_flush_cycles(uint8_t cycles)
2750 {
2751         if (test_done == 0) {
2752                 printf("Before changing the GRO flush interval,"
2753                                 " please stop forwarding first.\n");
2754                 return;
2755         }
2756
2757         if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
2758                         GRO_DEFAULT_FLUSH_CYCLES) {
2759                 printf("The flushing cycle must be in the range"
2760                                 " of 1 to %u. Reverting to the default"
2761                                 " value %u.\n",
2762                                 GRO_MAX_FLUSH_CYCLES,
2763                                 GRO_DEFAULT_FLUSH_CYCLES);
2764                 cycles = GRO_DEFAULT_FLUSH_CYCLES;
2765         }
2766
2767         gro_flush_cycles = cycles;
2768 }
2769
2770 void
2771 show_gro(portid_t port_id)
2772 {
2773         struct rte_gro_param *param;
2774         uint32_t max_pkts_num;
2775
2776         param = &gro_ports[port_id].param;
2777
2778         if (!rte_eth_dev_is_valid_port(port_id)) {
2779                 printf("Invalid port id %u.\n", port_id);
2780                 return;
2781         }
2782         if (gro_ports[port_id].enable) {
2783                 printf("GRO type: TCP/IPv4\n");
2784                 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2785                         max_pkts_num = param->max_flow_num *
2786                                 param->max_item_per_flow;
2787                 } else
2788                         max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
2789                 printf("Max number of packets to perform GRO: %u\n",
2790                                 max_pkts_num);
2791                 printf("Flushing cycles: %u\n", gro_flush_cycles);
2792         } else
2793                 printf("GRO is not enabled on port %u.\n", port_id);
2794 }
2795
2796 void
2797 setup_gso(const char *mode, portid_t port_id)
2798 {
2799         if (!rte_eth_dev_is_valid_port(port_id)) {
2800                 printf("invalid port id %u\n", port_id);
2801                 return;
2802         }
2803         if (strcmp(mode, "on") == 0) {
2804                 if (test_done == 0) {
2805                         printf("before enabling GSO,"
2806                                         " please stop forwarding first\n");
2807                         return;
2808                 }
2809                 gso_ports[port_id].enable = 1;
2810         } else if (strcmp(mode, "off") == 0) {
2811                 if (test_done == 0) {
2812                         printf("before disabling GSO,"
2813                                         " please stop forwarding first\n");
2814                         return;
2815                 }
2816                 gso_ports[port_id].enable = 0;
2817         }
2818 }
2819
2820 char*
2821 list_pkt_forwarding_modes(void)
2822 {
2823         static char fwd_modes[128] = "";
2824         const char *separator = "|";
2825         struct fwd_engine *fwd_eng;
2826         unsigned i = 0;
2827
2828         if (strlen(fwd_modes) == 0) {
2829                 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2830                         strncat(fwd_modes, fwd_eng->fwd_mode_name,
2831                                         sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2832                         strncat(fwd_modes, separator,
2833                                         sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2834                 }
2835                 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2836         }
2837
2838         return fwd_modes;
2839 }
2840
2841 char*
2842 list_pkt_forwarding_retry_modes(void)
2843 {
2844         static char fwd_modes[128] = "";
2845         const char *separator = "|";
2846         struct fwd_engine *fwd_eng;
2847         unsigned i = 0;
2848
2849         if (strlen(fwd_modes) == 0) {
2850                 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2851                         if (fwd_eng == &rx_only_engine)
2852                                 continue;
2853                         strncat(fwd_modes, fwd_eng->fwd_mode_name,
2854                                         sizeof(fwd_modes) -
2855                                         strlen(fwd_modes) - 1);
2856                         strncat(fwd_modes, separator,
2857                                         sizeof(fwd_modes) -
2858                                         strlen(fwd_modes) - 1);
2859                 }
2860                 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2861         }
2862
2863         return fwd_modes;
2864 }
2865
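/* Select the current forwarding engine by name. */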
2866 void
2867 set_pkt_forwarding_mode(const char *fwd_mode_name)
2868 {
2869         struct fwd_engine *fwd_eng;
2870         unsigned i;
2871
2872         i = 0;
2873         while ((fwd_eng = fwd_engines[i]) != NULL) {
2874                 if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
2875                         printf("Set %s packet forwarding mode%s\n",
2876                                fwd_mode_name,
2877                                retry_enabled == 0 ? "" : " with retry");
2878                         cur_fwd_eng = fwd_eng;
2879                         return;
2880                 }
2881                 i++;
2882         }
2883         printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
2884 }
2885
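/*
 * Install the packet-dump callback on every Rx queue of a port.  The
 * add_tx_*() and remove_*() helpers below handle the Tx side and the
 * removal of previously installed callbacks.
 */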
2886 void
2887 add_rx_dump_callbacks(portid_t portid)
2888 {
2889         struct rte_eth_dev_info dev_info;
2890         uint16_t queue;
2891         int ret;
2892
2893         if (port_id_is_invalid(portid, ENABLED_WARN))
2894                 return;
2895
2896         ret = eth_dev_info_get_print_err(portid, &dev_info);
2897         if (ret != 0)
2898                 return;
2899
2900         for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
2901                 if (!ports[portid].rx_dump_cb[queue])
2902                         ports[portid].rx_dump_cb[queue] =
2903                                 rte_eth_add_rx_callback(portid, queue,
2904                                         dump_rx_pkts, NULL);
2905 }
2906
2907 void
2908 add_tx_dump_callbacks(portid_t portid)
2909 {
2910         struct rte_eth_dev_info dev_info;
2911         uint16_t queue;
2912         int ret;
2913
2914         if (port_id_is_invalid(portid, ENABLED_WARN))
2915                 return;
2916
2917         ret = eth_dev_info_get_print_err(portid, &dev_info);
2918         if (ret != 0)
2919                 return;
2920
2921         for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
2922                 if (!ports[portid].tx_dump_cb[queue])
2923                         ports[portid].tx_dump_cb[queue] =
2924                                 rte_eth_add_tx_callback(portid, queue,
2925                                                         dump_tx_pkts, NULL);
2926 }
2927
2928 void
2929 remove_rx_dump_callbacks(portid_t portid)
2930 {
2931         struct rte_eth_dev_info dev_info;
2932         uint16_t queue;
2933         int ret;
2934
2935         if (port_id_is_invalid(portid, ENABLED_WARN))
2936                 return;
2937
2938         ret = eth_dev_info_get_print_err(portid, &dev_info);
2939         if (ret != 0)
2940                 return;
2941
2942         for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
2943                 if (ports[portid].rx_dump_cb[queue]) {
2944                         rte_eth_remove_rx_callback(portid, queue,
2945                                 ports[portid].rx_dump_cb[queue]);
2946                         ports[portid].rx_dump_cb[queue] = NULL;
2947                 }
2948 }
2949
2950 void
2951 remove_tx_dump_callbacks(portid_t portid)
2952 {
2953         struct rte_eth_dev_info dev_info;
2954         uint16_t queue;
2955         int ret;
2956
2957         if (port_id_is_invalid(portid, ENABLED_WARN))
2958                 return;
2959
2960         ret = eth_dev_info_get_print_err(portid, &dev_info);
2961         if (ret != 0)
2962                 return;
2963
2964         for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
2965                 if (ports[portid].tx_dump_cb[queue]) {
2966                         rte_eth_remove_tx_callback(portid, queue,
2967                                 ports[portid].tx_dump_cb[queue]);
2968                         ports[portid].tx_dump_cb[queue] = NULL;
2969                 }
2970 }
2971
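/*
 * (Un)install the Rx/Tx dump callbacks on all ports according to the
 * verbosity level: 1 dumps Rx only, 2 dumps Tx only, 3 and above dump both.
 */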
2972 void
2973 configure_rxtx_dump_callbacks(uint16_t verbose)
2974 {
2975         portid_t portid;
2976
2977 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2978         TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
2979         return;
2980 #endif
2981
2982         RTE_ETH_FOREACH_DEV(portid)
2983         {
2984                 if (verbose == 1 || verbose > 2)
2985                         add_rx_dump_callbacks(portid);
2986                 else
2987                         remove_rx_dump_callbacks(portid);
2988                 if (verbose >= 2)
2989                         add_tx_dump_callbacks(portid);
2990                 else
2991                         remove_tx_dump_callbacks(portid);
2992         }
2993 }
2994
2995 void
2996 set_verbose_level(uint16_t vb_level)
2997 {
2998         printf("Change verbose level from %u to %u\n",
2999                (unsigned int) verbose_level, (unsigned int) vb_level);
3000         verbose_level = vb_level;
3001         configure_rxtx_dump_callbacks(verbose_level);
3002 }
3003
3004 void
3005 vlan_extend_set(portid_t port_id, int on)
3006 {
3007         int diag;
3008         int vlan_offload;
3009         uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3010
3011         if (port_id_is_invalid(port_id, ENABLED_WARN))
3012                 return;
3013
3014         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3015
3016         if (on) {
3017                 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
3018                 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3019         } else {
3020                 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
3021                 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3022         }
3023
3024         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3025         if (diag < 0)
3026                 printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
3027                "diag=%d\n", port_id, on, diag);
3028         ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3029 }
3030
3031 void
3032 rx_vlan_strip_set(portid_t port_id, int on)
3033 {
3034         int diag;
3035         int vlan_offload;
3036         uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3037
3038         if (port_id_is_invalid(port_id, ENABLED_WARN))
3039                 return;
3040
3041         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3042
3043         if (on) {
3044                 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
3045                 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3046         } else {
3047                 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
3048                 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3049         }
3050
3051         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3052         if (diag < 0)
3053                 printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
3054                "diag=%d\n", port_id, on, diag);
3055         ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3056 }
3057
3058 void
3059 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
3060 {
3061         int diag;
3062
3063         if (port_id_is_invalid(port_id, ENABLED_WARN))
3064                 return;
3065
3066         diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
3067         if (diag < 0)
3068                 printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
3069                "diag=%d\n", port_id, queue_id, on, diag);
3070 }
3071
3072 void
3073 rx_vlan_filter_set(portid_t port_id, int on)
3074 {
3075         int diag;
3076         int vlan_offload;
3077         uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3078
3079         if (port_id_is_invalid(port_id, ENABLED_WARN))
3080                 return;
3081
3082         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3083
3084         if (on) {
3085                 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
3086                 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3087         } else {
3088                 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
3089                 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3090         }
3091
3092         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3093         if (diag < 0)
3094                 printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
3095                "diag=%d\n", port_id, on, diag);
3096         ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3097 }
3098
3099 void
3100 rx_vlan_qinq_strip_set(portid_t port_id, int on)
3101 {
3102         int diag;
3103         int vlan_offload;
3104         uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3105
3106         if (port_id_is_invalid(port_id, ENABLED_WARN))
3107                 return;
3108
3109         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3110
3111         if (on) {
3112                 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
3113                 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3114         } else {
3115                 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
3116                 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3117         }
3118
3119         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3120         if (diag < 0)
3121                 printf("%s(port_id=%d, on=%d) failed "
3122                "diag=%d\n", __func__, port_id, on, diag);
3123         ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3124 }
3125
3126 int
3127 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
3128 {
3129         int diag;
3130
3131         if (port_id_is_invalid(port_id, ENABLED_WARN))
3132                 return 1;
3133         if (vlan_id_is_invalid(vlan_id))
3134                 return 1;
3135         diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
3136         if (diag == 0)
3137                 return 0;
3138         printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
3139                "diag=%d\n",
3140                port_id, vlan_id, on, diag);
3141         return -1;
3142 }
3143
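/*
 * Enable or disable Rx VLAN filtering for all 4096 VLAN IDs on a port,
 * stopping at the first failure.
 */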
3144 void
3145 rx_vlan_all_filter_set(portid_t port_id, int on)
3146 {
3147         uint16_t vlan_id;
3148
3149         if (port_id_is_invalid(port_id, ENABLED_WARN))
3150                 return;
3151         for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
3152                 if (rx_vft_set(port_id, vlan_id, on))
3153                         break;
3154         }
3155 }
3156
3157 void
3158 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
3159 {
3160         int diag;
3161
3162         if (port_id_is_invalid(port_id, ENABLED_WARN))
3163                 return;
3164
3165         diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
3166         if (diag == 0)
3167                 return;
3168
3169         printf("vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
3170                "diag=%d\n",
3171                port_id, vlan_type, tp_id, diag);
3172 }
3173
3174 void
3175 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
3176 {
3177         struct rte_eth_dev_info dev_info;
3178         int ret;
3179
3180         if (port_id_is_invalid(port_id, ENABLED_WARN))
3181                 return;
3182         if (vlan_id_is_invalid(vlan_id))
3183                 return;
3184
3185         if (ports[port_id].dev_conf.txmode.offloads &
3186             DEV_TX_OFFLOAD_QINQ_INSERT) {
3187                 printf("Error: cannot set VLAN insertion while QinQ insertion is enabled.\n");
3188                 return;
3189         }
3190
3191         ret = eth_dev_info_get_print_err(port_id, &dev_info);
3192         if (ret != 0)
3193                 return;
3194
3195         if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
3196                 printf("Error: vlan insert is not supported by port %d\n",
3197                         port_id);
3198                 return;
3199         }
3200
3201         tx_vlan_reset(port_id);
3202         ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
3203         ports[port_id].tx_vlan_id = vlan_id;
3204 }
3205
3206 void
3207 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
3208 {
3209         struct rte_eth_dev_info dev_info;
3210         int ret;
3211
3212         if (port_id_is_invalid(port_id, ENABLED_WARN))
3213                 return;
3214         if (vlan_id_is_invalid(vlan_id))
3215                 return;
3216         if (vlan_id_is_invalid(vlan_id_outer))
3217                 return;
3218
3219         ret = eth_dev_info_get_print_err(port_id, &dev_info);
3220         if (ret != 0)
3221                 return;
3222
3223         if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
3224                 printf("Error: qinq insert not supported by port %d\n",
3225                         port_id);
3226                 return;
3227         }
3228
3229         tx_vlan_reset(port_id);
3230         ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
3231                                                     DEV_TX_OFFLOAD_QINQ_INSERT);
3232         ports[port_id].tx_vlan_id = vlan_id;
3233         ports[port_id].tx_vlan_id_outer = vlan_id_outer;
3234 }
3235
3236 void
3237 tx_vlan_reset(portid_t port_id)
3238 {
3239         if (port_id_is_invalid(port_id, ENABLED_WARN))
3240                 return;
3241         ports[port_id].dev_conf.txmode.offloads &=
3242                                 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
3243                                   DEV_TX_OFFLOAD_QINQ_INSERT);
3244         ports[port_id].tx_vlan_id = 0;
3245         ports[port_id].tx_vlan_id_outer = 0;
3246 }
3247
3248 void
3249 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
3250 {
3251         if (port_id_is_invalid(port_id, ENABLED_WARN))
3252                 return;
3253
3254         rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
3255 }
3256
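/*
 * Map an Rx or Tx queue of a port to a statistics counter.  An existing
 * mapping is updated in place; otherwise a new entry is appended to the
 * per-direction mapping table.
 */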
3257 void
3258 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
3259 {
3260         uint16_t i;
3261         uint8_t existing_mapping_found = 0;
3262
3263         if (port_id_is_invalid(port_id, ENABLED_WARN))
3264                 return;
3265
3266         if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
3267                 return;
3268
3269         if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
3270                 printf("map_value not in required range 0..%d\n",
3271                                 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
3272                 return;
3273         }
3274
3275         if (!is_rx) { /*then tx*/
3276                 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3277                         if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3278                             (tx_queue_stats_mappings[i].queue_id == queue_id)) {
3279                                 tx_queue_stats_mappings[i].stats_counter_id = map_value;
3280                                 existing_mapping_found = 1;
3281                                 break;
3282                         }
3283                 }
3284                 if (!existing_mapping_found) { /* A new additional mapping... */
3285                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
3286                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
3287                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
3288                         nb_tx_queue_stats_mappings++;
3289                 }
3290         }
3291         else { /*rx*/
3292                 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3293                         if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3294                             (rx_queue_stats_mappings[i].queue_id == queue_id)) {
3295                                 rx_queue_stats_mappings[i].stats_counter_id = map_value;
3296                                 existing_mapping_found = 1;
3297                                 break;
3298                         }
3299                 }
3300                 if (!existing_mapping_found) { /* A new additional mapping... */
3301                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
3302                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
3303                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
3304                         nb_rx_queue_stats_mappings++;
3305                 }
3306         }
3307 }
3308
3309 void
3310 set_xstats_hide_zero(uint8_t on_off)
3311 {
3312         xstats_hide_zero = on_off;
3313 }
3314
3315 static inline void
3316 print_fdir_mask(struct rte_eth_fdir_masks *mask)
3317 {
3318         printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
3319
3320         if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3321                 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
3322                         " tunnel_id: 0x%08x",
3323                         mask->mac_addr_byte_mask, mask->tunnel_type_mask,
3324                         rte_be_to_cpu_32(mask->tunnel_id_mask));
3325         else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3326                 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
3327                         rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
3328                         rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
3329
3330                 printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
3331                         rte_be_to_cpu_16(mask->src_port_mask),
3332                         rte_be_to_cpu_16(mask->dst_port_mask));
3333
3334                 printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3335                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
3336                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
3337                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
3338                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
3339
3340                 printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3341                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
3342                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
3343                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
3344                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
3345         }
3346
3347         printf("\n");
3348 }
3349
3350 static inline void
3351 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3352 {
3353         struct rte_eth_flex_payload_cfg *cfg;
3354         uint32_t i, j;
3355
3356         for (i = 0; i < flex_conf->nb_payloads; i++) {
3357                 cfg = &flex_conf->flex_set[i];
3358                 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
3359                         printf("\n    RAW:  ");
3360                 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
3361                         printf("\n    L2_PAYLOAD:  ");
3362                 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
3363                         printf("\n    L3_PAYLOAD:  ");
3364                 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
3365                         printf("\n    L4_PAYLOAD:  ");
3366                 else
3367                         printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
3368                 for (j = 0; j < num; j++)
3369                         printf("  %-5u", cfg->src_offset[j]);
3370         }
3371         printf("\n");
3372 }
3373
3374 static char *
3375 flowtype_to_str(uint16_t flow_type)
3376 {
3377         struct flow_type_info {
3378                 char str[32];
3379                 uint16_t ftype;
3380         };
3381
3382         uint8_t i;
3383         static struct flow_type_info flowtype_str_table[] = {
3384                 {"raw", RTE_ETH_FLOW_RAW},
3385                 {"ipv4", RTE_ETH_FLOW_IPV4},
3386                 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
3387                 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
3388                 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
3389                 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
3390                 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
3391                 {"ipv6", RTE_ETH_FLOW_IPV6},
3392                 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
3393                 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
3394                 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
3395                 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
3396                 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
3397                 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
3398                 {"port", RTE_ETH_FLOW_PORT},
3399                 {"vxlan", RTE_ETH_FLOW_VXLAN},
3400                 {"geneve", RTE_ETH_FLOW_GENEVE},
3401                 {"nvgre", RTE_ETH_FLOW_NVGRE},
3402                 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
3403         };
3404
3405         for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
3406                 if (flowtype_str_table[i].ftype == flow_type)
3407                         return flowtype_str_table[i].str;
3408         }
3409
3410         return NULL;
3411 }
3412
3413 static inline void
3414 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3415 {
3416         struct rte_eth_fdir_flex_mask *mask;
3417         uint32_t i, j;
3418         char *p;
3419
3420         for (i = 0; i < flex_conf->nb_flexmasks; i++) {
3421                 mask = &flex_conf->flex_mask[i];
3422                 p = flowtype_to_str(mask->flow_type);
3423                 printf("\n    %s:\t", p ? p : "unknown");
3424                 for (j = 0; j < num; j++)
3425                         printf(" %02x", mask->mask[j]);
3426         }
3427         printf("\n");
3428 }
3429
3430 static inline void
3431 print_fdir_flow_type(uint32_t flow_types_mask)
3432 {
3433         int i;
3434         char *p;
3435
3436         for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3437                 if (!(flow_types_mask & (1 << i)))
3438                         continue;
3439                 p = flowtype_to_str(i);
3440                 if (p)
3441                         printf(" %s", p);
3442                 else
3443                         printf(" unknown");
3444         }
3445         printf("\n");
3446 }
3447
3448 void
3449 fdir_get_infos(portid_t port_id)
3450 {
3451         struct rte_eth_fdir_stats fdir_stat;
3452         struct rte_eth_fdir_info fdir_info;
3453         int ret;
3454
3455         static const char *fdir_stats_border = "########################";
3456
3457         if (port_id_is_invalid(port_id, ENABLED_WARN))
3458                 return;
3459         ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
3460         if (ret < 0) {
3461                 printf("\n FDIR is not supported on port %-2d\n",
3462                         port_id);
3463                 return;
3464         }
3465
3466         memset(&fdir_info, 0, sizeof(fdir_info));
3467         rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3468                                RTE_ETH_FILTER_INFO, &fdir_info);
3469         memset(&fdir_stat, 0, sizeof(fdir_stat));
3470         rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3471                                RTE_ETH_FILTER_STATS, &fdir_stat);
3472         printf("\n  %s FDIR infos for port %-2d     %s\n",
3473                fdir_stats_border, port_id, fdir_stats_border);
3474         printf("  MODE: ");
3475         if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
3476                 printf("  PERFECT\n");
3477         else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
3478                 printf("  PERFECT-MAC-VLAN\n");
3479         else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3480                 printf("  PERFECT-TUNNEL\n");
3481         else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
3482                 printf("  SIGNATURE\n");
3483         else
3484                 printf("  DISABLE\n");
3485         if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
3486                 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
3487                 printf("  SUPPORTED FLOW TYPE: ");
3488                 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
3489         }
3490         printf("  FLEX PAYLOAD INFO:\n");
3491         printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
3492                "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
3493                "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
3494                 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
3495                 fdir_info.flex_payload_unit,
3496                 fdir_info.max_flex_payload_segment_num,
3497                 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
3498         printf("  MASK: ");
3499         print_fdir_mask(&fdir_info.mask);
3500         if (fdir_info.flex_conf.nb_payloads > 0) {
3501                 printf("  FLEX PAYLOAD SRC OFFSET:");
3502                 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3503         }
3504         if (fdir_info.flex_conf.nb_flexmasks > 0) {
3505                 printf("  FLEX MASK CFG:");
3506                 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3507         }
3508         printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
3509                fdir_stat.guarant_cnt, fdir_stat.best_cnt);
3510         printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
3511                fdir_info.guarant_spc, fdir_info.best_spc);
3512         printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
3513                "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
3514                "  add:           %-10"PRIu64"  remove:        %"PRIu64"\n"
3515                "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
3516                fdir_stat.collision, fdir_stat.free,
3517                fdir_stat.maxhash, fdir_stat.maxlen,
3518                fdir_stat.add, fdir_stat.remove,
3519                fdir_stat.f_add, fdir_stat.f_remove);
3520         printf("  %s############################%s\n",
3521                fdir_stats_border, fdir_stats_border);
3522 }
3523
3524 void
3525 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3526 {
3527         struct rte_port *port;
3528         struct rte_eth_fdir_flex_conf *flex_conf;
3529         int i, idx = 0;
3530
3531         port = &ports[port_id];
3532         flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3533         for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3534                 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3535                         idx = i;
3536                         break;
3537                 }
3538         }
3539         if (i >= RTE_ETH_FLOW_MAX) {
3540                 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3541                         idx = flex_conf->nb_flexmasks;
3542                         flex_conf->nb_flexmasks++;
3543                 } else {
3544                         printf("The flex mask table is full. Cannot set the flex"
3545                                 " mask for flow_type(%u).\n", cfg->flow_type);
3546                         return;
3547                 }
3548         }
3549         rte_memcpy(&flex_conf->flex_mask[idx],
3550                          cfg,
3551                          sizeof(struct rte_eth_fdir_flex_mask));
3552 }
3553
3554 void
3555 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3556 {
3557         struct rte_port *port;
3558         struct rte_eth_fdir_flex_conf *flex_conf;
3559         int i, idx = 0;
3560
3561         port = &ports[port_id];
3562         flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3563         for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3564                 if (cfg->type == flex_conf->flex_set[i].type) {
3565                         idx = i;
3566                         break;
3567                 }
3568         }
3569         if (i >= RTE_ETH_PAYLOAD_MAX) {
3570                 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3571                         idx = flex_conf->nb_payloads;
3572                         flex_conf->nb_payloads++;
3573                 } else {
3574                         printf("The flex payload table is full. Cannot set the"
3575                                 " flex payload for type(%u).\n", cfg->type);
3576                         return;
3577                 }
3578         }
3579         rte_memcpy(&flex_conf->flex_set[idx],
3580                          cfg,
3581                          sizeof(struct rte_eth_flex_payload_cfg));
3582
3583 }
3584
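/*
 * Enable or disable VF Rx/Tx traffic.  Only implemented through the ixgbe
 * PMD-specific API; other drivers report the setting as unsupported.
 */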
3585 void
3586 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3587 {
3588 #ifdef RTE_LIBRTE_IXGBE_PMD
3589         int diag;
3590
3591         if (is_rx)
3592                 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3593         else
3594                 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3595
3596         if (diag == 0)
3597                 return;
3598         printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3599                         is_rx ? "rx" : "tx", port_id, diag);
3600         return;
3601 #endif
3602         printf("VF %s setting not supported for port %d\n",
3603                         is_rx ? "Rx" : "Tx", port_id);
3604         RTE_SET_USED(vf);
3605         RTE_SET_USED(on);
3606 }
3607
3608 int
3609 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3610 {
3611         int diag;
3612         struct rte_eth_link link;
3613         int ret;
3614
3615         if (port_id_is_invalid(port_id, ENABLED_WARN))
3616                 return 1;
3617         ret = eth_link_get_nowait_print_err(port_id, &link);
3618         if (ret < 0)
3619                 return 1;
3620         if (rate > link.link_speed) {
3621                 printf("Invalid rate value: %u is bigger than the link speed: %u\n",
3622                         rate, link.link_speed);
3623                 return 1;
3624         }
3625         diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3626         if (diag == 0)
3627                 return diag;
3628         printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3629                 port_id, diag);
3630         return diag;
3631 }
3632
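/*
 * Set a VF rate limit, trying the ixgbe and then the bnxt PMD-specific
 * APIs; returns -ENOTSUP when neither PMD is compiled in.
 */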
3633 int
3634 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
3635 {
3636         int diag = -ENOTSUP;
3637
3638         RTE_SET_USED(vf);
3639         RTE_SET_USED(rate);
3640         RTE_SET_USED(q_msk);
3641
3642 #ifdef RTE_LIBRTE_IXGBE_PMD
3643         if (diag == -ENOTSUP)
3644                 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
3645                                                        q_msk);
3646 #endif
3647 #ifdef RTE_LIBRTE_BNXT_PMD
3648         if (diag == -ENOTSUP)
3649                 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
3650 #endif
3651         if (diag == 0)
3652                 return diag;
3653
3654         printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
3655                 port_id, diag);
3656         return diag;
3657 }
3658
3659 /*
3660  * Functions to manage the set of filtered Multicast MAC addresses.
3661  *
3662  * A pool of filtered multicast MAC addresses is associated with each port.
3663  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3664  * The address of the pool and the number of valid multicast MAC addresses
3665  * recorded in the pool are stored in the fields "mc_addr_pool" and
3666  * "mc_addr_nb" of the "rte_port" data structure.
3667  *
3668  * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
3669  * to be supplied a contiguous array of multicast MAC addresses.
3670  * To comply with this constraint, the set of multicast addresses recorded
3671  * into the pool are systematically compacted at the beginning of the pool.
3672  * Hence, when a multicast address is removed from the pool, all following
3673  * addresses, if any, are copied back to keep the set contiguous.
3674  */
3675 #define MCAST_POOL_INC 32
3676
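/*
 * Grow the multicast address pool by one entry, reallocating it in chunks
 * of MCAST_POOL_INC addresses when the current chunk is full.
 */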
3677 static int
3678 mcast_addr_pool_extend(struct rte_port *port)
3679 {
3680         struct rte_ether_addr *mc_pool;
3681         size_t mc_pool_size;
3682
3683         /*
3684          * If a free entry is available at the end of the pool, just
3685          * increment the number of recorded multicast addresses.
3686          */
3687         if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3688                 port->mc_addr_nb++;
3689                 return 0;
3690         }
3691
3692         /*
3693          * [re]allocate a pool with MCAST_POOL_INC more entries.
3694          * The previous test guarantees that port->mc_addr_nb is a multiple
3695          * of MCAST_POOL_INC.
3696          */
3697         mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
3698                                                     MCAST_POOL_INC);
3699         mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
3700                                                 mc_pool_size);
3701         if (mc_pool == NULL) {
3702                 printf("allocation of pool of %u multicast addresses failed\n",
3703                        port->mc_addr_nb + MCAST_POOL_INC);
3704                 return -ENOMEM;
3705         }
3706
3707         port->mc_addr_pool = mc_pool;
3708         port->mc_addr_nb++;
3709         return 0;
3710
3711 }
3712
3713 static void
3714 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
3715 {
3716         port->mc_addr_nb--;
3717         if (addr_idx == port->mc_addr_nb) {
3718                 /* No need to recompact the set of multicast addresses. */
3719                 if (port->mc_addr_nb == 0) {
3720                         /* free the pool of multicast addresses. */
3721                         free(port->mc_addr_pool);
3722                         port->mc_addr_pool = NULL;
3723                 }
3724                 return;
3725         }
3726         memmove(&port->mc_addr_pool[addr_idx],
3727                 &port->mc_addr_pool[addr_idx + 1],
3728                 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
3729 }
3730
3731 static void
3732 eth_port_multicast_addr_list_set(portid_t port_id)
3733 {
3734         struct rte_port *port;
3735         int diag;
3736
3737         port = &ports[port_id];
3738         diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3739                                             port->mc_addr_nb);
3740         if (diag == 0)
3741                 return;
3742         printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
3743                port_id, port->mc_addr_nb, -diag);
3744 }
3745
3746 void
3747 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
3748 {
3749         struct rte_port *port;
3750         uint32_t i;
3751
3752         if (port_id_is_invalid(port_id, ENABLED_WARN))
3753                 return;
3754
3755         port = &ports[port_id];
3756
3757         /*
3758          * Check that the added multicast MAC address is not already recorded
3759          * in the pool of multicast addresses.
3760          */
3761         for (i = 0; i < port->mc_addr_nb; i++) {
3762                 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3763                         printf("multicast address already filtered by port\n");
3764                         return;
3765                 }
3766         }
3767
3768         if (mcast_addr_pool_extend(port) != 0)
3769                 return;
3770         rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
3771         eth_port_multicast_addr_list_set(port_id);
3772 }
3773
3774 void
3775 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
3776 {
3777         struct rte_port *port;
3778         uint32_t i;
3779
3780         if (port_id_is_invalid(port_id, ENABLED_WARN))
3781                 return;
3782
3783         port = &ports[port_id];
3784
3785         /*
3786          * Search the pool of multicast MAC addresses for the removed address.
3787          */
3788         for (i = 0; i < port->mc_addr_nb; i++) {
3789                 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
3790                         break;
3791         }
3792         if (i == port->mc_addr_nb) {
3793                 printf("multicast address not filtered by port %d\n", port_id);
3794                 return;
3795         }
3796
3797         mcast_addr_pool_remove(port, i);
3798         eth_port_multicast_addr_list_set(port_id);
3799 }
3800
3801 void
3802 port_dcb_info_display(portid_t port_id)
3803 {
3804         struct rte_eth_dcb_info dcb_info;
3805         uint16_t i;
3806         int ret;
3807         static const char *border = "================";
3808
3809         if (port_id_is_invalid(port_id, ENABLED_WARN))
3810                 return;
3811
3812         ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
3813         if (ret) {
3814                 printf("\n Failed to get DCB info on port %-2d\n",
3815                         port_id);
3816                 return;
3817         }
3818         printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
3819         printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
3820         printf("\n  TC :        ");
3821         for (i = 0; i < dcb_info.nb_tcs; i++)
3822                 printf("\t%4d", i);
3823         printf("\n  Priority :  ");
3824         for (i = 0; i < dcb_info.nb_tcs; i++)
3825                 printf("\t%4d", dcb_info.prio_tc[i]);
3826         printf("\n  BW percent :");
3827         for (i = 0; i < dcb_info.nb_tcs; i++)
3828                 printf("\t%4d%%", dcb_info.tc_bws[i]);
3829         printf("\n  RXQ base :  ");
3830         for (i = 0; i < dcb_info.nb_tcs; i++)
3831                 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
3832         printf("\n  RXQ number :");
3833         for (i = 0; i < dcb_info.nb_tcs; i++)
3834                 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
3835         printf("\n  TXQ base :  ");
3836         for (i = 0; i < dcb_info.nb_tcs; i++)
3837                 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
3838         printf("\n  TXQ number :");
3839         for (i = 0; i < dcb_info.nb_tcs; i++)
3840                 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
3841         printf("\n");
3842 }
3843
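/*
 * Read a regular file into a freshly allocated buffer.  On success the
 * buffer is returned and *size (if non-NULL) holds the file length; on
 * failure NULL is returned.
 */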
3844 uint8_t *
3845 open_file(const char *file_path, uint32_t *size)
3846 {
3847         int fd = open(file_path, O_RDONLY);
3848         off_t pkg_size;
3849         uint8_t *buf = NULL;
3850         int ret = 0;
3851         struct stat st_buf;
3852
3853         if (size)
3854                 *size = 0;
3855
3856         if (fd == -1) {
3857                 printf("%s: Failed to open %s\n", __func__, file_path);
3858                 return buf;
3859         }
3860
3861         if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
3862                 close(fd);
3863                 printf("%s: Failed to stat %s or not a regular file\n", __func__, file_path);
3864                 return buf;
3865         }
3866
3867         pkg_size = st_buf.st_size;
3868         if (pkg_size < 0) {
3869                 close(fd);
3870                 printf("%s: File operations failed\n", __func__);
3871                 return buf;
3872         }
3873
3874         buf = (uint8_t *)malloc(pkg_size);
3875         if (!buf) {
3876                 close(fd);
3877                 printf("%s: Failed to allocate memory\n", __func__);
3878                 return buf;
3879         }
3880
3881         ret = read(fd, buf, pkg_size);
3882         if (ret < 0) {
3883                 close(fd);
3884                 printf("%s: File read operation failed\n", __func__);
3885                 close_file(buf);
3886                 return NULL;
3887         }
3888
3889         if (size)
3890                 *size = pkg_size;
3891
3892         close(fd);
3893
3894         return buf;
3895 }
3896
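/* Write a buffer to a file, returning 0 on success and -1 on any error. */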
3897 int
3898 save_file(const char *file_path, uint8_t *buf, uint32_t size)
3899 {
3900         FILE *fh = fopen(file_path, "wb");
3901
3902         if (fh == NULL) {
3903                 printf("%s: Failed to open %s\n", __func__, file_path);
3904                 return -1;
3905         }
3906
3907         if (fwrite(buf, 1, size, fh) != size) {
3908                 fclose(fh);
3909                 printf("%s: File write operation failed\n", __func__);
3910                 return -1;
3911         }
3912
3913         fclose(fh);
3914
3915         return 0;
3916 }
3917
3918 int
3919 close_file(uint8_t *buf)
3920 {
3921         if (buf) {
3922                 free((void *)buf);
3923                 return 0;
3924         }
3925
3926         return -1;
3927 }
3928
3929 void
3930 port_queue_region_info_display(portid_t port_id, void *buf)
3931 {
3932 #ifdef RTE_LIBRTE_I40E_PMD
3933         uint16_t i, j;
3934         struct rte_pmd_i40e_queue_regions *info =
3935                 (struct rte_pmd_i40e_queue_regions *)buf;
3936         static const char *queue_region_info_stats_border = "-------";
3937
3938         if (!info->queue_region_number)
3939                 printf("No queue region has been set on this port");
3940
3941         printf("\n      %s All queue region info for port=%2d %s",
3942                         queue_region_info_stats_border, port_id,
3943                         queue_region_info_stats_border);
3944         printf("\n      queue_region_number: %-14u \n",
3945                         info->queue_region_number);
3946
3947         for (i = 0; i < info->queue_region_number; i++) {
3948                 printf("\n      region_id: %-14u queue_number: %-14u "
3949                         "queue_start_index: %-14u \n",
3950                         info->region[i].region_id,
3951                         info->region[i].queue_num,
3952                         info->region[i].queue_start_index);
3953
3954                 printf("  user_priority_num is  %-14u :",
3955                                         info->region[i].user_priority_num);
3956                 for (j = 0; j < info->region[i].user_priority_num; j++)
3957                         printf(" %-14u ", info->region[i].user_priority[j]);
3958
3959                 printf("\n      flowtype_num is  %-14u :",
3960                                 info->region[i].flowtype_num);
3961                 for (j = 0; j < info->region[i].flowtype_num; j++)
3962                         printf(" %-14u ", info->region[i].hw_flowtype[j]);
3963         }
3964 #else
3965         RTE_SET_USED(port_id);
3966         RTE_SET_USED(buf);
3967 #endif
3968
3969         printf("\n\n");
3970 }