app/testpmd: check status of getting link info
[dpdk.git] / app/test-pmd/config.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation.
3  * Copyright 2013-2014 6WIND S.A.
4  */
5
6 #include <stdarg.h>
7 #include <errno.h>
8 #include <stdio.h>
9 #include <string.h>
10 #include <stdint.h>
11 #include <inttypes.h>
12
13 #include <sys/queue.h>
14 #include <sys/types.h>
15 #include <sys/stat.h>
16 #include <fcntl.h>
17 #include <unistd.h>
18
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
22 #include <rte_log.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
33 #include <rte_mbuf.h>
34 #include <rte_interrupts.h>
35 #include <rte_pci.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
40 #include <rte_flow.h>
41 #include <rte_errno.h>
42 #ifdef RTE_LIBRTE_IXGBE_PMD
43 #include <rte_pmd_ixgbe.h>
44 #endif
45 #ifdef RTE_LIBRTE_I40E_PMD
46 #include <rte_pmd_i40e.h>
47 #endif
48 #ifdef RTE_LIBRTE_BNXT_PMD
49 #include <rte_pmd_bnxt.h>
50 #endif
51 #include <rte_gro.h>
52 #include <rte_config.h>
53
54 #include "testpmd.h"
55
56 static char *flowtype_to_str(uint16_t flow_type);
57
58 static const struct {
59         enum tx_pkt_split split;
60         const char *name;
61 } tx_split_name[] = {
62         {
63                 .split = TX_PKT_SPLIT_OFF,
64                 .name = "off",
65         },
66         {
67                 .split = TX_PKT_SPLIT_ON,
68                 .name = "on",
69         },
70         {
71                 .split = TX_PKT_SPLIT_RND,
72                 .name = "rand",
73         },
74 };
75
76 const struct rss_type_info rss_type_table[] = {
77         { "all", ETH_RSS_IP | ETH_RSS_TCP |
78                         ETH_RSS_UDP | ETH_RSS_SCTP |
79                         ETH_RSS_L2_PAYLOAD },
80         { "none", 0 },
81         { "ipv4", ETH_RSS_IPV4 },
82         { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
83         { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
84         { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
85         { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
86         { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
87         { "ipv6", ETH_RSS_IPV6 },
88         { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
89         { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
90         { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
91         { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
92         { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
93         { "l2-payload", ETH_RSS_L2_PAYLOAD },
94         { "ipv6-ex", ETH_RSS_IPV6_EX },
95         { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
96         { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
97         { "port", ETH_RSS_PORT },
98         { "vxlan", ETH_RSS_VXLAN },
99         { "geneve", ETH_RSS_GENEVE },
100         { "nvgre", ETH_RSS_NVGRE },
101         { "ip", ETH_RSS_IP },
102         { "udp", ETH_RSS_UDP },
103         { "tcp", ETH_RSS_TCP },
104         { "sctp", ETH_RSS_SCTP },
105         { "tunnel", ETH_RSS_TUNNEL },
106         { NULL, 0 },
107 };
108
109 static void
110 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
111 {
112         char buf[RTE_ETHER_ADDR_FMT_SIZE];
113         rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
114         printf("%s%s", name, buf);
115 }
116
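/*
 * Show basic statistics for a port (packet, byte, error, missed and
 * no-mbuf counters from rte_eth_stats_get()), per-queue counters when
 * queue stats mapping is enabled, and the Rx/Tx packet rates since the
 * previous call, derived from the TSC delta. Backs testpmd's
 * "show port stats" command.
 */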
117 void
118 nic_stats_display(portid_t port_id)
119 {
120         static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
121         static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
122         static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
123         uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
124         uint64_t mpps_rx, mpps_tx;
125         struct rte_eth_stats stats;
126         struct rte_port *port = &ports[port_id];
127         uint8_t i;
128
129         static const char *nic_stats_border = "########################";
130
131         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
132                 print_valid_ports();
133                 return;
134         }
135         rte_eth_stats_get(port_id, &stats);
136         printf("\n  %s NIC statistics for port %-2d %s\n",
137                nic_stats_border, port_id, nic_stats_border);
138
139         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
140                 printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
141                        "%-"PRIu64"\n",
142                        stats.ipackets, stats.imissed, stats.ibytes);
143                 printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
144                 printf("  RX-nombuf:  %-10"PRIu64"\n",
145                        stats.rx_nombuf);
146                 printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
147                        "%-"PRIu64"\n",
148                        stats.opackets, stats.oerrors, stats.obytes);
149         }
150         else {
151                 printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
152                        "    RX-bytes: %10"PRIu64"\n",
153                        stats.ipackets, stats.ierrors, stats.ibytes);
154                 printf("  RX-errors:  %10"PRIu64"\n", stats.ierrors);
155                 printf("  RX-nombuf:               %10"PRIu64"\n",
156                        stats.rx_nombuf);
157                 printf("  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
158                        "    TX-bytes: %10"PRIu64"\n",
159                        stats.opackets, stats.oerrors, stats.obytes);
160         }
161
162         if (port->rx_queue_stats_mapping_enabled) {
163                 printf("\n");
164                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
165                         printf("  Stats reg %2d RX-packets: %10"PRIu64
166                                "    RX-errors: %10"PRIu64
167                                "    RX-bytes: %10"PRIu64"\n",
168                                i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
169                 }
170         }
171         if (port->tx_queue_stats_mapping_enabled) {
172                 printf("\n");
173                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
174                         printf("  Stats reg %2d TX-packets: %10"PRIu64
175                                "                             TX-bytes: %10"PRIu64"\n",
176                                i, stats.q_opackets[i], stats.q_obytes[i]);
177                 }
178         }
179
180         diff_cycles = prev_cycles[port_id];
181         prev_cycles[port_id] = rte_rdtsc();
182         if (diff_cycles > 0)
183                 diff_cycles = prev_cycles[port_id] - diff_cycles;
184
185         diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
186                 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
187         diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
188                 (stats.opackets - prev_pkts_tx[port_id]) : 0;
189         prev_pkts_rx[port_id] = stats.ipackets;
190         prev_pkts_tx[port_id] = stats.opackets;
191         mpps_rx = diff_cycles > 0 ?
192                 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
193         mpps_tx = diff_cycles > 0 ?
194                 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
195         printf("\n  Throughput (since last show)\n");
196         printf("  Rx-pps: %12"PRIu64"\n  Tx-pps: %12"PRIu64"\n",
197                         mpps_rx, mpps_tx);
198
199         printf("  %s############################%s\n",
200                nic_stats_border, nic_stats_border);
201 }
202
203 void
204 nic_stats_clear(portid_t port_id)
205 {
206         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
207                 print_valid_ports();
208                 return;
209         }
210         rte_eth_stats_reset(port_id);
211         printf("\n  NIC statistics for port %d cleared\n", port_id);
212 }
213
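/*
 * Show extended statistics for a port: query the xstats count, fetch the
 * id/name lookup table and the values, then print each entry, skipping
 * zero-valued entries when xstats_hide_zero is set.
 */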
214 void
215 nic_xstats_display(portid_t port_id)
216 {
217         struct rte_eth_xstat *xstats;
218         int cnt_xstats, idx_xstat;
219         struct rte_eth_xstat_name *xstats_names;
220
221         printf("###### NIC extended statistics for port %-2d\n", port_id);
222         if (!rte_eth_dev_is_valid_port(port_id)) {
223                 printf("Error: Invalid port number %i\n", port_id);
224                 return;
225         }
226
227         /* Get count */
228         cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
229         if (cnt_xstats  < 0) {
230                 printf("Error: Cannot get count of xstats\n");
231                 return;
232         }
233
234         /* Get id-name lookup table */
235         xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
236         if (xstats_names == NULL) {
237                 printf("Cannot allocate memory for xstats lookup\n");
238                 return;
239         }
240         if (cnt_xstats != rte_eth_xstats_get_names(
241                         port_id, xstats_names, cnt_xstats)) {
242                 printf("Error: Cannot get xstats lookup\n");
243                 free(xstats_names);
244                 return;
245         }
246
247         /* Get stats themselves */
248         xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
249         if (xstats == NULL) {
250                 printf("Cannot allocate memory for xstats\n");
251                 free(xstats_names);
252                 return;
253         }
254         if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
255                 printf("Error: Unable to get xstats\n");
256                 free(xstats_names);
257                 free(xstats);
258                 return;
259         }
260
261         /* Display xstats */
262         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
263                 if (xstats_hide_zero && !xstats[idx_xstat].value)
264                         continue;
265                 printf("%s: %"PRIu64"\n",
266                         xstats_names[idx_xstat].name,
267                         xstats[idx_xstat].value);
268         }
269         free(xstats_names);
270         free(xstats);
271 }
272
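/* Reset the extended statistics of a port and report any failure of
 * rte_eth_xstats_reset().
 */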
273 void
274 nic_xstats_clear(portid_t port_id)
275 {
276         int ret;
277
278         ret = rte_eth_xstats_reset(port_id);
279         if (ret != 0) {
280                 printf("%s: Error: failed to reset xstats (port %u): %s\n",
281                        __func__, port_id, strerror(-ret));
282         }
283 }
284
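/*
 * Show which Rx/Tx queues of a port are mapped to which per-queue
 * statistics registers, based on the global rx/tx_queue_stats_mappings
 * tables; prints a notice if the port has no mapping configured.
 */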
285 void
286 nic_stats_mapping_display(portid_t port_id)
287 {
288         struct rte_port *port = &ports[port_id];
289         uint16_t i;
290
291         static const char *nic_stats_mapping_border = "########################";
292
293         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
294                 print_valid_ports();
295                 return;
296         }
297
298         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
299                 printf("Port id %d - either does not support queue statistic mapping or"
300                        " no queue statistic mapping set\n", port_id);
301                 return;
302         }
303
304         printf("\n  %s NIC statistics mapping for port %-2d %s\n",
305                nic_stats_mapping_border, port_id, nic_stats_mapping_border);
306
307         if (port->rx_queue_stats_mapping_enabled) {
308                 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
309                         if (rx_queue_stats_mappings[i].port_id == port_id) {
310                                 printf("  RX-queue %2d mapped to Stats Reg %2d\n",
311                                        rx_queue_stats_mappings[i].queue_id,
312                                        rx_queue_stats_mappings[i].stats_counter_id);
313                         }
314                 }
315                 printf("\n");
316         }
317
318
319         if (port->tx_queue_stats_mapping_enabled) {
320                 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
321                         if (tx_queue_stats_mappings[i].port_id == port_id) {
322                                 printf("  TX-queue %2d mapped to Stats Reg %2d\n",
323                                        tx_queue_stats_mappings[i].queue_id,
324                                        tx_queue_stats_mappings[i].stats_counter_id);
325                         }
326                 }
327         }
328
329         printf("  %s####################################%s\n",
330                nic_stats_mapping_border, nic_stats_mapping_border);
331 }
332
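/*
 * Show the configuration of one Rx queue (mempool, prefetch/host/writeback
 * thresholds, drop and deferred-start flags, scatter mode, descriptor
 * count) as reported by rte_eth_rx_queue_info_get().
 * tx_queue_infos_display() below is the Tx-side counterpart.
 */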
333 void
334 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
335 {
336         struct rte_eth_rxq_info qinfo;
337         int32_t rc;
338         static const char *info_border = "*********************";
339
340         rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
341         if (rc != 0) {
342                 printf("Failed to retrieve information for port: %u, "
343                         "RX queue: %hu\nerror desc: %s(%d)\n",
344                         port_id, queue_id, strerror(-rc), rc);
345                 return;
346         }
347
348         printf("\n%s Infos for port %-2u, RX queue %-2u %s",
349                info_border, port_id, queue_id, info_border);
350
351         printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
352         printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
353         printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
354         printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
355         printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
356         printf("\nRX drop packets: %s",
357                 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
358         printf("\nRX deferred start: %s",
359                 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
360         printf("\nRX scattered packets: %s",
361                 (qinfo.scattered_rx != 0) ? "on" : "off");
362         printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
363         printf("\n");
364 }
365
366 void
367 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
368 {
369         struct rte_eth_txq_info qinfo;
370         int32_t rc;
371         static const char *info_border = "*********************";
372
373         rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
374         if (rc != 0) {
375                 printf("Failed to retrieve information for port: %u, "
376                         "TX queue: %hu\nerror desc: %s(%d)\n",
377                         port_id, queue_id, strerror(-rc), rc);
378                 return;
379         }
380
381         printf("\n%s Infos for port %-2u, TX queue %-2u %s",
382                info_border, port_id, queue_id, info_border);
383
384         printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
385         printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
386         printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
387         printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
388         printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
389         printf("\nTX deferred start: %s",
390                 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
391         printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
392         printf("\n");
393 }
394
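/*
 * Comparison callback for rte_bus_find(); a return value of 0 means
 * "match", so every registered bus is accepted and device_infos_display()
 * can iterate over all of them.
 */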
395 static int bus_match_all(const struct rte_bus *bus, const void *data)
396 {
397         RTE_SET_USED(bus);
398         RTE_SET_USED(data);
399         return 0;
400 }
401
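/*
 * Walk all buses that support device iteration and show, for each probed
 * device (or only the devices matching the optional devargs identifier,
 * e.g. a PCI address), its bus, driver, devargs, NUMA node and the ethdev
 * ports bound to it.
 */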
402 void
403 device_infos_display(const char *identifier)
404 {
405         static const char *info_border = "*********************";
406         struct rte_bus *start = NULL, *next;
407         struct rte_dev_iterator dev_iter;
408         char name[RTE_ETH_NAME_MAX_LEN];
409         struct rte_ether_addr mac_addr;
410         struct rte_device *dev;
411         struct rte_devargs da;
412         portid_t port_id;
413         char devstr[128];
414
415         memset(&da, 0, sizeof(da));
416         if (!identifier)
417                 goto skip_parse;
418
419         if (rte_devargs_parsef(&da, "%s", identifier)) {
420                 printf("cannot parse identifier\n");
421                 if (da.args)
422                         free(da.args);
423                 return;
424         }
425
426 skip_parse:
427         while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
428
429                 start = next;
430                 if (identifier && da.bus != next)
431                         continue;
432
433                 /* Skip buses that don't have iterate method */
434                 if (!next->dev_iterate)
435                         continue;
436
437                 snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
438                 RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
439
440                         if (!dev->driver)
441                                 continue;
442                         /* Check for matching device if identifier is present */
443                         if (identifier &&
444                             strncmp(da.name, dev->name, strlen(dev->name)))
445                                 continue;
446                         printf("\n%s Infos for device %s %s\n",
447                                info_border, dev->name, info_border);
448                         printf("Bus name: %s", dev->bus->name);
449                         printf("\nDriver name: %s", dev->driver->name);
450                         printf("\nDevargs: %s",
451                                dev->devargs ? dev->devargs->args : "");
452                         printf("\nConnect to socket: %d", dev->numa_node);
453                         printf("\n");
454
455                         /* List ports with matching device name */
456                         RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
457                                 rte_eth_macaddr_get(port_id, &mac_addr);
458                                 printf("\n\tPort id: %-2d", port_id);
459                                 print_ethaddr("\n\tMAC address: ", &mac_addr);
460                                 rte_eth_dev_get_name_by_port(port_id, name);
461                                 printf("\n\tDevice name: %s", name);
462                                 printf("\n");
463                         }
464                 }
465         }
466 }
467
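/*
 * Show detailed information for one port: MAC address, device and driver
 * names, link status and speed, MTU, VLAN offload state, supported RSS
 * offload types, queue and descriptor limits, and switch domain/port
 * information when valid.
 */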
468 void
469 port_infos_display(portid_t port_id)
470 {
471         struct rte_port *port;
472         struct rte_ether_addr mac_addr;
473         struct rte_eth_link link;
474         struct rte_eth_dev_info dev_info;
475         int vlan_offload;
476         struct rte_mempool * mp;
477         static const char *info_border = "*********************";
478         uint16_t mtu;
479         char name[RTE_ETH_NAME_MAX_LEN];
480         int ret;
481
482         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
483                 print_valid_ports();
484                 return;
485         }
486         port = &ports[port_id];
487         ret = eth_link_get_nowait_print_err(port_id, &link);
488         if (ret < 0)
489                 return;
490
491         ret = eth_dev_info_get_print_err(port_id, &dev_info);
492         if (ret != 0)
493                 return;
494
495         printf("\n%s Infos for port %-2d %s\n",
496                info_border, port_id, info_border);
497         rte_eth_macaddr_get(port_id, &mac_addr);
498         print_ethaddr("MAC address: ", &mac_addr);
499         rte_eth_dev_get_name_by_port(port_id, name);
500         printf("\nDevice name: %s", name);
501         printf("\nDriver name: %s", dev_info.driver_name);
502         if (dev_info.device->devargs && dev_info.device->devargs->args)
503                 printf("\nDevargs: %s", dev_info.device->devargs->args);
504         printf("\nConnect to socket: %u", port->socket_id);
505
506         if (port_numa[port_id] != NUMA_NO_CONFIG) {
507                 mp = mbuf_pool_find(port_numa[port_id]);
508                 if (mp)
509                         printf("\nmemory allocation on the socket: %d",
510                                                         port_numa[port_id]);
511         } else
512                 printf("\nmemory allocation on the socket: %u", port->socket_id);
513
514         printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
515         printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
516         printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
517                ("full-duplex") : ("half-duplex"));
518
519         if (!rte_eth_dev_get_mtu(port_id, &mtu))
520                 printf("MTU: %u\n", mtu);
521
522         printf("Promiscuous mode: %s\n",
523                rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
524         printf("Allmulticast mode: %s\n",
525                rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
526         printf("Maximum number of MAC addresses: %u\n",
527                (unsigned int)(port->dev_info.max_mac_addrs));
528         printf("Maximum number of MAC addresses of hash filtering: %u\n",
529                (unsigned int)(port->dev_info.max_hash_mac_addrs));
530
531         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
532         if (vlan_offload >= 0) {
533                 printf("VLAN offload: \n");
534                 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
535                         printf("  strip on \n");
536                 else
537                         printf("  strip off \n");
538
539                 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
540                         printf("  filter on \n");
541                 else
542                         printf("  filter off \n");
543
544                 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
545                         printf("  qinq(extend) on \n");
546                 else
547                         printf("  qinq(extend) off \n");
548         }
549
550         if (dev_info.hash_key_size > 0)
551                 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
552         if (dev_info.reta_size > 0)
553                 printf("Redirection table size: %u\n", dev_info.reta_size);
554         if (!dev_info.flow_type_rss_offloads)
555                 printf("No RSS offload flow type is supported.\n");
556         else {
557                 uint16_t i;
558                 char *p;
559
560                 printf("Supported RSS offload flow types:\n");
561                 for (i = RTE_ETH_FLOW_UNKNOWN + 1;
562                      i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
563                         if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
564                                 continue;
565                         p = flowtype_to_str(i);
566                         if (p)
567                                 printf("  %s\n", p);
568                         else
569                                 printf("  user defined %d\n", i);
570                 }
571         }
572
573         printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
574         printf("Maximum configurable length of RX packet: %u\n",
575                 dev_info.max_rx_pktlen);
576         if (dev_info.max_vfs)
577                 printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
578         if (dev_info.max_vmdq_pools)
579                 printf("Maximum number of VMDq pools: %u\n",
580                         dev_info.max_vmdq_pools);
581
582         printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
583         printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
584         printf("Max possible number of RXDs per queue: %hu\n",
585                 dev_info.rx_desc_lim.nb_max);
586         printf("Min possible number of RXDs per queue: %hu\n",
587                 dev_info.rx_desc_lim.nb_min);
588         printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
589
590         printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
591         printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
592         printf("Max possible number of TXDs per queue: %hu\n",
593                 dev_info.tx_desc_lim.nb_max);
594         printf("Min possible number of TXDs per queue: %hu\n",
595                 dev_info.tx_desc_lim.nb_min);
596         printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
597         printf("Max segment number per packet: %hu\n",
598                 dev_info.tx_desc_lim.nb_seg_max);
599         printf("Max segment number per MTU/TSO: %hu\n",
600                 dev_info.tx_desc_lim.nb_mtu_seg_max);
601
602         /* Show switch info only if valid switch domain and port id is set */
603         if (dev_info.switch_info.domain_id !=
604                 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
605                 if (dev_info.switch_info.name)
606                         printf("Switch name: %s\n", dev_info.switch_info.name);
607
608                 printf("Switch domain Id: %u\n",
609                         dev_info.switch_info.domain_id);
610                 printf("Switch Port Id: %u\n",
611                         dev_info.switch_info.port_id);
612         }
613 }
614
615 void
616 port_summary_header_display(void)
617 {
618         uint16_t port_number;
619
620         port_number = rte_eth_dev_count_avail();
621         printf("Number of available ports: %i\n", port_number);
622         printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
623                         "Driver", "Status", "Link");
624 }
625
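/*
 * Print one row of the port summary table introduced by
 * port_summary_header_display(): port id, MAC address, device name,
 * driver, link status and link speed.
 */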
626 void
627 port_summary_display(portid_t port_id)
628 {
629         struct rte_ether_addr mac_addr;
630         struct rte_eth_link link;
631         struct rte_eth_dev_info dev_info;
632         char name[RTE_ETH_NAME_MAX_LEN];
633         int ret;
634
635         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
636                 print_valid_ports();
637                 return;
638         }
639
640         ret = eth_link_get_nowait_print_err(port_id, &link);
641         if (ret < 0)
642                 return;
643
644         ret = eth_dev_info_get_print_err(port_id, &dev_info);
645         if (ret != 0)
646                 return;
647
648         rte_eth_dev_get_name_by_port(port_id, name);
649         rte_eth_macaddr_get(port_id, &mac_addr);
650
651         printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
652                 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
653                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
654                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
655                 dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
656                 (unsigned int) link.link_speed);
657 }
658
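/*
 * For every Rx/Tx offload capability reported by the driver in
 * rte_eth_dev_info, print whether that offload is currently enabled in
 * the port's configuration.
 */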
659 void
660 port_offload_cap_display(portid_t port_id)
661 {
662         struct rte_eth_dev_info dev_info;
663         static const char *info_border = "************";
664         int ret;
665
666         if (port_id_is_invalid(port_id, ENABLED_WARN))
667                 return;
668
669         ret = eth_dev_info_get_print_err(port_id, &dev_info);
670         if (ret != 0)
671                 return;
672
673         printf("\n%s Port %d supported offload features: %s\n",
674                 info_border, port_id, info_border);
675
676         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
677                 printf("VLAN stripped:                 ");
678                 if (ports[port_id].dev_conf.rxmode.offloads &
679                     DEV_RX_OFFLOAD_VLAN_STRIP)
680                         printf("on\n");
681                 else
682                         printf("off\n");
683         }
684
685         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
686                 printf("Double VLANs stripped:         ");
687                 if (ports[port_id].dev_conf.rxmode.offloads &
688                     DEV_RX_OFFLOAD_QINQ_STRIP)
689                         printf("on\n");
690                 else
691                         printf("off\n");
692         }
693
694         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
695                 printf("RX IPv4 checksum:              ");
696                 if (ports[port_id].dev_conf.rxmode.offloads &
697                     DEV_RX_OFFLOAD_IPV4_CKSUM)
698                         printf("on\n");
699                 else
700                         printf("off\n");
701         }
702
703         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
704                 printf("RX UDP checksum:               ");
705                 if (ports[port_id].dev_conf.rxmode.offloads &
706                     DEV_RX_OFFLOAD_UDP_CKSUM)
707                         printf("on\n");
708                 else
709                         printf("off\n");
710         }
711
712         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
713                 printf("RX TCP checksum:               ");
714                 if (ports[port_id].dev_conf.rxmode.offloads &
715                     DEV_RX_OFFLOAD_TCP_CKSUM)
716                         printf("on\n");
717                 else
718                         printf("off\n");
719         }
720
721         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
722                 printf("RX SCTP checksum:              ");
723                 if (ports[port_id].dev_conf.rxmode.offloads &
724                     DEV_RX_OFFLOAD_SCTP_CKSUM)
725                         printf("on\n");
726                 else
727                         printf("off\n");
728         }
729
730         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
731                 printf("RX Outer IPv4 checksum:        ");
732                 if (ports[port_id].dev_conf.rxmode.offloads &
733                     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
734                         printf("on\n");
735                 else
736                         printf("off\n");
737         }
738
739         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
740                 printf("RX Outer UDP checksum:         ");
741                 if (ports[port_id].dev_conf.rxmode.offloads &
742                     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
743                         printf("on\n");
744                 else
745                         printf("off\n");
746         }
747
748         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
749                 printf("Large receive offload:         ");
750                 if (ports[port_id].dev_conf.rxmode.offloads &
751                     DEV_RX_OFFLOAD_TCP_LRO)
752                         printf("on\n");
753                 else
754                         printf("off\n");
755         }
756
757         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
758                 printf("HW timestamp:                  ");
759                 if (ports[port_id].dev_conf.rxmode.offloads &
760                     DEV_RX_OFFLOAD_TIMESTAMP)
761                         printf("on\n");
762                 else
763                         printf("off\n");
764         }
765
766         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
767                 printf("Rx Keep CRC:                   ");
768                 if (ports[port_id].dev_conf.rxmode.offloads &
769                     DEV_RX_OFFLOAD_KEEP_CRC)
770                         printf("on\n");
771                 else
772                         printf("off\n");
773         }
774
775         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
776                 printf("RX offload security:           ");
777                 if (ports[port_id].dev_conf.rxmode.offloads &
778                     DEV_RX_OFFLOAD_SECURITY)
779                         printf("on\n");
780                 else
781                         printf("off\n");
782         }
783
784         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
785                 printf("VLAN insert:                   ");
786                 if (ports[port_id].dev_conf.txmode.offloads &
787                     DEV_TX_OFFLOAD_VLAN_INSERT)
788                         printf("on\n");
789                 else
790                         printf("off\n");
791         }
792
793         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
794                 printf("Double VLANs insert:           ");
795                 if (ports[port_id].dev_conf.txmode.offloads &
796                     DEV_TX_OFFLOAD_QINQ_INSERT)
797                         printf("on\n");
798                 else
799                         printf("off\n");
800         }
801
802         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
803                 printf("TX IPv4 checksum:              ");
804                 if (ports[port_id].dev_conf.txmode.offloads &
805                     DEV_TX_OFFLOAD_IPV4_CKSUM)
806                         printf("on\n");
807                 else
808                         printf("off\n");
809         }
810
811         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
812                 printf("TX UDP checksum:               ");
813                 if (ports[port_id].dev_conf.txmode.offloads &
814                     DEV_TX_OFFLOAD_UDP_CKSUM)
815                         printf("on\n");
816                 else
817                         printf("off\n");
818         }
819
820         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
821                 printf("TX TCP checksum:               ");
822                 if (ports[port_id].dev_conf.txmode.offloads &
823                     DEV_TX_OFFLOAD_TCP_CKSUM)
824                         printf("on\n");
825                 else
826                         printf("off\n");
827         }
828
829         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
830                 printf("TX SCTP checksum:              ");
831                 if (ports[port_id].dev_conf.txmode.offloads &
832                     DEV_TX_OFFLOAD_SCTP_CKSUM)
833                         printf("on\n");
834                 else
835                         printf("off\n");
836         }
837
838         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
839                 printf("TX Outer IPv4 checksum:        ");
840                 if (ports[port_id].dev_conf.txmode.offloads &
841                     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
842                         printf("on\n");
843                 else
844                         printf("off\n");
845         }
846
847         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
848                 printf("TX TCP segmentation:           ");
849                 if (ports[port_id].dev_conf.txmode.offloads &
850                     DEV_TX_OFFLOAD_TCP_TSO)
851                         printf("on\n");
852                 else
853                         printf("off\n");
854         }
855
856         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
857                 printf("TX UDP segmentation:           ");
858                 if (ports[port_id].dev_conf.txmode.offloads &
859                     DEV_TX_OFFLOAD_UDP_TSO)
860                         printf("on\n");
861                 else
862                         printf("off\n");
863         }
864
865         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
866                 printf("TSO for VXLAN tunnel packet:   ");
867                 if (ports[port_id].dev_conf.txmode.offloads &
868                     DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
869                         printf("on\n");
870                 else
871                         printf("off\n");
872         }
873
874         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
875                 printf("TSO for GRE tunnel packet:     ");
876                 if (ports[port_id].dev_conf.txmode.offloads &
877                     DEV_TX_OFFLOAD_GRE_TNL_TSO)
878                         printf("on\n");
879                 else
880                         printf("off\n");
881         }
882
883         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
884                 printf("TSO for IPIP tunnel packet:    ");
885                 if (ports[port_id].dev_conf.txmode.offloads &
886                     DEV_TX_OFFLOAD_IPIP_TNL_TSO)
887                         printf("on\n");
888                 else
889                         printf("off\n");
890         }
891
892         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
893                 printf("TSO for GENEVE tunnel packet:  ");
894                 if (ports[port_id].dev_conf.txmode.offloads &
895                     DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
896                         printf("on\n");
897                 else
898                         printf("off\n");
899         }
900
901         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
902                 printf("IP tunnel TSO:                 ");
903                 if (ports[port_id].dev_conf.txmode.offloads &
904                     DEV_TX_OFFLOAD_IP_TNL_TSO)
905                         printf("on\n");
906                 else
907                         printf("off\n");
908         }
909
910         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
911                 printf("UDP tunnel TSO:                ");
912                 if (ports[port_id].dev_conf.txmode.offloads &
913                     DEV_TX_OFFLOAD_UDP_TNL_TSO)
914                         printf("on\n");
915                 else
916                         printf("off\n");
917         }
918
919         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
920                 printf("TX Outer UDP checksum:         ");
921                 if (ports[port_id].dev_conf.txmode.offloads &
922                     DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
923                         printf("on\n");
924                 else
925                         printf("off\n");
926         }
927
928 }
929
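/*
 * Return 0 if port_id is RTE_PORT_ALL or matches a currently valid ethdev
 * port, 1 otherwise; a warning is printed when warning is ENABLED_WARN.
 */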
930 int
931 port_id_is_invalid(portid_t port_id, enum print_warning warning)
932 {
933         uint16_t pid;
934
935         if (port_id == (portid_t)RTE_PORT_ALL)
936                 return 0;
937
938         RTE_ETH_FOREACH_DEV(pid)
939                 if (port_id == pid)
940                         return 0;
941
942         if (warning == ENABLED_WARN)
943                 printf("Invalid port %d\n", port_id);
944
945         return 1;
946 }
947
948 void print_valid_ports(void)
949 {
950         portid_t pid;
951
952         printf("The valid ports array is [");
953         RTE_ETH_FOREACH_DEV(pid) {
954                 printf(" %d", pid);
955         }
956         printf(" ]\n");
957 }
958
959 static int
960 vlan_id_is_invalid(uint16_t vlan_id)
961 {
962         if (vlan_id < 4096)
963                 return 0;
964         printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
965         return 1;
966 }
967
968 static int
969 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
970 {
971         const struct rte_pci_device *pci_dev;
972         const struct rte_bus *bus;
973         uint64_t pci_len;
974
975         if (reg_off & 0x3) {
976                 printf("Port register offset 0x%X not aligned on a 4-byte "
977                        "boundary\n",
978                        (unsigned)reg_off);
979                 return 1;
980         }
981
982         if (!ports[port_id].dev_info.device) {
983                 printf("Invalid device\n");
984                 return 1;
985         }
986
987         bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
988         if (bus && !strcmp(bus->name, "pci")) {
989                 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
990         } else {
991                 printf("Not a PCI device\n");
992                 return 1;
993         }
994
995         pci_len = pci_dev->mem_resource[0].len;
996         if (reg_off >= pci_len) {
997                 printf("Port %d: register offset %u (0x%X) out of port PCI "
998                        "resource (length=%"PRIu64")\n",
999                        port_id, (unsigned)reg_off, (unsigned)reg_off,  pci_len);
1000                 return 1;
1001         }
1002         return 0;
1003 }
1004
1005 static int
1006 reg_bit_pos_is_invalid(uint8_t bit_pos)
1007 {
1008         if (bit_pos <= 31)
1009                 return 0;
1010         printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
1011         return 1;
1012 }
1013
1014 #define display_port_and_reg_off(port_id, reg_off) \
1015         printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
1016
1017 static inline void
1018 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1019 {
1020         display_port_and_reg_off(port_id, (unsigned)reg_off);
1021         printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
1022 }
1023
1024 void
1025 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
1026 {
1027         uint32_t reg_v;
1028
1029
1030         if (port_id_is_invalid(port_id, ENABLED_WARN))
1031                 return;
1032         if (port_reg_off_is_invalid(port_id, reg_off))
1033                 return;
1034         if (reg_bit_pos_is_invalid(bit_x))
1035                 return;
1036         reg_v = port_id_pci_reg_read(port_id, reg_off);
1037         display_port_and_reg_off(port_id, (unsigned)reg_off);
1038         printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
1039 }
1040
1041 void
1042 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
1043                            uint8_t bit1_pos, uint8_t bit2_pos)
1044 {
1045         uint32_t reg_v;
1046         uint8_t  l_bit;
1047         uint8_t  h_bit;
1048
1049         if (port_id_is_invalid(port_id, ENABLED_WARN))
1050                 return;
1051         if (port_reg_off_is_invalid(port_id, reg_off))
1052                 return;
1053         if (reg_bit_pos_is_invalid(bit1_pos))
1054                 return;
1055         if (reg_bit_pos_is_invalid(bit2_pos))
1056                 return;
1057         if (bit1_pos > bit2_pos)
1058                 l_bit = bit2_pos, h_bit = bit1_pos;
1059         else
1060                 l_bit = bit1_pos, h_bit = bit2_pos;
1061
1062         reg_v = port_id_pci_reg_read(port_id, reg_off);
1063         reg_v >>= l_bit;
1064         if (h_bit < 31)
1065                 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
1066         display_port_and_reg_off(port_id, (unsigned)reg_off);
1067         printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
1068                ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
1069 }
1070
1071 void
1072 port_reg_display(portid_t port_id, uint32_t reg_off)
1073 {
1074         uint32_t reg_v;
1075
1076         if (port_id_is_invalid(port_id, ENABLED_WARN))
1077                 return;
1078         if (port_reg_off_is_invalid(port_id, reg_off))
1079                 return;
1080         reg_v = port_id_pci_reg_read(port_id, reg_off);
1081         display_port_reg_value(port_id, reg_off, reg_v);
1082 }
1083
1084 void
1085 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
1086                  uint8_t bit_v)
1087 {
1088         uint32_t reg_v;
1089
1090         if (port_id_is_invalid(port_id, ENABLED_WARN))
1091                 return;
1092         if (port_reg_off_is_invalid(port_id, reg_off))
1093                 return;
1094         if (reg_bit_pos_is_invalid(bit_pos))
1095                 return;
1096         if (bit_v > 1) {
1097                 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
1098                 return;
1099         }
1100         reg_v = port_id_pci_reg_read(port_id, reg_off);
1101         if (bit_v == 0)
1102                 reg_v &= ~(1 << bit_pos);
1103         else
1104                 reg_v |= (1 << bit_pos);
1105         port_id_pci_reg_write(port_id, reg_off, reg_v);
1106         display_port_reg_value(port_id, reg_off, reg_v);
1107 }
1108
1109 void
1110 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
1111                        uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
1112 {
1113         uint32_t max_v;
1114         uint32_t reg_v;
1115         uint8_t  l_bit;
1116         uint8_t  h_bit;
1117
1118         if (port_id_is_invalid(port_id, ENABLED_WARN))
1119                 return;
1120         if (port_reg_off_is_invalid(port_id, reg_off))
1121                 return;
1122         if (reg_bit_pos_is_invalid(bit1_pos))
1123                 return;
1124         if (reg_bit_pos_is_invalid(bit2_pos))
1125                 return;
1126         if (bit1_pos > bit2_pos)
1127                 l_bit = bit2_pos, h_bit = bit1_pos;
1128         else
1129                 l_bit = bit1_pos, h_bit = bit2_pos;
1130
1131         if ((h_bit - l_bit) < 31)
1132                 max_v = (1 << (h_bit - l_bit + 1)) - 1;
1133         else
1134                 max_v = 0xFFFFFFFF;
1135
1136         if (value > max_v) {
1137                 printf("Invalid value %u (0x%x), must be <= %u (0x%x)\n",
1138                                 (unsigned)value, (unsigned)value,
1139                                 (unsigned)max_v, (unsigned)max_v);
1140                 return;
1141         }
1142         reg_v = port_id_pci_reg_read(port_id, reg_off);
1143         reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1144         reg_v |= (value << l_bit); /* Set changed bits */
1145         port_id_pci_reg_write(port_id, reg_off, reg_v);
1146         display_port_reg_value(port_id, reg_off, reg_v);
1147 }
1148
1149 void
1150 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1151 {
1152         if (port_id_is_invalid(port_id, ENABLED_WARN))
1153                 return;
1154         if (port_reg_off_is_invalid(port_id, reg_off))
1155                 return;
1156         port_id_pci_reg_write(port_id, reg_off, reg_v);
1157         display_port_reg_value(port_id, reg_off, reg_v);
1158 }
1159
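/*
 * Set the MTU of a port after checking it against the min/max MTU range
 * reported by the driver in rte_eth_dev_info.
 */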
1160 void
1161 port_mtu_set(portid_t port_id, uint16_t mtu)
1162 {
1163         int diag;
1164         struct rte_eth_dev_info dev_info;
1165         int ret;
1166
1167         if (port_id_is_invalid(port_id, ENABLED_WARN))
1168                 return;
1169
1170         ret = eth_dev_info_get_print_err(port_id, &dev_info);
1171         if (ret != 0)
1172                 return;
1173
1174         if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
1175                 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
1176                         mtu, dev_info.min_mtu, dev_info.max_mtu);
1177                 return;
1178         }
1179         diag = rte_eth_dev_set_mtu(port_id, mtu);
1180         if (diag == 0)
1181                 return;
1182         printf("Set MTU failed. diag=%d\n", diag);
1183 }
1184
1185 /* Generic flow management functions. */
1186
1187 /** Generate a port_flow entry from attributes/pattern/actions. */
1188 static struct port_flow *
1189 port_flow_new(const struct rte_flow_attr *attr,
1190               const struct rte_flow_item *pattern,
1191               const struct rte_flow_action *actions,
1192               struct rte_flow_error *error)
1193 {
1194         const struct rte_flow_conv_rule rule = {
1195                 .attr_ro = attr,
1196                 .pattern_ro = pattern,
1197                 .actions_ro = actions,
1198         };
1199         struct port_flow *pf;
1200         int ret;
1201
1202         ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1203         if (ret < 0)
1204                 return NULL;
1205         pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1206         if (!pf) {
1207                 rte_flow_error_set
1208                         (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1209                          "calloc() failed");
1210                 return NULL;
1211         }
1212         if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1213                           error) >= 0)
1214                 return pf;
1215         free(pf);
1216         return NULL;
1217 }
1218
1219 /** Print a message out of a flow error. */
1220 static int
1221 port_flow_complain(struct rte_flow_error *error)
1222 {
1223         static const char *const errstrlist[] = {
1224                 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1225                 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1226                 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1227                 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1228                 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1229                 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1230                 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1231                 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1232                 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1233                 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1234                 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1235                 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1236                 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1237                 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1238                 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1239                 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1240                 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1241         };
1242         const char *errstr;
1243         char buf[32];
1244         int err = rte_errno;
1245
1246         if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1247             !errstrlist[error->type])
1248                 errstr = "unknown type";
1249         else
1250                 errstr = errstrlist[error->type];
1251         printf("Caught error type %d (%s): %s%s: %s\n",
1252                error->type, errstr,
1253                error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1254                                         error->cause), buf) : "",
1255                error->message ? error->message : "(no stated reason)",
1256                rte_strerror(err));
1257         return -err;
1258 }
1259
1260 /** Validate flow rule. */
1261 int
1262 port_flow_validate(portid_t port_id,
1263                    const struct rte_flow_attr *attr,
1264                    const struct rte_flow_item *pattern,
1265                    const struct rte_flow_action *actions)
1266 {
1267         struct rte_flow_error error;
1268
1269         /* Poisoning to make sure PMDs update it in case of error. */
1270         memset(&error, 0x11, sizeof(error));
1271         if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1272                 return port_flow_complain(&error);
1273         printf("Flow rule validated\n");
1274         return 0;
1275 }
1276
1277 /** Create flow rule. */
1278 int
1279 port_flow_create(portid_t port_id,
1280                  const struct rte_flow_attr *attr,
1281                  const struct rte_flow_item *pattern,
1282                  const struct rte_flow_action *actions)
1283 {
1284         struct rte_flow *flow;
1285         struct rte_port *port;
1286         struct port_flow *pf;
1287         uint32_t id;
1288         struct rte_flow_error error;
1289
1290         /* Poisoning to make sure PMDs update it in case of error. */
1291         memset(&error, 0x22, sizeof(error));
1292         flow = rte_flow_create(port_id, attr, pattern, actions, &error);
1293         if (!flow)
1294                 return port_flow_complain(&error);
1295         port = &ports[port_id];
1296         if (port->flow_list) {
1297                 if (port->flow_list->id == UINT32_MAX) {
1298                         printf("Highest rule ID is already assigned, delete"
1299                                " it first");
1300                         rte_flow_destroy(port_id, flow, NULL);
1301                         return -ENOMEM;
1302                 }
1303                 id = port->flow_list->id + 1;
1304         } else
1305                 id = 0;
1306         pf = port_flow_new(attr, pattern, actions, &error);
1307         if (!pf) {
1308                 rte_flow_destroy(port_id, flow, NULL);
1309                 return port_flow_complain(&error);
1310         }
1311         pf->next = port->flow_list;
1312         pf->id = id;
1313         pf->flow = flow;
1314         port->flow_list = pf;
1315         printf("Flow rule #%u created\n", pf->id);
1316         return 0;
1317 }
1318
1319 /** Destroy a number of flow rules. */
1320 int
1321 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
1322 {
1323         struct rte_port *port;
1324         struct port_flow **tmp;
1325         uint32_t c = 0;
1326         int ret = 0;
1327
1328         if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1329             port_id == (portid_t)RTE_PORT_ALL)
1330                 return -EINVAL;
1331         port = &ports[port_id];
1332         tmp = &port->flow_list;
1333         while (*tmp) {
1334                 uint32_t i;
1335
1336                 for (i = 0; i != n; ++i) {
1337                         struct rte_flow_error error;
1338                         struct port_flow *pf = *tmp;
1339
1340                         if (rule[i] != pf->id)
1341                                 continue;
1342                         /*
1343                          * Poisoning to make sure PMDs update it in case
1344                          * of error.
1345                          */
1346                         memset(&error, 0x33, sizeof(error));
1347                         if (rte_flow_destroy(port_id, pf->flow, &error)) {
1348                                 ret = port_flow_complain(&error);
1349                                 continue;
1350                         }
1351                         printf("Flow rule #%u destroyed\n", pf->id);
1352                         *tmp = pf->next;
1353                         free(pf);
1354                         break;
1355                 }
1356                 if (i == n)
1357                         tmp = &(*tmp)->next;
1358                 ++c;
1359         }
1360         return ret;
1361 }
1362
1363 /** Remove all flow rules. */
1364 int
1365 port_flow_flush(portid_t port_id)
1366 {
1367         struct rte_flow_error error;
1368         struct rte_port *port;
1369         int ret = 0;
1370
1371         /* Poisoning to make sure PMDs update it in case of error. */
1372         memset(&error, 0x44, sizeof(error));
1373         if (rte_flow_flush(port_id, &error)) {
1374                 ret = port_flow_complain(&error);
1375                 if (port_id_is_invalid(port_id, DISABLED_WARN) ||
1376                     port_id == (portid_t)RTE_PORT_ALL)
1377                         return ret;
1378         }
1379         port = &ports[port_id];
1380         while (port->flow_list) {
1381                 struct port_flow *pf = port->flow_list->next;
1382
1383                 free(port->flow_list);
1384                 port->flow_list = pf;
1385         }
1386         return ret;
1387 }
1388
1389 /** Query a flow rule. */
1390 int
1391 port_flow_query(portid_t port_id, uint32_t rule,
1392                 const struct rte_flow_action *action)
1393 {
1394         struct rte_flow_error error;
1395         struct rte_port *port;
1396         struct port_flow *pf;
1397         const char *name;
1398         union {
1399                 struct rte_flow_query_count count;
1400         } query;
1401         int ret;
1402
1403         if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1404             port_id == (portid_t)RTE_PORT_ALL)
1405                 return -EINVAL;
1406         port = &ports[port_id];
1407         for (pf = port->flow_list; pf; pf = pf->next)
1408                 if (pf->id == rule)
1409                         break;
1410         if (!pf) {
1411                 printf("Flow rule #%u not found\n", rule);
1412                 return -ENOENT;
1413         }
1414         ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1415                             &name, sizeof(name),
1416                             (void *)(uintptr_t)action->type, &error);
1417         if (ret < 0)
1418                 return port_flow_complain(&error);
1419         switch (action->type) {
1420         case RTE_FLOW_ACTION_TYPE_COUNT:
1421                 break;
1422         default:
1423                 printf("Cannot query action type %d (%s)\n",
1424                         action->type, name);
1425                 return -ENOTSUP;
1426         }
1427         /* Poisoning to make sure PMDs update it in case of error. */
1428         memset(&error, 0x55, sizeof(error));
1429         memset(&query, 0, sizeof(query));
1430         if (rte_flow_query(port_id, pf->flow, action, &query, &error))
1431                 return port_flow_complain(&error);
1432         switch (action->type) {
1433         case RTE_FLOW_ACTION_TYPE_COUNT:
1434                 printf("%s:\n"
1435                        " hits_set: %u\n"
1436                        " bytes_set: %u\n"
1437                        " hits: %" PRIu64 "\n"
1438                        " bytes: %" PRIu64 "\n",
1439                        name,
1440                        query.count.hits_set,
1441                        query.count.bytes_set,
1442                        query.count.hits,
1443                        query.count.bytes);
1444                 break;
1445         default:
1446                 printf("Cannot display result for action type %d (%s)\n",
1447                        action->type, name);
1448                 break;
1449         }
1450         return 0;
1451 }
1452
1453 /** List flow rules. */
1454 void
1455 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
1456 {
1457         struct rte_port *port;
1458         struct port_flow *pf;
1459         struct port_flow *list = NULL;
1460         uint32_t i;
1461
1462         if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1463             port_id == (portid_t)RTE_PORT_ALL)
1464                 return;
1465         port = &ports[port_id];
1466         if (!port->flow_list)
1467                 return;
1468         /* Sort flows by group, priority and ID. */
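        /*
         * Insertion sort into a temporary list chained through pf->tmp:
         * each flow is inserted in front of the first entry that does not
         * sort before it, yielding ascending (group, priority, ID) order.
         */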
1469         for (pf = port->flow_list; pf != NULL; pf = pf->next) {
1470                 struct port_flow **tmp;
1471                 const struct rte_flow_attr *curr = pf->rule.attr;
1472
1473                 if (n) {
1474                         /* Filter out unwanted groups. */
1475                         for (i = 0; i != n; ++i)
1476                                 if (curr->group == group[i])
1477                                         break;
1478                         if (i == n)
1479                                 continue;
1480                 }
1481                 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
1482                         const struct rte_flow_attr *comp = (*tmp)->rule.attr;
1483
1484                         if (curr->group > comp->group ||
1485                             (curr->group == comp->group &&
1486                              curr->priority > comp->priority) ||
1487                             (curr->group == comp->group &&
1488                              curr->priority == comp->priority &&
1489                              pf->id > (*tmp)->id))
1490                                 continue;
1491                         break;
1492                 }
1493                 pf->tmp = *tmp;
1494                 *tmp = pf;
1495         }
1496         printf("ID\tGroup\tPrio\tAttr\tRule\n");
1497         for (pf = list; pf != NULL; pf = pf->tmp) {
1498                 const struct rte_flow_item *item = pf->rule.pattern;
1499                 const struct rte_flow_action *action = pf->rule.actions;
1500                 const char *name;
1501
1502                 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
1503                        pf->id,
1504                        pf->rule.attr->group,
1505                        pf->rule.attr->priority,
1506                        pf->rule.attr->ingress ? 'i' : '-',
1507                        pf->rule.attr->egress ? 'e' : '-',
1508                        pf->rule.attr->transfer ? 't' : '-');
1509                 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1510                         if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
1511                                           &name, sizeof(name),
1512                                           (void *)(uintptr_t)item->type,
1513                                           NULL) <= 0)
1514                                 name = "[UNKNOWN]";
1515                         if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
1516                                 printf("%s ", name);
1517                         ++item;
1518                 }
1519                 printf("=>");
1520                 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
1521                         if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1522                                           &name, sizeof(name),
1523                                           (void *)(uintptr_t)action->type,
1524                                           NULL) <= 0)
1525                                 name = "[UNKNOWN]";
1526                         if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
1527                                 printf(" %s", name);
1528                         ++action;
1529                 }
1530                 printf("\n");
1531         }
1532 }
1533
1534 /** Restrict ingress traffic to the defined flow rules. */
1535 int
1536 port_flow_isolate(portid_t port_id, int set)
1537 {
1538         struct rte_flow_error error;
1539
1540         /* Poisoning to make sure PMDs update it in case of error. */
1541         memset(&error, 0x66, sizeof(error));
1542         if (rte_flow_isolate(port_id, set, &error))
1543                 return port_flow_complain(&error);
1544         printf("Ingress traffic on port %u is %s to the defined flow rules\n",
1545                port_id,
1546                set ? "now restricted" : "not restricted anymore");
1547         return 0;
1548 }
1549
1550 /*
1551  * RX/TX ring descriptors display functions.
1552  */
1553 int
1554 rx_queue_id_is_invalid(queueid_t rxq_id)
1555 {
1556         if (rxq_id < nb_rxq)
1557                 return 0;
1558         printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
1559         return 1;
1560 }
1561
1562 int
1563 tx_queue_id_is_invalid(queueid_t txq_id)
1564 {
1565         if (txq_id < nb_txq)
1566                 return 0;
1567         printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
1568         return 1;
1569 }
1570
1571 static int
1572 rx_desc_id_is_invalid(uint16_t rxdesc_id)
1573 {
1574         if (rxdesc_id < nb_rxd)
1575                 return 0;
1576         printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
1577                rxdesc_id, nb_rxd);
1578         return 1;
1579 }
1580
1581 static int
1582 tx_desc_id_is_invalid(uint16_t txdesc_id)
1583 {
1584         if (txdesc_id < nb_txd)
1585                 return 0;
1586         printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
1587                txdesc_id, nb_txd);
1588         return 1;
1589 }
1590
1591 static const struct rte_memzone *
1592 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
1593 {
1594         char mz_name[RTE_MEMZONE_NAMESIZE];
1595         const struct rte_memzone *mz;
1596
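        /*
         * The PMD is expected to have registered the queue ring memzone
         * under the "eth_p<port>_q<queue>_<ring_name>" name built below;
         * if it has not, the lookup simply fails.
         */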
1597         snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
1598                         port_id, q_id, ring_name);
1599         mz = rte_memzone_lookup(mz_name);
1600         if (mz == NULL)
1601                 printf("%s ring memory zone of (port %d, queue %d) not"
1602                        " found (zone name = %s)\n",
1603                        ring_name, port_id, q_id, mz_name);
1604         return mz;
1605 }
1606
1607 union igb_ring_dword {
1608         uint64_t dword;
1609         struct {
1610 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1611                 uint32_t lo;
1612                 uint32_t hi;
1613 #else
1614                 uint32_t hi;
1615                 uint32_t lo;
1616 #endif
1617         } words;
1618 };
1619
1620 struct igb_ring_desc_32_bytes {
1621         union igb_ring_dword lo_dword;
1622         union igb_ring_dword hi_dword;
1623         union igb_ring_dword resv1;
1624         union igb_ring_dword resv2;
1625 };
1626
1627 struct igb_ring_desc_16_bytes {
1628         union igb_ring_dword lo_dword;
1629         union igb_ring_dword hi_dword;
1630 };
1631
1632 static void
1633 ring_rxd_display_dword(union igb_ring_dword dword)
1634 {
1635         printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
1636                                         (unsigned)dword.words.hi);
1637 }
1638
1639 static void
1640 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
1641 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1642                            portid_t port_id,
1643 #else
1644                            __rte_unused portid_t port_id,
1645 #endif
1646                            uint16_t desc_id)
1647 {
1648         int ret;
1649         struct igb_ring_desc_16_bytes *ring =
1650                 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1651 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1652         struct rte_eth_dev_info dev_info;
1653
1654         ret = eth_dev_info_get_print_err(port_id, &dev_info);
1655         if (ret != 0)
1656                 return;
1657
1658         if (strstr(dev_info.driver_name, "i40e") != NULL) {
1659                 /* 32 bytes RX descriptor, i40e only */
1660                 struct igb_ring_desc_32_bytes *ring =
1661                         (struct igb_ring_desc_32_bytes *)ring_mz->addr;
1662                 ring[desc_id].lo_dword.dword =
1663                         rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1664                 ring_rxd_display_dword(ring[desc_id].lo_dword);
1665                 ring[desc_id].hi_dword.dword =
1666                         rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1667                 ring_rxd_display_dword(ring[desc_id].hi_dword);
1668                 ring[desc_id].resv1.dword =
1669                         rte_le_to_cpu_64(ring[desc_id].resv1.dword);
1670                 ring_rxd_display_dword(ring[desc_id].resv1);
1671                 ring[desc_id].resv2.dword =
1672                         rte_le_to_cpu_64(ring[desc_id].resv2.dword);
1673                 ring_rxd_display_dword(ring[desc_id].resv2);
1674
1675                 return;
1676         }
1677 #endif
1678         /* 16 bytes RX descriptor */
1679         ring[desc_id].lo_dword.dword =
1680                 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1681         ring_rxd_display_dword(ring[desc_id].lo_dword);
1682         ring[desc_id].hi_dword.dword =
1683                 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1684         ring_rxd_display_dword(ring[desc_id].hi_dword);
1685 }
1686
1687 static void
1688 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
1689 {
1690         struct igb_ring_desc_16_bytes *ring;
1691         struct igb_ring_desc_16_bytes txd;
1692
1693         ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1694         txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1695         txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1696         printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
1697                         (unsigned)txd.lo_dword.words.lo,
1698                         (unsigned)txd.lo_dword.words.hi,
1699                         (unsigned)txd.hi_dword.words.lo,
1700                         (unsigned)txd.hi_dword.words.hi);
1701 }
1702
1703 void
1704 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
1705 {
1706         const struct rte_memzone *rx_mz;
1707
1708         if (port_id_is_invalid(port_id, ENABLED_WARN))
1709                 return;
1710         if (rx_queue_id_is_invalid(rxq_id))
1711                 return;
1712         if (rx_desc_id_is_invalid(rxd_id))
1713                 return;
1714         rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
1715         if (rx_mz == NULL)
1716                 return;
1717         ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
1718 }
1719
1720 void
1721 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
1722 {
1723         const struct rte_memzone *tx_mz;
1724
1725         if (port_id_is_invalid(port_id, ENABLED_WARN))
1726                 return;
1727         if (tx_queue_id_is_invalid(txq_id))
1728                 return;
1729         if (tx_desc_id_is_invalid(txd_id))
1730                 return;
1731         tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
1732         if (tx_mz == NULL)
1733                 return;
1734         ring_tx_descriptor_display(tx_mz, txd_id);
1735 }
1736
1737 void
1738 fwd_lcores_config_display(void)
1739 {
1740         lcoreid_t lc_id;
1741
1742         printf("List of forwarding lcores:");
1743         for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
1744                 printf(" %2u", fwd_lcores_cpuids[lc_id]);
1745         printf("\n");
1746 }
1747 void
1748 rxtx_config_display(void)
1749 {
1750         portid_t pid;
1751         queueid_t qid;
1752
1753         printf("  %s packet forwarding%s packets/burst=%d\n",
1754                cur_fwd_eng->fwd_mode_name,
1755                retry_enabled == 0 ? "" : " with retry",
1756                nb_pkt_per_burst);
1757
1758         if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
1759                 printf("  packet len=%u - nb packet segments=%d\n",
1760                                 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
1761
1762         printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
1763                nb_fwd_lcores, nb_fwd_ports);
1764
1765         RTE_ETH_FOREACH_DEV(pid) {
1766                 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
1767                 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
1768                 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
1769                 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
1770                 uint16_t nb_rx_desc_tmp;
1771                 uint16_t nb_tx_desc_tmp;
1772                 struct rte_eth_rxq_info rx_qinfo;
1773                 struct rte_eth_txq_info tx_qinfo;
1774                 int32_t rc;
1775
1776                 /* per port config */
1777                 printf("  port %d: RX queue number: %d Tx queue number: %d\n",
1778                                 (unsigned int)pid, nb_rxq, nb_txq);
1779
1780                 printf("    Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
1781                                 ports[pid].dev_conf.rxmode.offloads,
1782                                 ports[pid].dev_conf.txmode.offloads);
1783
1784                 /* per rx queue config only for first queue to be less verbose */
1785                 for (qid = 0; qid < 1; qid++) {
1786                         rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
1787                         if (rc)
1788                                 nb_rx_desc_tmp = nb_rx_desc[qid];
1789                         else
1790                                 nb_rx_desc_tmp = rx_qinfo.nb_desc;
1791
1792                         printf("    RX queue: %d\n", qid);
1793                         printf("      RX desc=%d - RX free threshold=%d\n",
1794                                 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
1795                         printf("      RX threshold registers: pthresh=%d hthresh=%d"
1796                                 " wthresh=%d\n",
1797                                 rx_conf[qid].rx_thresh.pthresh,
1798                                 rx_conf[qid].rx_thresh.hthresh,
1799                                 rx_conf[qid].rx_thresh.wthresh);
1800                         printf("      RX Offloads=0x%"PRIx64"\n",
1801                                 rx_conf[qid].offloads);
1802                 }
1803
1804                 /* per tx queue config only for first queue to be less verbose */
1805                 for (qid = 0; qid < 1; qid++) {
1806                         rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
1807                         if (rc)
1808                                 nb_tx_desc_tmp = nb_tx_desc[qid];
1809                         else
1810                                 nb_tx_desc_tmp = tx_qinfo.nb_desc;
1811
1812                         printf("    TX queue: %d\n", qid);
1813                         printf("      TX desc=%d - TX free threshold=%d\n",
1814                                 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
1815                         printf("      TX threshold registers: pthresh=%d hthresh=%d"
1816                                 " wthresh=%d\n",
1817                                 tx_conf[qid].tx_thresh.pthresh,
1818                                 tx_conf[qid].tx_thresh.hthresh,
1819                                 tx_conf[qid].tx_thresh.wthresh);
1820                         printf("      TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
1821                                 tx_conf[qid].offloads, tx_conf->tx_rs_thresh);
1822                 }
1823         }
1824 }
1825
1826 void
1827 port_rss_reta_info(portid_t port_id,
1828                    struct rte_eth_rss_reta_entry64 *reta_conf,
1829                    uint16_t nb_entries)
1830 {
1831         uint16_t i, idx, shift;
1832         int ret;
1833
1834         if (port_id_is_invalid(port_id, ENABLED_WARN))
1835                 return;
1836
1837         ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
1838         if (ret != 0) {
1839                 printf("Failed to get RSS RETA info, return code = %d\n", ret);
1840                 return;
1841         }
1842
1843         for (i = 0; i < nb_entries; i++) {
1844                 idx = i / RTE_RETA_GROUP_SIZE;
1845                 shift = i % RTE_RETA_GROUP_SIZE;
1846                 if (!(reta_conf[idx].mask & (1ULL << shift)))
1847                         continue;
1848                 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
1849                                         i, reta_conf[idx].reta[shift]);
1850         }
1851 }
1852
1853 /*
1854  * Displays the RSS hash functions of a port and, optionally, the RSS hash
1855  * key of the port.
1856  */
1857 void
1858 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
1859 {
1860         struct rte_eth_rss_conf rss_conf = {0};
1861         uint8_t rss_key[RSS_HASH_KEY_LENGTH];
1862         uint64_t rss_hf;
1863         uint8_t i;
1864         int diag;
1865         struct rte_eth_dev_info dev_info;
1866         uint8_t hash_key_size;
1867         int ret;
1868
1869         if (port_id_is_invalid(port_id, ENABLED_WARN))
1870                 return;
1871
1872         ret = eth_dev_info_get_print_err(port_id, &dev_info);
1873         if (ret != 0)
1874                 return;
1875
1876         if (dev_info.hash_key_size > 0 &&
1877                         dev_info.hash_key_size <= sizeof(rss_key))
1878                 hash_key_size = dev_info.hash_key_size;
1879         else {
1880                 printf("dev_info did not provide a valid hash key size\n");
1881                 return;
1882         }
1883
1884         /* Get RSS hash key if asked to display it */
1885         rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
1886         rss_conf.rss_key_len = hash_key_size;
1887         diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1888         if (diag != 0) {
1889                 switch (diag) {
1890                 case -ENODEV:
1891                         printf("port index %d invalid\n", port_id);
1892                         break;
1893                 case -ENOTSUP:
1894                         printf("operation not supported by device\n");
1895                         break;
1896                 default:
1897                         printf("operation failed - diag=%d\n", diag);
1898                         break;
1899                 }
1900                 return;
1901         }
1902         rss_hf = rss_conf.rss_hf;
1903         if (rss_hf == 0) {
1904                 printf("RSS disabled\n");
1905                 return;
1906         }
1907         printf("RSS functions:\n ");
1908         for (i = 0; rss_type_table[i].str; i++) {
1909                 if (rss_hf & rss_type_table[i].rss_type)
1910                         printf("%s ", rss_type_table[i].str);
1911         }
1912         printf("\n");
1913         if (!show_rss_key)
1914                 return;
1915         printf("RSS key:\n");
1916         for (i = 0; i < hash_key_size; i++)
1917                 printf("%02X", rss_key[i]);
1918         printf("\n");
1919 }
1920
1921 void
1922 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
1923                          uint hash_key_len)
1924 {
1925         struct rte_eth_rss_conf rss_conf;
1926         int diag;
1927         unsigned int i;
1928
1929         rss_conf.rss_key = NULL;
1930         rss_conf.rss_key_len = hash_key_len;
1931         rss_conf.rss_hf = 0;
1932         for (i = 0; rss_type_table[i].str; i++) {
1933                 if (!strcmp(rss_type_table[i].str, rss_type))
1934                         rss_conf.rss_hf = rss_type_table[i].rss_type;
1935         }
1936         diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1937         if (diag == 0) {
1938                 rss_conf.rss_key = hash_key;
1939                 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
1940         }
1941         if (diag == 0)
1942                 return;
1943
1944         switch (diag) {
1945         case -ENODEV:
1946                 printf("port index %d invalid\n", port_id);
1947                 break;
1948         case -ENOTSUP:
1949                 printf("operation not supported by device\n");
1950                 break;
1951         default:
1952                 printf("operation failed - diag=%d\n", diag);
1953                 break;
1954         }
1955 }
1956
1957 /*
1958  * Setup forwarding configuration for each logical core.
1959  */
1960 static void
1961 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
1962 {
1963         streamid_t nb_fs_per_lcore;
1964         streamid_t nb_fs;
1965         streamid_t sm_id;
1966         lcoreid_t  nb_extra;
1967         lcoreid_t  nb_fc;
1968         lcoreid_t  nb_lc;
1969         lcoreid_t  lc_id;
1970
1971         nb_fs = cfg->nb_fwd_streams;
1972         nb_fc = cfg->nb_fwd_lcores;
1973         if (nb_fs <= nb_fc) {
1974                 nb_fs_per_lcore = 1;
1975                 nb_extra = 0;
1976         } else {
1977                 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
1978                 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
1979         }
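        /*
         * Example: with nb_fs = 10 streams and nb_fc = 4 lcores,
         * nb_fs_per_lcore = 2 and nb_extra = 2, so the first two lcores
         * get 2 streams each and the last two lcores get 3 streams each.
         */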
1980
1981         nb_lc = (lcoreid_t) (nb_fc - nb_extra);
1982         sm_id = 0;
1983         for (lc_id = 0; lc_id < nb_lc; lc_id++) {
1984                 fwd_lcores[lc_id]->stream_idx = sm_id;
1985                 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
1986                 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1987         }
1988
1989         /*
1990          * Assign extra remaining streams, if any.
1991          */
1992         nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
1993         for (lc_id = 0; lc_id < nb_extra; lc_id++) {
1994                 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
1995                 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
1996                 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1997         }
1998 }
1999
2000 static portid_t
2001 fwd_topology_tx_port_get(portid_t rxp)
2002 {
2003         static int warning_once = 1;
2004
2005         RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
2006
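        /*
         * Port pairing examples: "paired" maps 0<->1, 2<->3, ...;
         * "chained" maps 0->1->2->...->last->0; "loop" sends packets
         * back on the receiving port itself.
         */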
2007         switch (port_topology) {
2008         default:
2009         case PORT_TOPOLOGY_PAIRED:
2010                 if ((rxp & 0x1) == 0) {
2011                         if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2012                                 return rxp + 1;
2013                         if (warning_once) {
2014                                 printf("\nWarning! port-topology=paired"
2015                                        " and an odd number of forwarding ports,"
2016                                        " the last port will pair with"
2017                                        " itself.\n\n");
2018                                 warning_once = 0;
2019                         }
2020                         return rxp;
2021                 }
2022                 return rxp - 1;
2023         case PORT_TOPOLOGY_CHAINED:
2024                 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2025         case PORT_TOPOLOGY_LOOP:
2026                 return rxp;
2027         }
2028 }
2029
2030 static void
2031 simple_fwd_config_setup(void)
2032 {
2033         portid_t i;
2034
2035         cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2036         cur_fwd_config.nb_fwd_streams =
2037                 (streamid_t) cur_fwd_config.nb_fwd_ports;
2038
2039         /* reinitialize forwarding streams */
2040         init_fwd_streams();
2041
2042         /*
2043          * In the simple forwarding test, the number of forwarding cores
2044          * must be lower than or equal to the number of forwarding ports.
2045          */
2046         cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2047         if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2048                 cur_fwd_config.nb_fwd_lcores =
2049                         (lcoreid_t) cur_fwd_config.nb_fwd_ports;
2050         setup_fwd_config_of_each_lcore(&cur_fwd_config);
2051
2052         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2053                 fwd_streams[i]->rx_port   = fwd_ports_ids[i];
2054                 fwd_streams[i]->rx_queue  = 0;
2055                 fwd_streams[i]->tx_port   =
2056                                 fwd_ports_ids[fwd_topology_tx_port_get(i)];
2057                 fwd_streams[i]->tx_queue  = 0;
2058                 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2059                 fwd_streams[i]->retry_enabled = retry_enabled;
2060         }
2061 }
2062
2063 /**
2064  * For the RSS forwarding test, all streams are distributed over the lcores.
2065  * Each stream is composed of an RX queue polled on an RX port for input
2066  * packets, paired with a TX queue of a TX port used to send forwarded packets.
2067  */
2068 static void
2069 rss_fwd_config_setup(void)
2070 {
2071         portid_t   rxp;
2072         portid_t   txp;
2073         queueid_t  rxq;
2074         queueid_t  nb_q;
2075         streamid_t  sm_id;
2076
2077         nb_q = nb_rxq;
2078         if (nb_q > nb_txq)
2079                 nb_q = nb_txq;
2080         cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2081         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2082         cur_fwd_config.nb_fwd_streams =
2083                 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2084
2085         if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2086                 cur_fwd_config.nb_fwd_lcores =
2087                         (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2088
2089         /* reinitialize forwarding streams */
2090         init_fwd_streams();
2091
2092         setup_fwd_config_of_each_lcore(&cur_fwd_config);
2093         rxp = 0; rxq = 0;
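        /*
         * Streams are assigned round-robin over ports first, then queues:
         * e.g. with 2 ports and 2 queues the order is (p0,q0), (p1,q0),
         * (p0,q1), (p1,q1).
         */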
2094         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2095                 struct fwd_stream *fs;
2096
2097                 fs = fwd_streams[sm_id];
2098                 txp = fwd_topology_tx_port_get(rxp);
2099                 fs->rx_port = fwd_ports_ids[rxp];
2100                 fs->rx_queue = rxq;
2101                 fs->tx_port = fwd_ports_ids[txp];
2102                 fs->tx_queue = rxq;
2103                 fs->peer_addr = fs->tx_port;
2104                 fs->retry_enabled = retry_enabled;
2105                 rxp++;
2106                 if (rxp < nb_fwd_ports)
2107                         continue;
2108                 rxp = 0;
2109                 rxq++;
2110         }
2111 }
2112
2113 /**
2114  * For the DCB forwarding test, each core is assigned to one traffic class
2115  * at a time.
2116  * Each core is assigned multiple streams, each stream being composed of
2117  * an RX queue to poll on an RX port for input packets, associated with
2118  * a TX queue of a TX port used to send forwarded packets. All RX and
2119  * TX queues of a stream map to the same traffic class.
2120  * If VMDQ and DCB co-exist, traffic classes on different pools share
2121  * the same core.
2122  */
2123 static void
2124 dcb_fwd_config_setup(void)
2125 {
2126         struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2127         portid_t txp, rxp = 0;
2128         queueid_t txq, rxq = 0;
2129         lcoreid_t  lc_id;
2130         uint16_t nb_rx_queue, nb_tx_queue;
2131         uint16_t i, j, k, sm_id = 0;
2132         uint8_t tc = 0;
2133
2134         cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2135         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2136         cur_fwd_config.nb_fwd_streams =
2137                 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2138
2139         /* reinitialize forwarding streams */
2140         init_fwd_streams();
2141         sm_id = 0;
2142         txp = 1;
2143         /* get the dcb info on the first RX and TX ports */
2144         (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2145         (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2146
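        /*
         * Each forwarding lcore handles one traffic class of the current
         * RX/TX port pair: its streams cover the RX queues of that TC
         * across all VMDQ pools. Once every TC of a port pair has been
         * assigned, move on to the next pair of ports.
         */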
2147         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2148                 fwd_lcores[lc_id]->stream_nb = 0;
2149                 fwd_lcores[lc_id]->stream_idx = sm_id;
2150                 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2151                         /* If nb_queue is zero, this TC is not enabled
2152                          * on this pool.
2153                          */
2154                         if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2155                                 break;
2156                         k = fwd_lcores[lc_id]->stream_nb +
2157                                 fwd_lcores[lc_id]->stream_idx;
2158                         rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2159                         txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2160                         nb_rx_queue = rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2161                         nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2162                         for (j = 0; j < nb_rx_queue; j++) {
2163                                 struct fwd_stream *fs;
2164
2165                                 fs = fwd_streams[k + j];
2166                                 fs->rx_port = fwd_ports_ids[rxp];
2167                                 fs->rx_queue = rxq + j;
2168                                 fs->tx_port = fwd_ports_ids[txp];
2169                                 fs->tx_queue = txq + j % nb_tx_queue;
2170                                 fs->peer_addr = fs->tx_port;
2171                                 fs->retry_enabled = retry_enabled;
2172                         }
2173                         fwd_lcores[lc_id]->stream_nb +=
2174                                 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2175                 }
2176                 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2177
2178                 tc++;
2179                 if (tc < rxp_dcb_info.nb_tcs)
2180                         continue;
2181                 /* Restart from TC 0 on next RX port */
2182                 tc = 0;
2183                 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2184                         rxp = (portid_t)
2185                                 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2186                 else
2187                         rxp++;
2188                 if (rxp >= nb_fwd_ports)
2189                         return;
2190                 /* get the dcb information on next RX and TX ports */
2191                 if ((rxp & 0x1) == 0)
2192                         txp = (portid_t) (rxp + 1);
2193                 else
2194                         txp = (portid_t) (rxp - 1);
2195                 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2196                 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2197         }
2198 }
2199
2200 static void
2201 icmp_echo_config_setup(void)
2202 {
2203         portid_t  rxp;
2204         queueid_t rxq;
2205         lcoreid_t lc_id;
2206         uint16_t  sm_id;
2207
2208         if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2209                 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2210                         (nb_txq * nb_fwd_ports);
2211         else
2212                 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2213         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2214         cur_fwd_config.nb_fwd_streams =
2215                 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2216         if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2217                 cur_fwd_config.nb_fwd_lcores =
2218                         (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2219         if (verbose_level > 0) {
2220                 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2221                        __func__,
2222                        cur_fwd_config.nb_fwd_lcores,
2223                        cur_fwd_config.nb_fwd_ports,
2224                        cur_fwd_config.nb_fwd_streams);
2225         }
2226
2227         /* reinitialize forwarding streams */
2228         init_fwd_streams();
2229         setup_fwd_config_of_each_lcore(&cur_fwd_config);
2230         rxp = 0; rxq = 0;
2231         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2232                 if (verbose_level > 0)
2233                         printf("  core=%d: \n", lc_id);
2234                 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2235                         struct fwd_stream *fs;
2236                         fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2237                         fs->rx_port = fwd_ports_ids[rxp];
2238                         fs->rx_queue = rxq;
2239                         fs->tx_port = fs->rx_port;
2240                         fs->tx_queue = rxq;
2241                         fs->peer_addr = fs->tx_port;
2242                         fs->retry_enabled = retry_enabled;
2243                         if (verbose_level > 0)
2244                                 printf("  stream=%d port=%d rxq=%d txq=%d\n",
2245                                        sm_id, fs->rx_port, fs->rx_queue,
2246                                        fs->tx_queue);
2247                         rxq = (queueid_t) (rxq + 1);
2248                         if (rxq == nb_rxq) {
2249                                 rxq = 0;
2250                                 rxp = (portid_t) (rxp + 1);
2251                         }
2252                 }
2253         }
2254 }
2255
2256 #if defined RTE_LIBRTE_PMD_SOFTNIC
2257 static void
2258 softnic_fwd_config_setup(void)
2259 {
2260         struct rte_port *port;
2261         portid_t pid, softnic_portid;
2262         queueid_t i;
2263         uint8_t softnic_enable = 0;
2264
2265         RTE_ETH_FOREACH_DEV(pid) {
2266                         port = &ports[pid];
2267                         const char *driver = port->dev_info.driver_name;
2268
2269                         if (strcmp(driver, "net_softnic") == 0) {
2270                                 softnic_portid = pid;
2271                                 softnic_enable = 1;
2272                                 break;
2273                         }
2274         }
2275
2276         if (softnic_enable == 0) {
2277                 printf("Softnic mode not configured (%s)!\n", __func__);
2278                 return;
2279         }
2280
2281         cur_fwd_config.nb_fwd_ports = 1;
2282         cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;
2283
2284         /* Re-initialize forwarding streams */
2285         init_fwd_streams();
2286
2287         /*
2288          * In the softnic forwarding test, the number of forwarding cores
2289          * is set to one and the remaining cores are used for softnic packet processing.
2290          */
2291         cur_fwd_config.nb_fwd_lcores = 1;
2292         setup_fwd_config_of_each_lcore(&cur_fwd_config);
2293
2294         for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
2295                 fwd_streams[i]->rx_port   = softnic_portid;
2296                 fwd_streams[i]->rx_queue  = i;
2297                 fwd_streams[i]->tx_port   = softnic_portid;
2298                 fwd_streams[i]->tx_queue  = i;
2299                 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2300                 fwd_streams[i]->retry_enabled = retry_enabled;
2301         }
2302 }
2303 #endif
2304
2305 void
2306 fwd_config_setup(void)
2307 {
2308         cur_fwd_config.fwd_eng = cur_fwd_eng;
2309         if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2310                 icmp_echo_config_setup();
2311                 return;
2312         }
2313
2314 #if defined RTE_LIBRTE_PMD_SOFTNIC
2315         if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
2316                 softnic_fwd_config_setup();
2317                 return;
2318         }
2319 #endif
2320
2321         if ((nb_rxq > 1) && (nb_txq > 1)) {
2322                 if (dcb_config)
2323                         dcb_fwd_config_setup();
2324                 else
2325                         rss_fwd_config_setup();
2326         }
2327         else
2328                 simple_fwd_config_setup();
2329 }
2330
2331 static const char *
2332 mp_alloc_to_str(uint8_t mode)
2333 {
2334         switch (mode) {
2335         case MP_ALLOC_NATIVE:
2336                 return "native";
2337         case MP_ALLOC_ANON:
2338                 return "anon";
2339         case MP_ALLOC_XMEM:
2340                 return "xmem";
2341         case MP_ALLOC_XMEM_HUGE:
2342                 return "xmemhuge";
2343         default:
2344                 return "invalid";
2345         }
2346 }
2347
2348 void
2349 pkt_fwd_config_display(struct fwd_config *cfg)
2350 {
2351         struct fwd_stream *fs;
2352         lcoreid_t  lc_id;
2353         streamid_t sm_id;
2354
2355         printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2356                 "NUMA support %s, MP allocation mode: %s\n",
2357                 cfg->fwd_eng->fwd_mode_name,
2358                 retry_enabled == 0 ? "" : " with retry",
2359                 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2360                 numa_support == 1 ? "enabled" : "disabled",
2361                 mp_alloc_to_str(mp_alloc_type));
2362
2363         if (retry_enabled)
2364                 printf("TX retry num: %u, delay between TX retries: %uus\n",
2365                         burst_tx_retry_num, burst_tx_delay_time);
2366         for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2367                 printf("Logical Core %u (socket %u) forwards packets on "
2368                        "%d streams:",
2369                        fwd_lcores_cpuids[lc_id],
2370                        rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2371                        fwd_lcores[lc_id]->stream_nb);
2372                 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2373                         fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2374                         printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
2375                                "P=%d/Q=%d (socket %u) ",
2376                                fs->rx_port, fs->rx_queue,
2377                                ports[fs->rx_port].socket_id,
2378                                fs->tx_port, fs->tx_queue,
2379                                ports[fs->tx_port].socket_id);
2380                         print_ethaddr("peer=",
2381                                       &peer_eth_addrs[fs->peer_addr]);
2382                 }
2383                 printf("\n");
2384         }
2385         printf("\n");
2386 }
2387
2388 void
2389 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2390 {
2391         struct rte_ether_addr new_peer_addr;
2392         if (!rte_eth_dev_is_valid_port(port_id)) {
2393                 printf("Error: Invalid port number %i\n", port_id);
2394                 return;
2395         }
2396         if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
2397                 printf("Error: Invalid ethernet address: %s\n", peer_addr);
2398                 return;
2399         }
2400         peer_eth_addrs[port_id] = new_peer_addr;
2401 }
2402
2403 int
2404 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2405 {
2406         unsigned int i;
2407         unsigned int lcore_cpuid;
2408         int record_now;
2409
2410         record_now = 0;
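        /*
         * Two passes over the list: the first pass only validates the
         * lcore ids, the second pass (record_now == 1) copies them into
         * fwd_lcores_cpuids[], so a partially invalid list is never applied.
         */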
2411  again:
2412         for (i = 0; i < nb_lc; i++) {
2413                 lcore_cpuid = lcorelist[i];
2414                 if (! rte_lcore_is_enabled(lcore_cpuid)) {
2415                         printf("lcore %u not enabled\n", lcore_cpuid);
2416                         return -1;
2417                 }
2418                 if (lcore_cpuid == rte_get_master_lcore()) {
2419                         printf("lcore %u cannot be used for packet "
2420                                "forwarding: it is the master lcore, "
2421                                "reserved for command line parsing only\n",
2422                                lcore_cpuid);
2423                         return -1;
2424                 }
2425                 if (record_now)
2426                         fwd_lcores_cpuids[i] = lcore_cpuid;
2427         }
2428         if (record_now == 0) {
2429                 record_now = 1;
2430                 goto again;
2431         }
2432         nb_cfg_lcores = (lcoreid_t) nb_lc;
2433         if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2434                 printf("previous number of forwarding cores %u - changed to "
2435                        "number of configured cores %u\n",
2436                        (unsigned int) nb_fwd_lcores, nb_lc);
2437                 nb_fwd_lcores = (lcoreid_t) nb_lc;
2438         }
2439
2440         return 0;
2441 }
2442
2443 int
2444 set_fwd_lcores_mask(uint64_t lcoremask)
2445 {
2446         unsigned int lcorelist[64];
2447         unsigned int nb_lc;
2448         unsigned int i;
2449
2450         if (lcoremask == 0) {
2451                 printf("Invalid NULL mask of cores\n");
2452                 return -1;
2453         }
2454         nb_lc = 0;
2455         for (i = 0; i < 64; i++) {
2456                 if (! ((uint64_t)(1ULL << i) & lcoremask))
2457                         continue;
2458                 lcorelist[nb_lc++] = i;
2459         }
2460         return set_fwd_lcores_list(lcorelist, nb_lc);
2461 }
2462
2463 void
2464 set_fwd_lcores_number(uint16_t nb_lc)
2465 {
2466         if (nb_lc > nb_cfg_lcores) {
2467                 printf("nb fwd cores %u > %u (max. number of configured "
2468                        "lcores) - ignored\n",
2469                        (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2470                 return;
2471         }
2472         nb_fwd_lcores = (lcoreid_t) nb_lc;
2473         printf("Number of forwarding cores set to %u\n",
2474                (unsigned int) nb_fwd_lcores);
2475 }
2476
2477 void
2478 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2479 {
2480         unsigned int i;
2481         portid_t port_id;
2482         int record_now;
2483
2484         record_now = 0;
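        /* Same two-pass validate-then-record scheme as set_fwd_lcores_list(). */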
2485  again:
2486         for (i = 0; i < nb_pt; i++) {
2487                 port_id = (portid_t) portlist[i];
2488                 if (port_id_is_invalid(port_id, ENABLED_WARN))
2489                         return;
2490                 if (record_now)
2491                         fwd_ports_ids[i] = port_id;
2492         }
2493         if (record_now == 0) {
2494                 record_now = 1;
2495                 goto again;
2496         }
2497         nb_cfg_ports = (portid_t) nb_pt;
2498         if (nb_fwd_ports != (portid_t) nb_pt) {
2499                 printf("previous number of forwarding ports %u - changed to "
2500                        "number of configured ports %u\n",
2501                        (unsigned int) nb_fwd_ports, nb_pt);
2502                 nb_fwd_ports = (portid_t) nb_pt;
2503         }
2504 }
2505
2506 void
2507 set_fwd_ports_mask(uint64_t portmask)
2508 {
2509         unsigned int portlist[64];
2510         unsigned int nb_pt;
2511         unsigned int i;
2512
2513         if (portmask == 0) {
2514                 printf("Invalid NULL mask of ports\n");
2515                 return;
2516         }
2517         nb_pt = 0;
2518         RTE_ETH_FOREACH_DEV(i) {
2519                 if (! ((uint64_t)(1ULL << i) & portmask))
2520                         continue;
2521                 portlist[nb_pt++] = i;
2522         }
2523         set_fwd_ports_list(portlist, nb_pt);
2524 }
2525
2526 void
2527 set_fwd_ports_number(uint16_t nb_pt)
2528 {
2529         if (nb_pt > nb_cfg_ports) {
2530                 printf("nb fwd ports %u > %u (number of configured "
2531                        "ports) - ignored\n",
2532                        (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2533                 return;
2534         }
2535         nb_fwd_ports = (portid_t) nb_pt;
2536         printf("Number of forwarding ports set to %u\n",
2537                (unsigned int) nb_fwd_ports);
2538 }
2539
2540 int
2541 port_is_forwarding(portid_t port_id)
2542 {
2543         unsigned int i;
2544
2545         if (port_id_is_invalid(port_id, ENABLED_WARN))
2546                 return -1;
2547
2548         for (i = 0; i < nb_fwd_ports; i++) {
2549                 if (fwd_ports_ids[i] == port_id)
2550                         return 1;
2551         }
2552
2553         return 0;
2554 }
2555
2556 void
2557 set_nb_pkt_per_burst(uint16_t nb)
2558 {
2559         if (nb > MAX_PKT_BURST) {
2560                 printf("nb pkt per burst: %u > %u (maximum packets per burst)"
2561                        " ignored\n",
2562                        (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2563                 return;
2564         }
2565         nb_pkt_per_burst = nb;
2566         printf("Number of packets per burst set to %u\n",
2567                (unsigned int) nb_pkt_per_burst);
2568 }
2569
2570 static const char *
2571 tx_split_get_name(enum tx_pkt_split split)
2572 {
2573         uint32_t i;
2574
2575         for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2576                 if (tx_split_name[i].split == split)
2577                         return tx_split_name[i].name;
2578         }
2579         return NULL;
2580 }
2581
2582 void
2583 set_tx_pkt_split(const char *name)
2584 {
2585         uint32_t i;
2586
2587         for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2588                 if (strcmp(tx_split_name[i].name, name) == 0) {
2589                         tx_pkt_split = tx_split_name[i].split;
2590                         return;
2591                 }
2592         }
2593         printf("unknown value: \"%s\"\n", name);
2594 }
2595
2596 void
2597 show_tx_pkt_segments(void)
2598 {
2599         uint32_t i, n;
2600         const char *split;
2601
2602         n = tx_pkt_nb_segs;
2603         split = tx_split_get_name(tx_pkt_split);
2604
2605         printf("Number of segments: %u\n", n);
2606         printf("Segment sizes: ");
2607         for (i = 0; i != n - 1; i++)
2608                 printf("%hu,", tx_pkt_seg_lengths[i]);
2609         printf("%hu\n", tx_pkt_seg_lengths[i]);
2610         printf("Split packet: %s\n", split);
2611 }
2612
2613 void
2614 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2615 {
2616         uint16_t tx_pkt_len;
2617         unsigned i;
2618
2619         if (nb_segs >= (unsigned) nb_txd) {
2620                 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
2621                        nb_segs, (unsigned int) nb_txd);
2622                 return;
2623         }
2624
2625         /*
2626          * Check that each segment length is no larger than the mbuf
2627          * data size.
2628          * Check also that the total packet length is at least the
2629          * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
2630          * 20 + 8).
2631          */
2632         tx_pkt_len = 0;
2633         for (i = 0; i < nb_segs; i++) {
2634                 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2635                         printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2636                                i, seg_lengths[i], (unsigned) mbuf_data_size);
2637                         return;
2638                 }
2639                 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2640         }
2641         if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
2642                 printf("total packet length=%u < %d - give up\n",
2643                                 (unsigned) tx_pkt_len,
2644                                 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
2645                 return;
2646         }
2647
2648         for (i = 0; i < nb_segs; i++)
2649                 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2650
2651         tx_pkt_length  = tx_pkt_len;
2652         tx_pkt_nb_segs = (uint8_t) nb_segs;
2653 }
2654
2655 void
2656 setup_gro(const char *onoff, portid_t port_id)
2657 {
2658         if (!rte_eth_dev_is_valid_port(port_id)) {
2659                 printf("invalid port id %u\n", port_id);
2660                 return;
2661         }
2662         if (test_done == 0) {
2663                 printf("Before enabling/disabling GRO,"
2664                                 " please stop forwarding first\n");
2665                 return;
2666         }
2667         if (strcmp(onoff, "on") == 0) {
2668                 if (gro_ports[port_id].enable != 0) {
2669                         printf("GRO is already enabled on port %u."
2670                                         " Please disable it first\n", port_id);
2671                         return;
2672                 }
2673                 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2674                         gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
2675                         gro_ports[port_id].param.max_flow_num =
2676                                 GRO_DEFAULT_FLOW_NUM;
2677                         gro_ports[port_id].param.max_item_per_flow =
2678                                 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
2679                 }
2680                 gro_ports[port_id].enable = 1;
2681         } else {
2682                 if (gro_ports[port_id].enable == 0) {
2683                         printf("GRO is already disabled on port %u\n", port_id);
2684                         return;
2685                 }
2686                 gro_ports[port_id].enable = 0;
2687         }
2688 }
2689
2690 void
2691 setup_gro_flush_cycles(uint8_t cycles)
2692 {
2693         if (test_done == 0) {
2694                 printf("Before changing the GRO flush interval,"
2695                                 " please stop forwarding first.\n");
2696                 return;
2697         }
2698
2699         if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
2700                         GRO_DEFAULT_FLUSH_CYCLES) {
2701                 printf("The flushing cycle must be in the range"
2702                                 " of 1 to %u. Reverting to the default"
2703                                 " value %u.\n",
2704                                 GRO_MAX_FLUSH_CYCLES,
2705                                 GRO_DEFAULT_FLUSH_CYCLES);
2706                 cycles = GRO_DEFAULT_FLUSH_CYCLES;
2707         }
2708
2709         gro_flush_cycles = cycles;
2710 }
2711
2712 void
2713 show_gro(portid_t port_id)
2714 {
2715         struct rte_gro_param *param;
2716         uint32_t max_pkts_num;
2717
2718         param = &gro_ports[port_id].param;
2719
2720         if (!rte_eth_dev_is_valid_port(port_id)) {
2721                 printf("Invalid port id %u.\n", port_id);
2722                 return;
2723         }
2724         if (gro_ports[port_id].enable) {
2725                 printf("GRO type: TCP/IPv4\n");
2726                 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2727                         max_pkts_num = param->max_flow_num *
2728                                 param->max_item_per_flow;
2729                 } else
2730                         max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
2731                 printf("Max number of packets to perform GRO: %u\n",
2732                                 max_pkts_num);
2733                 printf("Flushing cycles: %u\n", gro_flush_cycles);
2734         } else
2735                 printf("GRO is not enabled on port %u.\n", port_id);
2736 }
2737
2738 void
2739 setup_gso(const char *mode, portid_t port_id)
2740 {
2741         if (!rte_eth_dev_is_valid_port(port_id)) {
2742                 printf("invalid port id %u\n", port_id);
2743                 return;
2744         }
2745         if (strcmp(mode, "on") == 0) {
2746                 if (test_done == 0) {
2747                         printf("before enabling GSO,"
2748                                         " please stop forwarding first\n");
2749                         return;
2750                 }
2751                 gso_ports[port_id].enable = 1;
2752         } else if (strcmp(mode, "off") == 0) {
2753                 if (test_done == 0) {
2754                         printf("before disabling GSO,"
2755                                         " please stop forwarding first\n");
2756                         return;
2757                 }
2758                 gso_ports[port_id].enable = 0;
2759         }
2760 }
2761
2762 char*
2763 list_pkt_forwarding_modes(void)
2764 {
2765         static char fwd_modes[128] = "";
2766         const char *separator = "|";
2767         struct fwd_engine *fwd_eng;
2768         unsigned i = 0;
2769
2770         if (strlen (fwd_modes) == 0) {
2771                 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2772                         strncat(fwd_modes, fwd_eng->fwd_mode_name,
2773                                         sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2774                         strncat(fwd_modes, separator,
2775                                         sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2776                 }
2777                 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2778         }
2779
2780         return fwd_modes;
2781 }
2782
2783 char*
2784 list_pkt_forwarding_retry_modes(void)
2785 {
2786         static char fwd_modes[128] = "";
2787         const char *separator = "|";
2788         struct fwd_engine *fwd_eng;
2789         unsigned i = 0;
2790
2791         if (strlen(fwd_modes) == 0) {
2792                 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2793                         if (fwd_eng == &rx_only_engine)
2794                                 continue;
2795                         strncat(fwd_modes, fwd_eng->fwd_mode_name,
2796                                         sizeof(fwd_modes) -
2797                                         strlen(fwd_modes) - 1);
2798                         strncat(fwd_modes, separator,
2799                                         sizeof(fwd_modes) -
2800                                         strlen(fwd_modes) - 1);
2801                 }
2802                 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2803         }
2804
2805         return fwd_modes;
2806 }
2807
2808 void
2809 set_pkt_forwarding_mode(const char *fwd_mode_name)
2810 {
2811         struct fwd_engine *fwd_eng;
2812         unsigned i;
2813
2814         i = 0;
2815         while ((fwd_eng = fwd_engines[i]) != NULL) {
2816                 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
2817                         printf("Set %s packet forwarding mode%s\n",
2818                                fwd_mode_name,
2819                                retry_enabled == 0 ? "" : " with retry");
2820                         cur_fwd_eng = fwd_eng;
2821                         return;
2822                 }
2823                 i++;
2824         }
2825         printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
2826 }
2827
2828 void
2829 add_rx_dump_callbacks(portid_t portid)
2830 {
2831         struct rte_eth_dev_info dev_info;
2832         uint16_t queue;
2833         int ret;
2834
2835         if (port_id_is_invalid(portid, ENABLED_WARN))
2836                 return;
2837
2838         ret = eth_dev_info_get_print_err(portid, &dev_info);
2839         if (ret != 0)
2840                 return;
2841
2842         for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
2843                 if (!ports[portid].rx_dump_cb[queue])
2844                         ports[portid].rx_dump_cb[queue] =
2845                                 rte_eth_add_rx_callback(portid, queue,
2846                                         dump_rx_pkts, NULL);
2847 }
2848
2849 void
2850 add_tx_dump_callbacks(portid_t portid)
2851 {
2852         struct rte_eth_dev_info dev_info;
2853         uint16_t queue;
2854         int ret;
2855
2856         if (port_id_is_invalid(portid, ENABLED_WARN))
2857                 return;
2858
2859         ret = eth_dev_info_get_print_err(portid, &dev_info);
2860         if (ret != 0)
2861                 return;
2862
2863         for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
2864                 if (!ports[portid].tx_dump_cb[queue])
2865                         ports[portid].tx_dump_cb[queue] =
2866                                 rte_eth_add_tx_callback(portid, queue,
2867                                                         dump_tx_pkts, NULL);
2868 }
2869
2870 void
2871 remove_rx_dump_callbacks(portid_t portid)
2872 {
2873         struct rte_eth_dev_info dev_info;
2874         uint16_t queue;
2875         int ret;
2876
2877         if (port_id_is_invalid(portid, ENABLED_WARN))
2878                 return;
2879
2880         ret = eth_dev_info_get_print_err(portid, &dev_info);
2881         if (ret != 0)
2882                 return;
2883
2884         for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
2885                 if (ports[portid].rx_dump_cb[queue]) {
2886                         rte_eth_remove_rx_callback(portid, queue,
2887                                 ports[portid].rx_dump_cb[queue]);
2888                         ports[portid].rx_dump_cb[queue] = NULL;
2889                 }
2890 }
2891
2892 void
2893 remove_tx_dump_callbacks(portid_t portid)
2894 {
2895         struct rte_eth_dev_info dev_info;
2896         uint16_t queue;
2897         int ret;
2898
2899         if (port_id_is_invalid(portid, ENABLED_WARN))
2900                 return;
2901
2902         ret = eth_dev_info_get_print_err(portid, &dev_info);
2903         if (ret != 0)
2904                 return;
2905
2906         for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
2907                 if (ports[portid].tx_dump_cb[queue]) {
2908                         rte_eth_remove_tx_callback(portid, queue,
2909                                 ports[portid].tx_dump_cb[queue]);
2910                         ports[portid].tx_dump_cb[queue] = NULL;
2911                 }
2912 }
2913
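/*
 * Install or remove the per-queue packet dump callbacks on all ports
 * according to the verbosity level: 1 dumps received packets only, 2 dumps
 * transmitted packets only, higher levels dump both, 0 removes all dumps.
 */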
2914 void
2915 configure_rxtx_dump_callbacks(uint16_t verbose)
2916 {
2917         portid_t portid;
2918
2919 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2920         TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
2921         return;
2922 #endif
2923
2924         RTE_ETH_FOREACH_DEV(portid)
2925         {
2926                 if (verbose == 1 || verbose > 2)
2927                         add_rx_dump_callbacks(portid);
2928                 else
2929                         remove_rx_dump_callbacks(portid);
2930                 if (verbose >= 2)
2931                         add_tx_dump_callbacks(portid);
2932                 else
2933                         remove_tx_dump_callbacks(portid);
2934         }
2935 }
2936
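/* Change the global verbosity (CLI "set verbose <level>") and update the
 * Rx/Tx dump callbacks accordingly.
 */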
2937 void
2938 set_verbose_level(uint16_t vb_level)
2939 {
2940         printf("Change verbose level from %u to %u\n",
2941                (unsigned int) verbose_level, (unsigned int) vb_level);
2942         verbose_level = vb_level;
2943         configure_rxtx_dump_callbacks(verbose_level);
2944 }
2945
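/*
 * The VLAN offload setters below share the same pattern: read the current
 * VLAN offload flags, toggle the requested bit, apply the result with
 * rte_eth_dev_set_vlan_offload() and mirror the change into the stored
 * per-port Rx offload configuration.
 */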
2946 void
2947 vlan_extend_set(portid_t port_id, int on)
2948 {
2949         int diag;
2950         int vlan_offload;
2951         uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2952
2953         if (port_id_is_invalid(port_id, ENABLED_WARN))
2954                 return;
2955
2956         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2957
2958         if (on) {
2959                 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
2960                 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2961         } else {
2962                 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
2963                 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2964         }
2965
2966         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2967         if (diag < 0)
2968                 printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
2969                "diag=%d\n", port_id, on, diag);
2970         ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2971 }
2972
2973 void
2974 rx_vlan_strip_set(portid_t port_id, int on)
2975 {
2976         int diag;
2977         int vlan_offload;
2978         uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2979
2980         if (port_id_is_invalid(port_id, ENABLED_WARN))
2981                 return;
2982
2983         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2984
2985         if (on) {
2986                 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
2987                 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2988         } else {
2989                 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
2990                 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2991         }
2992
2993         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2994         if (diag < 0)
2995                 printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
2996                "diag=%d\n", port_id, on, diag);
2997         ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2998 }
2999
3000 void
3001 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
3002 {
3003         int diag;
3004
3005         if (port_id_is_invalid(port_id, ENABLED_WARN))
3006                 return;
3007
3008         diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
3009         if (diag < 0)
3010                 printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
3011                "diag=%d\n", port_id, queue_id, on, diag);
3012 }
3013
3014 void
3015 rx_vlan_filter_set(portid_t port_id, int on)
3016 {
3017         int diag;
3018         int vlan_offload;
3019         uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3020
3021         if (port_id_is_invalid(port_id, ENABLED_WARN))
3022                 return;
3023
3024         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3025
3026         if (on) {
3027                 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
3028                 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3029         } else {
3030                 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
3031                 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3032         }
3033
3034         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3035         if (diag < 0)
3036                 printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
3037                "diag=%d\n", port_id, on, diag);
3038         ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3039 }
3040
3041 int
3042 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
3043 {
3044         int diag;
3045
3046         if (port_id_is_invalid(port_id, ENABLED_WARN))
3047                 return 1;
3048         if (vlan_id_is_invalid(vlan_id))
3049                 return 1;
3050         diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
3051         if (diag == 0)
3052                 return 0;
3053         printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
3054                "diag=%d\n",
3055                port_id, vlan_id, on, diag);
3056         return -1;
3057 }
3058
3059 void
3060 rx_vlan_all_filter_set(portid_t port_id, int on)
3061 {
3062         uint16_t vlan_id;
3063
3064         if (port_id_is_invalid(port_id, ENABLED_WARN))
3065                 return;
3066         for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
3067                 if (rx_vft_set(port_id, vlan_id, on))
3068                         break;
3069         }
3070 }
3071
3072 void
3073 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
3074 {
3075         int diag;
3076
3077         if (port_id_is_invalid(port_id, ENABLED_WARN))
3078                 return;
3079
3080         diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
3081         if (diag == 0)
3082                 return;
3083
3084         printf("vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
3085                "diag=%d\n",
3086                port_id, vlan_type, tp_id, diag);
3087 }
3088
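/*
 * Enable hardware insertion of a single VLAN tag on transmitted packets.
 * Rejected when QinQ insertion is already enabled or when the port does not
 * advertise DEV_TX_OFFLOAD_VLAN_INSERT.
 */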
3089 void
3090 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
3091 {
3092         struct rte_eth_dev_info dev_info;
3093         int ret;
3094
3095         if (port_id_is_invalid(port_id, ENABLED_WARN))
3096                 return;
3097         if (vlan_id_is_invalid(vlan_id))
3098                 return;
3099
3100         if (ports[port_id].dev_conf.txmode.offloads &
3101             DEV_TX_OFFLOAD_QINQ_INSERT) {
3102                 printf("Error: QinQ insert is enabled, cannot set single VLAN insert\n");
3103                 return;
3104         }
3105
3106         ret = eth_dev_info_get_print_err(port_id, &dev_info);
3107         if (ret != 0)
3108                 return;
3109
3110         if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
3111                 printf("Error: vlan insert is not supported by port %d\n",
3112                         port_id);
3113                 return;
3114         }
3115
3116         tx_vlan_reset(port_id);
3117         ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
3118         ports[port_id].tx_vlan_id = vlan_id;
3119 }
3120
3121 void
3122 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
3123 {
3124         struct rte_eth_dev_info dev_info;
3125         int ret;
3126
3127         if (port_id_is_invalid(port_id, ENABLED_WARN))
3128                 return;
3129         if (vlan_id_is_invalid(vlan_id))
3130                 return;
3131         if (vlan_id_is_invalid(vlan_id_outer))
3132                 return;
3133
3134         ret = eth_dev_info_get_print_err(port_id, &dev_info);
3135         if (ret != 0)
3136                 return;
3137
3138         if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
3139                 printf("Error: qinq insert is not supported by port %d\n",
3140                         port_id);
3141                 return;
3142         }
3143
3144         tx_vlan_reset(port_id);
3145         ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
3146                                                     DEV_TX_OFFLOAD_QINQ_INSERT);
3147         ports[port_id].tx_vlan_id = vlan_id;
3148         ports[port_id].tx_vlan_id_outer = vlan_id_outer;
3149 }
3150
3151 void
3152 tx_vlan_reset(portid_t port_id)
3153 {
3154         if (port_id_is_invalid(port_id, ENABLED_WARN))
3155                 return;
3156         ports[port_id].dev_conf.txmode.offloads &=
3157                                 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
3158                                   DEV_TX_OFFLOAD_QINQ_INSERT);
3159         ports[port_id].tx_vlan_id = 0;
3160         ports[port_id].tx_vlan_id_outer = 0;
3161 }
3162
3163 void
3164 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
3165 {
3166         if (port_id_is_invalid(port_id, ENABLED_WARN))
3167                 return;
3168
3169         rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
3170 }
3171
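/*
 * Map an Rx or Tx queue of a port to one of the per-queue statistics
 * counters (0..RTE_ETHDEV_QUEUE_STAT_CNTRS-1), e.g. from the CLI command
 * "set stat_qmap rx <port_id> <queue_id> <map_value>". An existing mapping
 * for the same port/queue is updated in place, otherwise a new one is added.
 */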
3172 void
3173 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
3174 {
3175         uint16_t i;
3176         uint8_t existing_mapping_found = 0;
3177
3178         if (port_id_is_invalid(port_id, ENABLED_WARN))
3179                 return;
3180
3181         if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
3182                 return;
3183
3184         if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
3185                 printf("map_value not in required range 0..%d\n",
3186                                 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
3187                 return;
3188         }
3189
3190         if (!is_rx) { /*then tx*/
3191                 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3192                         if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3193                             (tx_queue_stats_mappings[i].queue_id == queue_id)) {
3194                                 tx_queue_stats_mappings[i].stats_counter_id = map_value;
3195                                 existing_mapping_found = 1;
3196                                 break;
3197                         }
3198                 }
3199                 if (!existing_mapping_found) { /* A new additional mapping... */
3200                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
3201                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
3202                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
3203                         nb_tx_queue_stats_mappings++;
3204                 }
3205         }
3206         else { /*rx*/
3207                 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3208                         if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3209                             (rx_queue_stats_mappings[i].queue_id == queue_id)) {
3210                                 rx_queue_stats_mappings[i].stats_counter_id = map_value;
3211                                 existing_mapping_found = 1;
3212                                 break;
3213                         }
3214                 }
3215                 if (!existing_mapping_found) { /* A new additional mapping... */
3216                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
3217                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
3218                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
3219                         nb_rx_queue_stats_mappings++;
3220                 }
3221         }
3222 }
3223
3224 void
3225 set_xstats_hide_zero(uint8_t on_off)
3226 {
3227         xstats_hide_zero = on_off;
3228 }
3229
3230 static inline void
3231 print_fdir_mask(struct rte_eth_fdir_masks *mask)
3232 {
3233         printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
3234
3235         if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3236                 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
3237                         " tunnel_id: 0x%08x",
3238                         mask->mac_addr_byte_mask, mask->tunnel_type_mask,
3239                         rte_be_to_cpu_32(mask->tunnel_id_mask));
3240         else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3241                 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
3242                         rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
3243                         rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
3244
3245                 printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
3246                         rte_be_to_cpu_16(mask->src_port_mask),
3247                         rte_be_to_cpu_16(mask->dst_port_mask));
3248
3249                 printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3250                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
3251                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
3252                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
3253                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
3254
3255                 printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3256                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
3257                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
3258                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
3259                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
3260         }
3261
3262         printf("\n");
3263 }
3264
3265 static inline void
3266 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3267 {
3268         struct rte_eth_flex_payload_cfg *cfg;
3269         uint32_t i, j;
3270
3271         for (i = 0; i < flex_conf->nb_payloads; i++) {
3272                 cfg = &flex_conf->flex_set[i];
3273                 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
3274                         printf("\n    RAW:  ");
3275                 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
3276                         printf("\n    L2_PAYLOAD:  ");
3277                 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
3278                         printf("\n    L3_PAYLOAD:  ");
3279                 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
3280                         printf("\n    L4_PAYLOAD:  ");
3281                 else
3282                         printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
3283                 for (j = 0; j < num; j++)
3284                         printf("  %-5u", cfg->src_offset[j]);
3285         }
3286         printf("\n");
3287 }
3288
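/* Translate an RTE_ETH_FLOW_* value into its printable name, or NULL if unknown. */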
3289 static char *
3290 flowtype_to_str(uint16_t flow_type)
3291 {
3292         struct flow_type_info {
3293                 char str[32];
3294                 uint16_t ftype;
3295         };
3296
3297         uint8_t i;
3298         static struct flow_type_info flowtype_str_table[] = {
3299                 {"raw", RTE_ETH_FLOW_RAW},
3300                 {"ipv4", RTE_ETH_FLOW_IPV4},
3301                 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
3302                 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
3303                 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
3304                 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
3305                 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
3306                 {"ipv6", RTE_ETH_FLOW_IPV6},
3307                 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
3308                 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
3309                 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
3310                 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
3311                 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
3312                 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
3313                 {"port", RTE_ETH_FLOW_PORT},
3314                 {"vxlan", RTE_ETH_FLOW_VXLAN},
3315                 {"geneve", RTE_ETH_FLOW_GENEVE},
3316                 {"nvgre", RTE_ETH_FLOW_NVGRE},
3317                 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
3318         };
3319
3320         for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
3321                 if (flowtype_str_table[i].ftype == flow_type)
3322                         return flowtype_str_table[i].str;
3323         }
3324
3325         return NULL;
3326 }
3327
3328 static inline void
3329 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3330 {
3331         struct rte_eth_fdir_flex_mask *mask;
3332         uint32_t i, j;
3333         char *p;
3334
3335         for (i = 0; i < flex_conf->nb_flexmasks; i++) {
3336                 mask = &flex_conf->flex_mask[i];
3337                 p = flowtype_to_str(mask->flow_type);
3338                 printf("\n    %s:\t", p ? p : "unknown");
3339                 for (j = 0; j < num; j++)
3340                         printf(" %02x", mask->mask[j]);
3341         }
3342         printf("\n");
3343 }
3344
3345 static inline void
3346 print_fdir_flow_type(uint32_t flow_types_mask)
3347 {
3348         int i;
3349         char *p;
3350
3351         for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3352                 if (!(flow_types_mask & (1 << i)))
3353                         continue;
3354                 p = flowtype_to_str(i);
3355                 if (p)
3356                         printf(" %s", p);
3357                 else
3358                         printf(" unknown");
3359         }
3360         printf("\n");
3361 }
3362
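/*
 * Query and display the flow director configuration and statistics of a port
 * through the filter_ctrl API (typically from the CLI "show port fdir <port_id>").
 */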
3363 void
3364 fdir_get_infos(portid_t port_id)
3365 {
3366         struct rte_eth_fdir_stats fdir_stat;
3367         struct rte_eth_fdir_info fdir_info;
3368         int ret;
3369
3370         static const char *fdir_stats_border = "########################";
3371
3372         if (port_id_is_invalid(port_id, ENABLED_WARN))
3373                 return;
3374         ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
3375         if (ret < 0) {
3376                 printf("\n FDIR is not supported on port %-2d\n",
3377                         port_id);
3378                 return;
3379         }
3380
3381         memset(&fdir_info, 0, sizeof(fdir_info));
3382         rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3383                                RTE_ETH_FILTER_INFO, &fdir_info);
3384         memset(&fdir_stat, 0, sizeof(fdir_stat));
3385         rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3386                                RTE_ETH_FILTER_STATS, &fdir_stat);
3387         printf("\n  %s FDIR infos for port %-2d     %s\n",
3388                fdir_stats_border, port_id, fdir_stats_border);
3389         printf("  MODE: ");
3390         if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
3391                 printf("  PERFECT\n");
3392         else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
3393                 printf("  PERFECT-MAC-VLAN\n");
3394         else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3395                 printf("  PERFECT-TUNNEL\n");
3396         else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
3397                 printf("  SIGNATURE\n");
3398         else
3399                 printf("  DISABLE\n");
3400         if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
3401                 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
3402                 printf("  SUPPORTED FLOW TYPE: ");
3403                 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
3404         }
3405         printf("  FLEX PAYLOAD INFO:\n");
3406         printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
3407                "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
3408                "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
3409                 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
3410                 fdir_info.flex_payload_unit,
3411                 fdir_info.max_flex_payload_segment_num,
3412                 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
3413         printf("  MASK: ");
3414         print_fdir_mask(&fdir_info.mask);
3415         if (fdir_info.flex_conf.nb_payloads > 0) {
3416                 printf("  FLEX PAYLOAD SRC OFFSET:");
3417                 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3418         }
3419         if (fdir_info.flex_conf.nb_flexmasks > 0) {
3420                 printf("  FLEX MASK CFG:");
3421                 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3422         }
3423         printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
3424                fdir_stat.guarant_cnt, fdir_stat.best_cnt);
3425         printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
3426                fdir_info.guarant_spc, fdir_info.best_spc);
3427         printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
3428                "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
3429                "  add:           %-10"PRIu64"  remove:        %"PRIu64"\n"
3430                "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
3431                fdir_stat.collision, fdir_stat.free,
3432                fdir_stat.maxhash, fdir_stat.maxlen,
3433                fdir_stat.add, fdir_stat.remove,
3434                fdir_stat.f_add, fdir_stat.f_remove);
3435         printf("  %s############################%s\n",
3436                fdir_stats_border, fdir_stats_border);
3437 }
3438
3439 void
3440 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3441 {
3442         struct rte_port *port;
3443         struct rte_eth_fdir_flex_conf *flex_conf;
3444         int i, idx = 0;
3445
3446         port = &ports[port_id];
3447         flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3448         for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3449                 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3450                         idx = i;
3451                         break;
3452                 }
3453         }
3454         if (i >= RTE_ETH_FLOW_MAX) {
3455                 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3456                         idx = flex_conf->nb_flexmasks;
3457                         flex_conf->nb_flexmasks++;
3458                 } else {
3459                         printf("The flex mask table is full. Cannot set flex"
3460                                 " mask for flow_type(%u).\n", cfg->flow_type);
3461                         return;
3462                 }
3463         }
3464         rte_memcpy(&flex_conf->flex_mask[idx],
3465                          cfg,
3466                          sizeof(struct rte_eth_fdir_flex_mask));
3467 }
3468
3469 void
3470 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3471 {
3472         struct rte_port *port;
3473         struct rte_eth_fdir_flex_conf *flex_conf;
3474         int i, idx = 0;
3475
3476         port = &ports[port_id];
3477         flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3478         for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3479                 if (cfg->type == flex_conf->flex_set[i].type) {
3480                         idx = i;
3481                         break;
3482                 }
3483         }
3484         if (i >= RTE_ETH_PAYLOAD_MAX) {
3485                 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3486                         idx = flex_conf->nb_payloads;
3487                         flex_conf->nb_payloads++;
3488                 } else {
3489                         printf("The flex payload table is full. Cannot set"
3490                                 " flex payload for type(%u).\n", cfg->type);
3491                         return;
3492                 }
3493         }
3494         rte_memcpy(&flex_conf->flex_set[idx],
3495                          cfg,
3496                          sizeof(struct rte_eth_flex_payload_cfg));
3497
3498 }
3499
3500 void
3501 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3502 {
3503 #ifdef RTE_LIBRTE_IXGBE_PMD
3504         int diag;
3505
3506         if (is_rx)
3507                 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3508         else
3509                 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3510
3511         if (diag == 0)
3512                 return;
3513         printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3514                         is_rx ? "rx" : "tx", port_id, diag);
3515         return;
3516 #endif
3517         printf("VF %s setting not supported for port %d\n",
3518                         is_rx ? "Rx" : "Tx", port_id);
3519         RTE_SET_USED(vf);
3520         RTE_SET_USED(on);
3521 }
3522
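/*
 * Set a Tx rate limit on a single queue, after checking that the requested
 * rate does not exceed the current link speed.
 */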
3523 int
3524 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3525 {
3526         int diag;
3527         struct rte_eth_link link;
3528         int ret;
3529
3530         if (port_id_is_invalid(port_id, ENABLED_WARN))
3531                 return 1;
3532         ret = eth_link_get_nowait_print_err(port_id, &link);
3533         if (ret < 0)
3534                 return 1;
3535         if (rate > link.link_speed) {
3536                 printf("Invalid rate value: %u, bigger than link speed: %u\n",
3537                         rate, link.link_speed);
3538                 return 1;
3539         }
3540         diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3541         if (diag == 0)
3542                 return diag;
3543         printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3544                 port_id, diag);
3545         return diag;
3546 }
3547
3548 int
3549 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
3550 {
3551         int diag = -ENOTSUP;
3552
3553         RTE_SET_USED(vf);
3554         RTE_SET_USED(rate);
3555         RTE_SET_USED(q_msk);
3556
3557 #ifdef RTE_LIBRTE_IXGBE_PMD
3558         if (diag == -ENOTSUP)
3559                 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
3560                                                        q_msk);
3561 #endif
3562 #ifdef RTE_LIBRTE_BNXT_PMD
3563         if (diag == -ENOTSUP)
3564                 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
3565 #endif
3566         if (diag == 0)
3567                 return diag;
3568
3569         printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
3570                 port_id, diag);
3571         return diag;
3572 }
3573
3574 /*
3575  * Functions to manage the set of filtered Multicast MAC addresses.
3576  *
3577  * A pool of filtered multicast MAC addresses is associated with each port.
3578  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3579  * The address of the pool and the number of valid multicast MAC addresses
3580  * recorded in the pool are stored in the fields "mc_addr_pool" and
3581  * "mc_addr_nb" of the "rte_port" data structure.
3582  *
3583  * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
3584  * to be supplied a contiguous array of multicast MAC addresses.
3585  * To comply with this constraint, the set of multicast addresses recorded
3586  * into the pool are systematically compacted at the beginning of the pool.
3587  * Hence, when a multicast address is removed from the pool, all following
3588  * addresses, if any, are copied back to keep the set contiguous.
3589  */
3590 #define MCAST_POOL_INC 32
3591
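/* Make room for one more address, reallocating the pool in chunks of
 * MCAST_POOL_INC entries whenever the current chunk is full.
 */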
3592 static int
3593 mcast_addr_pool_extend(struct rte_port *port)
3594 {
3595         struct rte_ether_addr *mc_pool;
3596         size_t mc_pool_size;
3597
3598         /*
3599          * If a free entry is available at the end of the pool, just
3600          * increment the number of recorded multicast addresses.
3601          */
3602         if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3603                 port->mc_addr_nb++;
3604                 return 0;
3605         }
3606
3607         /*
3608          * [re]allocate a pool with MCAST_POOL_INC more entries.
3609          * The previous test guarantees that port->mc_addr_nb is a multiple
3610          * of MCAST_POOL_INC.
3611          */
3612         mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
3613                                                     MCAST_POOL_INC);
3614         mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
3615                                                 mc_pool_size);
3616         if (mc_pool == NULL) {
3617                 printf("allocation of pool of %u multicast addresses failed\n",
3618                        port->mc_addr_nb + MCAST_POOL_INC);
3619                 return -ENOMEM;
3620         }
3621
3622         port->mc_addr_pool = mc_pool;
3623         port->mc_addr_nb++;
3624         return 0;
3625
3626 }
3627
3628 static void
3629 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
3630 {
3631         port->mc_addr_nb--;
3632         if (addr_idx == port->mc_addr_nb) {
3633                 /* No need to recompact the set of multicast addresses. */
3634                 if (port->mc_addr_nb == 0) {
3635                         /* free the pool of multicast addresses. */
3636                         free(port->mc_addr_pool);
3637                         port->mc_addr_pool = NULL;
3638                 }
3639                 return;
3640         }
3641         memmove(&port->mc_addr_pool[addr_idx],
3642                 &port->mc_addr_pool[addr_idx + 1],
3643                 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
3644 }
3645
3646 static void
3647 eth_port_multicast_addr_list_set(portid_t port_id)
3648 {
3649         struct rte_port *port;
3650         int diag;
3651
3652         port = &ports[port_id];
3653         diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3654                                             port->mc_addr_nb);
3655         if (diag == 0)
3656                 return;
3657         printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
3658                port_id, port->mc_addr_nb, -diag);
3659 }
3660
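/*
 * Add a multicast MAC address to the port filter (e.g. from the CLI command
 * "mcast_addr add <port_id> <mc_addr>"). Duplicates are rejected and the
 * whole list is re-programmed into the device.
 */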
3661 void
3662 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
3663 {
3664         struct rte_port *port;
3665         uint32_t i;
3666
3667         if (port_id_is_invalid(port_id, ENABLED_WARN))
3668                 return;
3669
3670         port = &ports[port_id];
3671
3672         /*
3673          * Check that the added multicast MAC address is not already recorded
3674          * in the pool of multicast addresses.
3675          */
3676         for (i = 0; i < port->mc_addr_nb; i++) {
3677                 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3678                         printf("multicast address already filtered by port\n");
3679                         return;
3680                 }
3681         }
3682
3683         if (mcast_addr_pool_extend(port) != 0)
3684                 return;
3685         rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
3686         eth_port_multicast_addr_list_set(port_id);
3687 }
3688
3689 void
3690 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
3691 {
3692         struct rte_port *port;
3693         uint32_t i;
3694
3695         if (port_id_is_invalid(port_id, ENABLED_WARN))
3696                 return;
3697
3698         port = &ports[port_id];
3699
3700         /*
3701          * Search the pool of multicast MAC addresses for the removed address.
3702          */
3703         for (i = 0; i < port->mc_addr_nb; i++) {
3704                 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
3705                         break;
3706         }
3707         if (i == port->mc_addr_nb) {
3708                 printf("multicast address not filtered by port %d\n", port_id);
3709                 return;
3710         }
3711
3712         mcast_addr_pool_remove(port, i);
3713         eth_port_multicast_addr_list_set(port_id);
3714 }
3715
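/*
 * Display the DCB configuration of a port: number of traffic classes,
 * priority-to-TC mapping, per-TC bandwidth share and the Rx/Tx queue
 * ranges assigned to each traffic class.
 */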
3716 void
3717 port_dcb_info_display(portid_t port_id)
3718 {
3719         struct rte_eth_dcb_info dcb_info;
3720         uint16_t i;
3721         int ret;
3722         static const char *border = "================";
3723
3724         if (port_id_is_invalid(port_id, ENABLED_WARN))
3725                 return;
3726
3727         ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
3728         if (ret) {
3729                 printf("\n Failed to get DCB info on port %-2d\n",
3730                         port_id);
3731                 return;
3732         }
3733         printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
3734         printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
3735         printf("\n  TC :        ");
3736         for (i = 0; i < dcb_info.nb_tcs; i++)
3737                 printf("\t%4d", i);
3738         printf("\n  Priority :  ");
3739         for (i = 0; i < dcb_info.nb_tcs; i++)
3740                 printf("\t%4d", dcb_info.prio_tc[i]);
3741         printf("\n  BW percent :");
3742         for (i = 0; i < dcb_info.nb_tcs; i++)
3743                 printf("\t%4d%%", dcb_info.tc_bws[i]);
3744         printf("\n  RXQ base :  ");
3745         for (i = 0; i < dcb_info.nb_tcs; i++)
3746                 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
3747         printf("\n  RXQ number :");
3748         for (i = 0; i < dcb_info.nb_tcs; i++)
3749                 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
3750         printf("\n  TXQ base :  ");
3751         for (i = 0; i < dcb_info.nb_tcs; i++)
3752                 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
3753         printf("\n  TXQ number :");
3754         for (i = 0; i < dcb_info.nb_tcs; i++)
3755                 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
3756         printf("\n");
3757 }
3758
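/*
 * Read a regular file entirely into a newly allocated buffer and report its
 * size through *size; the caller releases the buffer with close_file().
 */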
3759 uint8_t *
3760 open_file(const char *file_path, uint32_t *size)
3761 {
3762         int fd = open(file_path, O_RDONLY);
3763         off_t pkg_size;
3764         uint8_t *buf = NULL;
3765         int ret = 0;
3766         struct stat st_buf;
3767
3768         if (size)
3769                 *size = 0;
3770
3771         if (fd == -1) {
3772                 printf("%s: Failed to open %s\n", __func__, file_path);
3773                 return buf;
3774         }
3775
3776         if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
3777                 close(fd);
3778                 printf("%s: File operations failed\n", __func__);
3779                 return buf;
3780         }
3781
3782         pkg_size = st_buf.st_size;
3783         if (pkg_size < 0) {
3784                 close(fd);
3785                 printf("%s: File operations failed\n", __func__);
3786                 return buf;
3787         }
3788
3789         buf = (uint8_t *)malloc(pkg_size);
3790         if (!buf) {
3791                 close(fd);
3792                 printf("%s: Failed to allocate memory\n", __func__);
3793                 return buf;
3794         }
3795
3796         ret = read(fd, buf, pkg_size);
3797         if (ret < 0) {
3798                 close(fd);
3799                 printf("%s: File read operation failed\n", __func__);
3800                 close_file(buf);
3801                 return NULL;
3802         }
3803
3804         if (size)
3805                 *size = pkg_size;
3806
3807         close(fd);
3808
3809         return buf;
3810 }
3811
3812 int
3813 save_file(const char *file_path, uint8_t *buf, uint32_t size)
3814 {
3815         FILE *fh = fopen(file_path, "wb");
3816
3817         if (fh == NULL) {
3818                 printf("%s: Failed to open %s\n", __func__, file_path);
3819                 return -1;
3820         }
3821
3822         if (fwrite(buf, 1, size, fh) != size) {
3823                 fclose(fh);
3824                 printf("%s: File write operation failed\n", __func__);
3825                 return -1;
3826         }
3827
3828         fclose(fh);
3829
3830         return 0;
3831 }
3832
3833 int
3834 close_file(uint8_t *buf)
3835 {
3836         if (buf) {
3837                 free((void *)buf);
3838                 return 0;
3839         }
3840
3841         return -1;
3842 }
3843
3844 void
3845 port_queue_region_info_display(portid_t port_id, void *buf)
3846 {
3847 #ifdef RTE_LIBRTE_I40E_PMD
3848         uint16_t i, j;
3849         struct rte_pmd_i40e_queue_regions *info =
3850                 (struct rte_pmd_i40e_queue_regions *)buf;
3851         static const char *queue_region_info_stats_border = "-------";
3852
3853         if (!info->queue_region_number)
3854                 printf("no queue region has been set before\n");
3855
3856         printf("\n      %s All queue region info for port=%2d %s",
3857                         queue_region_info_stats_border, port_id,
3858                         queue_region_info_stats_border);
3859         printf("\n      queue_region_number: %-14u \n",
3860                         info->queue_region_number);
3861
3862         for (i = 0; i < info->queue_region_number; i++) {
3863                 printf("\n      region_id: %-14u queue_number: %-14u "
3864                         "queue_start_index: %-14u \n",
3865                         info->region[i].region_id,
3866                         info->region[i].queue_num,
3867                         info->region[i].queue_start_index);
3868
3869                 printf("  user_priority_num is  %-14u :",
3870                                         info->region[i].user_priority_num);
3871                 for (j = 0; j < info->region[i].user_priority_num; j++)
3872                         printf(" %-14u ", info->region[i].user_priority[j]);
3873
3874                 printf("\n      flowtype_num is  %-14u :",
3875                                 info->region[i].flowtype_num);
3876                 for (j = 0; j < info->region[i].flowtype_num; j++)
3877                         printf(" %-14u ", info->region[i].hw_flowtype[j]);
3878         }
3879 #else
3880         RTE_SET_USED(port_id);
3881         RTE_SET_USED(buf);
3882 #endif
3883
3884         printf("\n\n");
3885 }