1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2013-2014 6WIND S.A.
13 #include <sys/queue.h>
14 #include <sys/types.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
34 #include <rte_interrupts.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
41 #include <rte_errno.h>
42 #ifdef RTE_LIBRTE_IXGBE_PMD
43 #include <rte_pmd_ixgbe.h>
45 #ifdef RTE_LIBRTE_I40E_PMD
46 #include <rte_pmd_i40e.h>
48 #ifdef RTE_LIBRTE_BNXT_PMD
49 #include <rte_pmd_bnxt.h>
52 #include <rte_hexdump.h>
56 #define ETHDEV_FWVERS_LEN 32
58 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
59 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
61 #define CLOCK_TYPE_ID CLOCK_MONOTONIC
64 #define NS_PER_SEC 1E9
66 static char *flowtype_to_str(uint16_t flow_type);
69 enum tx_pkt_split split;
73 .split = TX_PKT_SPLIT_OFF,
77 .split = TX_PKT_SPLIT_ON,
81 .split = TX_PKT_SPLIT_RND,
86 const struct rss_type_info rss_type_table[] = {
87 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
88 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
89 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
92 { "eth", ETH_RSS_ETH },
93 { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
94 { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
95 { "vlan", ETH_RSS_VLAN },
96 { "s-vlan", ETH_RSS_S_VLAN },
97 { "c-vlan", ETH_RSS_C_VLAN },
98 { "ipv4", ETH_RSS_IPV4 },
99 { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
100 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
101 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
102 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
103 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
104 { "ipv6", ETH_RSS_IPV6 },
105 { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
106 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
107 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
108 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
109 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
110 { "l2-payload", ETH_RSS_L2_PAYLOAD },
111 { "ipv6-ex", ETH_RSS_IPV6_EX },
112 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
113 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
114 { "port", ETH_RSS_PORT },
115 { "vxlan", ETH_RSS_VXLAN },
116 { "geneve", ETH_RSS_GENEVE },
117 { "nvgre", ETH_RSS_NVGRE },
118 { "ip", ETH_RSS_IP },
119 { "udp", ETH_RSS_UDP },
120 { "tcp", ETH_RSS_TCP },
121 { "sctp", ETH_RSS_SCTP },
122 { "tunnel", ETH_RSS_TUNNEL },
123 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
124 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
125 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
126 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
127 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
128 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
129 { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
130 { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
131 { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
132 { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
133 { "esp", ETH_RSS_ESP },
134 { "ah", ETH_RSS_AH },
135 { "l2tpv3", ETH_RSS_L2TPV3 },
136 { "pfcp", ETH_RSS_PFCP },
137 { "pppoe", ETH_RSS_PPPOE },
138 { "gtpu", ETH_RSS_GTPU },
143 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
145 char buf[RTE_ETHER_ADDR_FMT_SIZE];
146 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
147 printf("%s%s", name, buf);
151 nic_stats_display(portid_t port_id)
153 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
154 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
155 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
156 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
157 static uint64_t prev_ns[RTE_MAX_ETHPORTS];
158 struct timespec cur_time;
159 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
161 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
162 struct rte_eth_stats stats;
163 struct rte_port *port = &ports[port_id];
166 static const char *nic_stats_border = "########################";
168 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
172 rte_eth_stats_get(port_id, &stats);
173 printf("\n %s NIC statistics for port %-2d %s\n",
174 nic_stats_border, port_id, nic_stats_border);
176 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
177 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
179 stats.ipackets, stats.imissed, stats.ibytes);
180 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
181 printf(" RX-nombuf: %-10"PRIu64"\n",
183 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
185 stats.opackets, stats.oerrors, stats.obytes);
188 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
189 " RX-bytes: %10"PRIu64"\n",
190 stats.ipackets, stats.ierrors, stats.ibytes);
191 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
192 printf(" RX-nombuf: %10"PRIu64"\n",
194 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
195 " TX-bytes: %10"PRIu64"\n",
196 stats.opackets, stats.oerrors, stats.obytes);
199 if (port->rx_queue_stats_mapping_enabled) {
201 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
202 printf(" Stats reg %2d RX-packets: %10"PRIu64
203 " RX-errors: %10"PRIu64
204 " RX-bytes: %10"PRIu64"\n",
205 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
208 if (port->tx_queue_stats_mapping_enabled) {
210 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
211 printf(" Stats reg %2d TX-packets: %10"PRIu64
212 " TX-bytes: %10"PRIu64"\n",
213 i, stats.q_opackets[i], stats.q_obytes[i]);
218 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
221 ns = cur_time.tv_sec * NS_PER_SEC;
222 ns += cur_time.tv_nsec;
224 if (prev_ns[port_id] != 0)
225 diff_ns = ns - prev_ns[port_id];
226 prev_ns[port_id] = ns;
229 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
230 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
231 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
232 (stats.opackets - prev_pkts_tx[port_id]) : 0;
233 prev_pkts_rx[port_id] = stats.ipackets;
234 prev_pkts_tx[port_id] = stats.opackets;
235 mpps_rx = diff_ns > 0 ?
236 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
237 mpps_tx = diff_ns > 0 ?
238 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;
240 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
241 (stats.ibytes - prev_bytes_rx[port_id]) : 0;
242 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
243 (stats.obytes - prev_bytes_tx[port_id]) : 0;
244 prev_bytes_rx[port_id] = stats.ibytes;
245 prev_bytes_tx[port_id] = stats.obytes;
246 mbps_rx = diff_ns > 0 ?
247 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
248 mbps_tx = diff_ns > 0 ?
249 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;
251 printf("\n Throughput (since last show)\n");
252 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
253 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
254 mpps_tx, mbps_tx * 8);
256 printf(" %s############################%s\n",
257 nic_stats_border, nic_stats_border);
261 nic_stats_clear(portid_t port_id)
265 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
270 ret = rte_eth_stats_reset(port_id);
272 printf("%s: Error: failed to reset stats (port %u): %s",
273 __func__, port_id, strerror(-ret));
277 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
281 printf("%s: Error: failed to get stats (port %u): %s",
282 __func__, port_id, strerror(ret));
285 printf("\n NIC statistics for port %d cleared\n", port_id);
289 nic_xstats_display(portid_t port_id)
291 struct rte_eth_xstat *xstats;
292 int cnt_xstats, idx_xstat;
293 struct rte_eth_xstat_name *xstats_names;
295 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
299 printf("###### NIC extended statistics for port %-2d\n", port_id);
300 if (!rte_eth_dev_is_valid_port(port_id)) {
301 printf("Error: Invalid port number %i\n", port_id);
306 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
307 if (cnt_xstats < 0) {
308 printf("Error: Cannot get count of xstats\n");
312 /* Get id-name lookup table */
313 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
314 if (xstats_names == NULL) {
315 printf("Cannot allocate memory for xstats lookup\n");
318 if (cnt_xstats != rte_eth_xstats_get_names(
319 port_id, xstats_names, cnt_xstats)) {
320 printf("Error: Cannot get xstats lookup\n");
325 /* Get stats themselves */
326 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
327 if (xstats == NULL) {
328 printf("Cannot allocate memory for xstats\n");
332 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
333 printf("Error: Unable to get xstats\n");
340 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
341 if (xstats_hide_zero && !xstats[idx_xstat].value)
343 printf("%s: %"PRIu64"\n",
344 xstats_names[idx_xstat].name,
345 xstats[idx_xstat].value);
352 nic_xstats_clear(portid_t port_id)
356 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
361 ret = rte_eth_xstats_reset(port_id);
363 printf("%s: Error: failed to reset xstats (port %u): %s",
364 __func__, port_id, strerror(-ret));
368 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
372 printf("%s: Error: failed to get stats (port %u): %s\n",
373 __func__, port_id, strerror(-ret));
379 nic_stats_mapping_display(portid_t port_id)
381 struct rte_port *port = &ports[port_id];
384 static const char *nic_stats_mapping_border = "########################";
386 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
391 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
392 printf("Port id %d - either does not support queue statistic mapping or"
393 " no queue statistic mapping set\n", port_id);
397 printf("\n %s NIC statistics mapping for port %-2d %s\n",
398 nic_stats_mapping_border, port_id, nic_stats_mapping_border);
400 if (port->rx_queue_stats_mapping_enabled) {
401 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
402 if (rx_queue_stats_mappings[i].port_id == port_id) {
403 printf(" RX-queue %2d mapped to Stats Reg %2d\n",
404 rx_queue_stats_mappings[i].queue_id,
405 rx_queue_stats_mappings[i].stats_counter_id);
412 if (port->tx_queue_stats_mapping_enabled) {
413 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
414 if (tx_queue_stats_mappings[i].port_id == port_id) {
415 printf(" TX-queue %2d mapped to Stats Reg %2d\n",
416 tx_queue_stats_mappings[i].queue_id,
417 tx_queue_stats_mappings[i].stats_counter_id);
422 printf(" %s####################################%s\n",
423 nic_stats_mapping_border, nic_stats_mapping_border);
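/*
 * Display the configuration of one Rx (or, below, Tx) queue. Typically
 * reached from the testpmd CLI, e.g. (illustrative):
 * "show rxq info 0 0" / "show txq info 0 0".
 */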
427 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
429 struct rte_eth_burst_mode mode;
430 struct rte_eth_rxq_info qinfo;
432 static const char *info_border = "*********************";
434 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
436 printf("Failed to retrieve information for port: %u, "
437 "RX queue: %hu\nerror desc: %s(%d)\n",
438 port_id, queue_id, strerror(-rc), rc);
442 printf("\n%s Infos for port %-2u, RX queue %-2u %s",
443 info_border, port_id, queue_id, info_border);
445 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
446 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
447 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
448 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
449 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
450 printf("\nRX drop packets: %s",
451 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
452 printf("\nRX deferred start: %s",
453 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
454 printf("\nRX scattered packets: %s",
455 (qinfo.scattered_rx != 0) ? "on" : "off");
456 if (qinfo.rx_buf_size != 0)
457 printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
458 printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
460 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
461 printf("\nBurst mode: %s%s",
463 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
464 " (per queue)" : "");
470 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
472 struct rte_eth_burst_mode mode;
473 struct rte_eth_txq_info qinfo;
475 static const char *info_border = "*********************";
477 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
479 printf("Failed to retrieve information for port: %u, "
480 "TX queue: %hu\nerror desc: %s(%d)\n",
481 port_id, queue_id, strerror(-rc), rc);
485 printf("\n%s Infos for port %-2u, TX queue %-2u %s",
486 info_border, port_id, queue_id, info_border);
488 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
489 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
490 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
491 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
492 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
493 printf("\nTX deferred start: %s",
494 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
495 printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
497 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
498 printf("\nBurst mode: %s%s",
500 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
501 " (per queue)" : "");
506 static int bus_match_all(const struct rte_bus *bus, const void *data)
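/*
 * List devices known to EAL, optionally filtered by a devargs identifier,
 * together with the ethdev ports they own. Typically reached from the
 * testpmd CLI, e.g. (illustrative): "show device info all".
 */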
514 device_infos_display(const char *identifier)
516 static const char *info_border = "*********************";
517 struct rte_bus *start = NULL, *next;
518 struct rte_dev_iterator dev_iter;
519 char name[RTE_ETH_NAME_MAX_LEN];
520 struct rte_ether_addr mac_addr;
521 struct rte_device *dev;
522 struct rte_devargs da;
526 memset(&da, 0, sizeof(da));
530 if (rte_devargs_parsef(&da, "%s", identifier)) {
531 printf("cannot parse identifier\n");
538 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
541 if (identifier && da.bus != next)
544 /* Skip buses that don't have iterate method */
545 if (!next->dev_iterate)
548 snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
549 RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
553 /* Check for matching device if identifier is present */
555 strncmp(da.name, dev->name, strlen(dev->name)))
557 printf("\n%s Infos for device %s %s\n",
558 info_border, dev->name, info_border);
559 printf("Bus name: %s", dev->bus->name);
560 printf("\nDriver name: %s", dev->driver->name);
561 printf("\nDevargs: %s",
562 dev->devargs ? dev->devargs->args : "");
563 printf("\nConnect to socket: %d", dev->numa_node);
566 /* List ports with matching device name */
567 RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
568 printf("\n\tPort id: %-2d", port_id);
569 if (eth_macaddr_get_print_err(port_id,
571 print_ethaddr("\n\tMAC address: ",
573 rte_eth_dev_get_name_by_port(port_id, name);
574 printf("\n\tDevice name: %s", name);
582 port_infos_display(portid_t port_id)
584 struct rte_port *port;
585 struct rte_ether_addr mac_addr;
586 struct rte_eth_link link;
587 struct rte_eth_dev_info dev_info;
589 struct rte_mempool * mp;
590 static const char *info_border = "*********************";
592 char name[RTE_ETH_NAME_MAX_LEN];
594 char fw_version[ETHDEV_FWVERS_LEN];
596 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
600 port = &ports[port_id];
601 ret = eth_link_get_nowait_print_err(port_id, &link);
605 ret = eth_dev_info_get_print_err(port_id, &dev_info);
609 printf("\n%s Infos for port %-2d %s\n",
610 info_border, port_id, info_border);
611 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
612 print_ethaddr("MAC address: ", &mac_addr);
613 rte_eth_dev_get_name_by_port(port_id, name);
614 printf("\nDevice name: %s", name);
615 printf("\nDriver name: %s", dev_info.driver_name);
617 if (rte_eth_dev_fw_version_get(port_id, fw_version,
618 ETHDEV_FWVERS_LEN) == 0)
619 printf("\nFirmware-version: %s", fw_version);
621 printf("\nFirmware-version: %s", "not available");
623 if (dev_info.device->devargs && dev_info.device->devargs->args)
624 printf("\nDevargs: %s", dev_info.device->devargs->args);
625 printf("\nConnect to socket: %u", port->socket_id);
627 if (port_numa[port_id] != NUMA_NO_CONFIG) {
628 mp = mbuf_pool_find(port_numa[port_id]);
630 printf("\nmemory allocation on the socket: %d",
633 printf("\nmemory allocation on the socket: %u",port->socket_id);
635 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
636 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
637 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
638 ("full-duplex") : ("half-duplex"));
640 if (!rte_eth_dev_get_mtu(port_id, &mtu))
641 printf("MTU: %u\n", mtu);
643 printf("Promiscuous mode: %s\n",
644 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
645 printf("Allmulticast mode: %s\n",
646 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
647 printf("Maximum number of MAC addresses: %u\n",
648 (unsigned int)(port->dev_info.max_mac_addrs));
649 printf("Maximum number of MAC addresses of hash filtering: %u\n",
650 (unsigned int)(port->dev_info.max_hash_mac_addrs));
652 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
653 if (vlan_offload >= 0) {
654 printf("VLAN offload:\n");
655 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
656 printf(" strip on, ");
658 printf(" strip off, ");
660 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
661 printf("filter on, ");
663 printf("filter off, ");
665 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
666 printf("extend on, ");
668 printf("extend off, ");
670 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
671 printf("qinq strip on\n");
673 printf("qinq strip off\n");
676 if (dev_info.hash_key_size > 0)
677 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
678 if (dev_info.reta_size > 0)
679 printf("Redirection table size: %u\n", dev_info.reta_size);
680 if (!dev_info.flow_type_rss_offloads)
681 printf("No RSS offload flow type is supported.\n");
686 printf("Supported RSS offload flow types:\n");
687 for (i = RTE_ETH_FLOW_UNKNOWN + 1;
688 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
689 if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
691 p = flowtype_to_str(i);
695 printf(" user defined %d\n", i);
699 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
700 printf("Maximum configurable length of RX packet: %u\n",
701 dev_info.max_rx_pktlen);
702 printf("Maximum configurable size of LRO aggregated packet: %u\n",
703 dev_info.max_lro_pkt_size);
704 if (dev_info.max_vfs)
705 printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
706 if (dev_info.max_vmdq_pools)
707 printf("Maximum number of VMDq pools: %u\n",
708 dev_info.max_vmdq_pools);
710 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
711 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
712 printf("Max possible number of RXDs per queue: %hu\n",
713 dev_info.rx_desc_lim.nb_max);
714 printf("Min possible number of RXDs per queue: %hu\n",
715 dev_info.rx_desc_lim.nb_min);
716 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
718 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
719 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
720 printf("Max possible number of TXDs per queue: %hu\n",
721 dev_info.tx_desc_lim.nb_max);
722 printf("Min possible number of TXDs per queue: %hu\n",
723 dev_info.tx_desc_lim.nb_min);
724 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
725 printf("Max segment number per packet: %hu\n",
726 dev_info.tx_desc_lim.nb_seg_max);
727 printf("Max segment number per MTU/TSO: %hu\n",
728 dev_info.tx_desc_lim.nb_mtu_seg_max);
730 /* Show switch info only if valid switch domain and port id is set */
731 if (dev_info.switch_info.domain_id !=
732 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
733 if (dev_info.switch_info.name)
734 printf("Switch name: %s\n", dev_info.switch_info.name);
736 printf("Switch domain Id: %u\n",
737 dev_info.switch_info.domain_id);
738 printf("Switch Port Id: %u\n",
739 dev_info.switch_info.port_id);
744 port_summary_header_display(void)
746 uint16_t port_number;
748 port_number = rte_eth_dev_count_avail();
749 printf("Number of available ports: %i\n", port_number);
750 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
751 "Driver", "Status", "Link");
755 port_summary_display(portid_t port_id)
757 struct rte_ether_addr mac_addr;
758 struct rte_eth_link link;
759 struct rte_eth_dev_info dev_info;
760 char name[RTE_ETH_NAME_MAX_LEN];
763 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
768 ret = eth_link_get_nowait_print_err(port_id, &link);
772 ret = eth_dev_info_get_print_err(port_id, &dev_info);
776 rte_eth_dev_get_name_by_port(port_id, name);
777 ret = eth_macaddr_get_print_err(port_id, &mac_addr);
781 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n",
782 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
783 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
784 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
785 dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
786 rte_eth_link_speed_to_str(link.link_speed));
790 port_eeprom_display(portid_t port_id)
792 struct rte_dev_eeprom_info einfo;
794 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
799 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
800 if (len_eeprom < 0) {
801 switch (len_eeprom) {
803 printf("port index %d invalid\n", port_id);
806 printf("operation not supported by device\n");
809 printf("device is removed\n");
812 printf("Unable to get EEPROM: %d\n", len_eeprom);
818 char buf[len_eeprom];
820 einfo.length = len_eeprom;
823 ret = rte_eth_dev_get_eeprom(port_id, &einfo);
827 printf("port index %d invalid\n", port_id);
830 printf("operation not supported by device\n");
833 printf("device is removed\n");
836 printf("Unable to get EEPROM: %d\n", ret);
841 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
842 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
846 port_module_eeprom_display(portid_t port_id)
848 struct rte_eth_dev_module_info minfo;
849 struct rte_dev_eeprom_info einfo;
852 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
858 ret = rte_eth_dev_get_module_info(port_id, &minfo);
862 printf("port index %d invalid\n", port_id);
865 printf("operation not supported by device\n");
868 printf("device is removed\n");
871 printf("Unable to get module EEPROM: %d\n", ret);
877 char buf[minfo.eeprom_len];
879 einfo.length = minfo.eeprom_len;
882 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
886 printf("port index %d invalid\n", port_id);
889 printf("operation not supported by device\n");
892 printf("device is removed\n");
895 printf("Unable to get module EEPROM: %d\n", ret);
901 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
902 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
906 port_offload_cap_display(portid_t port_id)
908 struct rte_eth_dev_info dev_info;
909 static const char *info_border = "************";
912 if (port_id_is_invalid(port_id, ENABLED_WARN))
915 ret = eth_dev_info_get_print_err(port_id, &dev_info);
919 printf("\n%s Port %d supported offload features: %s\n",
920 info_border, port_id, info_border);
922 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
923 printf("VLAN stripped: ");
924 if (ports[port_id].dev_conf.rxmode.offloads &
925 DEV_RX_OFFLOAD_VLAN_STRIP)
931 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
932 printf("Double VLANs stripped: ");
933 if (ports[port_id].dev_conf.rxmode.offloads &
934 DEV_RX_OFFLOAD_QINQ_STRIP)
940 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
941 printf("RX IPv4 checksum: ");
942 if (ports[port_id].dev_conf.rxmode.offloads &
943 DEV_RX_OFFLOAD_IPV4_CKSUM)
949 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
950 printf("RX UDP checksum: ");
951 if (ports[port_id].dev_conf.rxmode.offloads &
952 DEV_RX_OFFLOAD_UDP_CKSUM)
958 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
959 printf("RX TCP checksum: ");
960 if (ports[port_id].dev_conf.rxmode.offloads &
961 DEV_RX_OFFLOAD_TCP_CKSUM)
967 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
968 printf("RX SCTP checksum: ");
969 if (ports[port_id].dev_conf.rxmode.offloads &
970 DEV_RX_OFFLOAD_SCTP_CKSUM)
976 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
977 printf("RX Outer IPv4 checksum: ");
978 if (ports[port_id].dev_conf.rxmode.offloads &
979 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
985 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
986 printf("RX Outer UDP checksum: ");
987 if (ports[port_id].dev_conf.rxmode.offloads &
988 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
994 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
995 printf("Large receive offload: ");
996 if (ports[port_id].dev_conf.rxmode.offloads &
997 DEV_RX_OFFLOAD_TCP_LRO)
1003 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
1004 printf("HW timestamp: ");
1005 if (ports[port_id].dev_conf.rxmode.offloads &
1006 DEV_RX_OFFLOAD_TIMESTAMP)
1012 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
1013 printf("Rx Keep CRC: ");
1014 if (ports[port_id].dev_conf.rxmode.offloads &
1015 DEV_RX_OFFLOAD_KEEP_CRC)
1021 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
1022 printf("RX offload security: ");
1023 if (ports[port_id].dev_conf.rxmode.offloads &
1024 DEV_RX_OFFLOAD_SECURITY)
1030 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
1031 printf("VLAN insert: ");
1032 if (ports[port_id].dev_conf.txmode.offloads &
1033 DEV_TX_OFFLOAD_VLAN_INSERT)
1039 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
1040 printf("Double VLANs insert: ");
1041 if (ports[port_id].dev_conf.txmode.offloads &
1042 DEV_TX_OFFLOAD_QINQ_INSERT)
1048 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
1049 printf("TX IPv4 checksum: ");
1050 if (ports[port_id].dev_conf.txmode.offloads &
1051 DEV_TX_OFFLOAD_IPV4_CKSUM)
1057 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
1058 printf("TX UDP checksum: ");
1059 if (ports[port_id].dev_conf.txmode.offloads &
1060 DEV_TX_OFFLOAD_UDP_CKSUM)
1066 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
1067 printf("TX TCP checksum: ");
1068 if (ports[port_id].dev_conf.txmode.offloads &
1069 DEV_TX_OFFLOAD_TCP_CKSUM)
1075 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
1076 printf("TX SCTP checksum: ");
1077 if (ports[port_id].dev_conf.txmode.offloads &
1078 DEV_TX_OFFLOAD_SCTP_CKSUM)
1084 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
1085 printf("TX Outer IPv4 checksum: ");
1086 if (ports[port_id].dev_conf.txmode.offloads &
1087 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
1093 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
1094 printf("TX TCP segmentation: ");
1095 if (ports[port_id].dev_conf.txmode.offloads &
1096 DEV_TX_OFFLOAD_TCP_TSO)
1102 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
1103 printf("TX UDP segmentation: ");
1104 if (ports[port_id].dev_conf.txmode.offloads &
1105 DEV_TX_OFFLOAD_UDP_TSO)
1111 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
1112 printf("TSO for VXLAN tunnel packet: ");
1113 if (ports[port_id].dev_conf.txmode.offloads &
1114 DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
1120 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
1121 printf("TSO for GRE tunnel packet: ");
1122 if (ports[port_id].dev_conf.txmode.offloads &
1123 DEV_TX_OFFLOAD_GRE_TNL_TSO)
1129 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
1130 printf("TSO for IPIP tunnel packet: ");
1131 if (ports[port_id].dev_conf.txmode.offloads &
1132 DEV_TX_OFFLOAD_IPIP_TNL_TSO)
1138 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
1139 printf("TSO for GENEVE tunnel packet: ");
1140 if (ports[port_id].dev_conf.txmode.offloads &
1141 DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
1147 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
1148 printf("IP tunnel TSO: ");
1149 if (ports[port_id].dev_conf.txmode.offloads &
1150 DEV_TX_OFFLOAD_IP_TNL_TSO)
1156 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
1157 printf("UDP tunnel TSO: ");
1158 if (ports[port_id].dev_conf.txmode.offloads &
1159 DEV_TX_OFFLOAD_UDP_TNL_TSO)
1165 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
1166 printf("TX Outer UDP checksum: ");
1167 if (ports[port_id].dev_conf.txmode.offloads &
1168 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
1174 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) {
1175 printf("Tx scheduling on timestamp: ");
1176 if (ports[port_id].dev_conf.txmode.offloads &
1177 DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP)
1186 port_id_is_invalid(portid_t port_id, enum print_warning warning)
1190 if (port_id == (portid_t)RTE_PORT_ALL)
1193 RTE_ETH_FOREACH_DEV(pid)
1197 if (warning == ENABLED_WARN)
1198 printf("Invalid port %d\n", port_id);
1203 void print_valid_ports(void)
1207 printf("The valid ports array is [");
1208 RTE_ETH_FOREACH_DEV(pid) {
1215 vlan_id_is_invalid(uint16_t vlan_id)
1219 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
1224 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
1226 const struct rte_pci_device *pci_dev;
1227 const struct rte_bus *bus;
1230 if (reg_off & 0x3) {
1231 printf("Port register offset 0x%X not aligned on a 4-byte "
1237 if (!ports[port_id].dev_info.device) {
1238 printf("Invalid device\n");
1242 bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
1243 if (bus && !strcmp(bus->name, "pci")) {
1244 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
1246 printf("Not a PCI device\n");
1250 pci_len = pci_dev->mem_resource[0].len;
1251 if (reg_off >= pci_len) {
1252 printf("Port %d: register offset %u (0x%X) out of port PCI "
1253 "resource (length=%"PRIu64")\n",
1254 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
1261 reg_bit_pos_is_invalid(uint8_t bit_pos)
1265 printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
1269 #define display_port_and_reg_off(port_id, reg_off) \
1270 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
1273 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1275 display_port_and_reg_off(port_id, (unsigned)reg_off);
1276 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
1280 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
1285 if (port_id_is_invalid(port_id, ENABLED_WARN))
1287 if (port_reg_off_is_invalid(port_id, reg_off))
1289 if (reg_bit_pos_is_invalid(bit_x))
1291 reg_v = port_id_pci_reg_read(port_id, reg_off);
1292 display_port_and_reg_off(port_id, (unsigned)reg_off);
1293 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
1297 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
1298 uint8_t bit1_pos, uint8_t bit2_pos)
1304 if (port_id_is_invalid(port_id, ENABLED_WARN))
1306 if (port_reg_off_is_invalid(port_id, reg_off))
1308 if (reg_bit_pos_is_invalid(bit1_pos))
1310 if (reg_bit_pos_is_invalid(bit2_pos))
1312 if (bit1_pos > bit2_pos)
1313 l_bit = bit2_pos, h_bit = bit1_pos;
1315 l_bit = bit1_pos, h_bit = bit2_pos;
1317 reg_v = port_id_pci_reg_read(port_id, reg_off);
1320 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
1321 display_port_and_reg_off(port_id, (unsigned)reg_off);
1322 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
1323 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
1327 port_reg_display(portid_t port_id, uint32_t reg_off)
1331 if (port_id_is_invalid(port_id, ENABLED_WARN))
1333 if (port_reg_off_is_invalid(port_id, reg_off))
1335 reg_v = port_id_pci_reg_read(port_id, reg_off);
1336 display_port_reg_value(port_id, reg_off, reg_v);
1340 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
1345 if (port_id_is_invalid(port_id, ENABLED_WARN))
1347 if (port_reg_off_is_invalid(port_id, reg_off))
1349 if (reg_bit_pos_is_invalid(bit_pos))
1352 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
1355 reg_v = port_id_pci_reg_read(port_id, reg_off);
1357 reg_v &= ~(1 << bit_pos);
1359 reg_v |= (1 << bit_pos);
1360 port_id_pci_reg_write(port_id, reg_off, reg_v);
1361 display_port_reg_value(port_id, reg_off, reg_v);
1365 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
1366 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
1373 if (port_id_is_invalid(port_id, ENABLED_WARN))
1375 if (port_reg_off_is_invalid(port_id, reg_off))
1377 if (reg_bit_pos_is_invalid(bit1_pos))
1379 if (reg_bit_pos_is_invalid(bit2_pos))
1381 if (bit1_pos > bit2_pos)
1382 l_bit = bit2_pos, h_bit = bit1_pos;
1384 l_bit = bit1_pos, h_bit = bit2_pos;
1386 if ((h_bit - l_bit) < 31)
1387 max_v = (1 << (h_bit - l_bit + 1)) - 1;
1391 if (value > max_v) {
1392 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
1393 (unsigned)value, (unsigned)value,
1394 (unsigned)max_v, (unsigned)max_v);
1397 reg_v = port_id_pci_reg_read(port_id, reg_off);
1398 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1399 reg_v |= (value << l_bit); /* Set changed bits */
1400 port_id_pci_reg_write(port_id, reg_off, reg_v);
1401 display_port_reg_value(port_id, reg_off, reg_v);
1405 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1407 if (port_id_is_invalid(port_id, ENABLED_WARN))
1409 if (port_reg_off_is_invalid(port_id, reg_off))
1411 port_id_pci_reg_write(port_id, reg_off, reg_v);
1412 display_port_reg_value(port_id, reg_off, reg_v);
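/*
 * Set the MTU of a port and adjust the jumbo-frame Rx offload accordingly.
 * Typically reached from the testpmd CLI, e.g. (illustrative):
 * "port config mtu 0 9000".
 */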
1416 port_mtu_set(portid_t port_id, uint16_t mtu)
1419 struct rte_port *rte_port = &ports[port_id];
1420 struct rte_eth_dev_info dev_info;
1421 uint16_t eth_overhead;
1424 if (port_id_is_invalid(port_id, ENABLED_WARN))
1427 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1431 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
1432 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
1433 mtu, dev_info.min_mtu, dev_info.max_mtu);
1436 diag = rte_eth_dev_set_mtu(port_id, mtu);
1438 printf("Set MTU failed. diag=%d\n", diag);
1439 else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1441 * Ether overhead in the driver is equal to the difference between
1442 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
1443 * device supports jumbo frames.
1445 eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
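/*
 * Worked example (assuming standard Ethernet): with max_rx_pktlen = 1518
 * and max_mtu = 1500 the overhead is 18 bytes (14-byte Ethernet header +
 * 4-byte CRC), so any MTU above 1500 enables the jumbo-frame offload below.
 */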
1446 if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) {
1447 rte_port->dev_conf.rxmode.offloads |=
1448 DEV_RX_OFFLOAD_JUMBO_FRAME;
1449 rte_port->dev_conf.rxmode.max_rx_pkt_len =
1452 rte_port->dev_conf.rxmode.offloads &=
1453 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1457 /* Generic flow management functions. */
1459 /** Generate a port_flow entry from attributes/pattern/actions. */
1460 static struct port_flow *
1461 port_flow_new(const struct rte_flow_attr *attr,
1462 const struct rte_flow_item *pattern,
1463 const struct rte_flow_action *actions,
1464 struct rte_flow_error *error)
1466 const struct rte_flow_conv_rule rule = {
1468 .pattern_ro = pattern,
1469 .actions_ro = actions,
1471 struct port_flow *pf;
1474 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1477 pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1480 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1484 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1491 /** Print a message out of a flow error. */
1493 port_flow_complain(struct rte_flow_error *error)
1495 static const char *const errstrlist[] = {
1496 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1497 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1498 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1499 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1500 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1501 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1502 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1503 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1504 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1505 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1506 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1507 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1508 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1509 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1510 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1511 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1512 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1516 int err = rte_errno;
1518 if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1519 !errstrlist[error->type])
1520 errstr = "unknown type";
1522 errstr = errstrlist[error->type];
1523 printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
1524 error->type, errstr,
1525 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1526 error->cause), buf) : "",
1527 error->message ? error->message : "(no stated reason)",
1533 rss_config_display(struct rte_flow_action_rss *rss_conf)
1537 if (rss_conf == NULL) {
1538 printf("Invalid rule\n");
1544 if (rss_conf->queue_num == 0)
1546 for (i = 0; i < rss_conf->queue_num; i++)
1547 printf(" %d", rss_conf->queue[i]);
1550 printf(" function: ");
1551 switch (rss_conf->func) {
1552 case RTE_ETH_HASH_FUNCTION_DEFAULT:
1553 printf("default\n");
1555 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1556 printf("toeplitz\n");
1558 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1559 printf("simple_xor\n");
1561 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1562 printf("symmetric_toeplitz\n");
1565 printf("Unknown function\n");
1569 printf(" types:\n");
1570 if (rss_conf->types == 0) {
1574 for (i = 0; rss_type_table[i].str; i++) {
1575 if ((rss_conf->types &
1576 rss_type_table[i].rss_type) ==
1577 rss_type_table[i].rss_type &&
1578 rss_type_table[i].rss_type != 0)
1579 printf(" %s\n", rss_type_table[i].str);
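/*
 * Flow-rule validation: typically reached from the testpmd CLI, e.g.
 * (illustrative): "flow validate 0 ingress pattern eth / ipv4 / end
 * actions queue index 1 / end".
 */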
1583 /** Validate flow rule. */
1585 port_flow_validate(portid_t port_id,
1586 const struct rte_flow_attr *attr,
1587 const struct rte_flow_item *pattern,
1588 const struct rte_flow_action *actions)
1590 struct rte_flow_error error;
1592 /* Poisoning to make sure PMDs update it in case of error. */
1593 memset(&error, 0x11, sizeof(error));
1594 if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1595 return port_flow_complain(&error);
1596 printf("Flow rule validated\n");
1600 /** Update age action context by port_flow pointer. */
1602 update_age_action_context(const struct rte_flow_action *actions,
1603 struct port_flow *pf)
1605 struct rte_flow_action_age *age = NULL;
1607 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1608 switch (actions->type) {
1609 case RTE_FLOW_ACTION_TYPE_AGE:
1610 age = (struct rte_flow_action_age *)
1611 (uintptr_t)actions->conf;
1620 /** Create flow rule. */
1622 port_flow_create(portid_t port_id,
1623 const struct rte_flow_attr *attr,
1624 const struct rte_flow_item *pattern,
1625 const struct rte_flow_action *actions)
1627 struct rte_flow *flow;
1628 struct rte_port *port;
1629 struct port_flow *pf;
1631 struct rte_flow_error error;
1633 port = &ports[port_id];
1634 if (port->flow_list) {
1635 if (port->flow_list->id == UINT32_MAX) {
1636 printf("Highest rule ID is already assigned, delete"
1640 id = port->flow_list->id + 1;
1642 pf = port_flow_new(attr, pattern, actions, &error);
1644 return port_flow_complain(&error);
1645 update_age_action_context(actions, pf);
1646 /* Poisoning to make sure PMDs update it in case of error. */
1647 memset(&error, 0x22, sizeof(error));
1648 flow = rte_flow_create(port_id, attr, pattern, actions, &error);
1651 return port_flow_complain(&error);
1653 pf->next = port->flow_list;
1656 port->flow_list = pf;
1657 printf("Flow rule #%u created\n", pf->id);
1661 /** Destroy a number of flow rules. */
1663 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
1665 struct rte_port *port;
1666 struct port_flow **tmp;
1670 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1671 port_id == (portid_t)RTE_PORT_ALL)
1673 port = &ports[port_id];
1674 tmp = &port->flow_list;
1678 for (i = 0; i != n; ++i) {
1679 struct rte_flow_error error;
1680 struct port_flow *pf = *tmp;
1682 if (rule[i] != pf->id)
1685 * Poisoning to make sure PMDs update it in case
1688 memset(&error, 0x33, sizeof(error));
1689 if (rte_flow_destroy(port_id, pf->flow, &error)) {
1690 ret = port_flow_complain(&error);
1693 printf("Flow rule #%u destroyed\n", pf->id);
1699 tmp = &(*tmp)->next;
1705 /** Remove all flow rules. */
1707 port_flow_flush(portid_t port_id)
1709 struct rte_flow_error error;
1710 struct rte_port *port = &ports[port_id];
1713 if (port->flow_list == NULL)
1716 /* Poisoning to make sure PMDs update it in case of error. */
1717 memset(&error, 0x44, sizeof(error));
1718 if (rte_flow_flush(port_id, &error)) {
1719 ret = port_flow_complain(&error);
1720 if (port_id_is_invalid(port_id, DISABLED_WARN) ||
1721 port_id == (portid_t)RTE_PORT_ALL)
1725 while (port->flow_list) {
1726 struct port_flow *pf = port->flow_list->next;
1728 free(port->flow_list);
1729 port->flow_list = pf;
1734 /** Dump all flow rules. */
1736 port_flow_dump(portid_t port_id, const char *file_name)
1739 FILE *file = stdout;
1740 struct rte_flow_error error;
1742 if (file_name && strlen(file_name)) {
1743 file = fopen(file_name, "w");
1745 printf("Failed to create file %s: %s\n", file_name,
1750 ret = rte_flow_dev_dump(port_id, file, &error);
1752 port_flow_complain(&error);
1753 printf("Failed to dump flow: %s\n", strerror(-ret));
1755 printf("Flow dump finished\n");
1756 if (file_name && strlen(file_name))
1761 /** Query a flow rule. */
1763 port_flow_query(portid_t port_id, uint32_t rule,
1764 const struct rte_flow_action *action)
1766 struct rte_flow_error error;
1767 struct rte_port *port;
1768 struct port_flow *pf;
1771 struct rte_flow_query_count count;
1772 struct rte_flow_action_rss rss_conf;
1776 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1777 port_id == (portid_t)RTE_PORT_ALL)
1779 port = &ports[port_id];
1780 for (pf = port->flow_list; pf; pf = pf->next)
1784 printf("Flow rule #%u not found\n", rule);
1787 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1788 &name, sizeof(name),
1789 (void *)(uintptr_t)action->type, &error);
1791 return port_flow_complain(&error);
1792 switch (action->type) {
1793 case RTE_FLOW_ACTION_TYPE_COUNT:
1794 case RTE_FLOW_ACTION_TYPE_RSS:
1797 printf("Cannot query action type %d (%s)\n",
1798 action->type, name);
1801 /* Poisoning to make sure PMDs update it in case of error. */
1802 memset(&error, 0x55, sizeof(error));
1803 memset(&query, 0, sizeof(query));
1804 if (rte_flow_query(port_id, pf->flow, action, &query, &error))
1805 return port_flow_complain(&error);
1806 switch (action->type) {
1807 case RTE_FLOW_ACTION_TYPE_COUNT:
1811 " hits: %" PRIu64 "\n"
1812 " bytes: %" PRIu64 "\n",
1814 query.count.hits_set,
1815 query.count.bytes_set,
1819 case RTE_FLOW_ACTION_TYPE_RSS:
1820 rss_config_display(&query.rss_conf);
1823 printf("Cannot display result for action type %d (%s)\n",
1824 action->type, name);
1830 /** List simply and destroy all aged flows. */
1832 port_flow_aged(portid_t port_id, uint8_t destroy)
1835 int nb_context, total = 0, idx;
1836 struct rte_flow_error error;
1837 struct port_flow *pf;
1839 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1840 port_id == (portid_t)RTE_PORT_ALL)
1842 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
1843 printf("Port %u total aged flows: %d\n", port_id, total);
1845 port_flow_complain(&error);
1850 contexts = malloc(sizeof(void *) * total);
1851 if (contexts == NULL) {
1852 printf("Cannot allocate contexts for aged flow\n");
1855 printf("ID\tGroup\tPrio\tAttr\n");
1856 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
1857 if (nb_context != total) {
1858 printf("Port:%d get aged flows count(%d) != total(%d)\n",
1859 port_id, nb_context, total);
1863 for (idx = 0; idx < nb_context; idx++) {
1864 pf = (struct port_flow *)contexts[idx];
1866 printf("Error: get Null context in port %u\n", port_id);
1869 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t\n",
1871 pf->rule.attr->group,
1872 pf->rule.attr->priority,
1873 pf->rule.attr->ingress ? 'i' : '-',
1874 pf->rule.attr->egress ? 'e' : '-',
1875 pf->rule.attr->transfer ? 't' : '-');
1883 for (idx = 0; idx < nb_context; idx++) {
1884 pf = (struct port_flow *)contexts[idx];
1888 ret = port_flow_destroy(port_id, 1, &flow_id);
1892 printf("%d flows be destroyed\n", total);
1897 /** List flow rules. */
1899 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
1901 struct rte_port *port;
1902 struct port_flow *pf;
1903 struct port_flow *list = NULL;
1906 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1907 port_id == (portid_t)RTE_PORT_ALL)
1909 port = &ports[port_id];
1910 if (!port->flow_list)
1912 /* Sort flows by group, priority and ID. */
1913 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
1914 struct port_flow **tmp;
1915 const struct rte_flow_attr *curr = pf->rule.attr;
1918 /* Filter out unwanted groups. */
1919 for (i = 0; i != n; ++i)
1920 if (curr->group == group[i])
1925 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
1926 const struct rte_flow_attr *comp = (*tmp)->rule.attr;
1928 if (curr->group > comp->group ||
1929 (curr->group == comp->group &&
1930 curr->priority > comp->priority) ||
1931 (curr->group == comp->group &&
1932 curr->priority == comp->priority &&
1933 pf->id > (*tmp)->id))
1940 printf("ID\tGroup\tPrio\tAttr\tRule\n");
1941 for (pf = list; pf != NULL; pf = pf->tmp) {
1942 const struct rte_flow_item *item = pf->rule.pattern;
1943 const struct rte_flow_action *action = pf->rule.actions;
1946 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
1948 pf->rule.attr->group,
1949 pf->rule.attr->priority,
1950 pf->rule.attr->ingress ? 'i' : '-',
1951 pf->rule.attr->egress ? 'e' : '-',
1952 pf->rule.attr->transfer ? 't' : '-');
1953 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1954 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
1955 &name, sizeof(name),
1956 (void *)(uintptr_t)item->type,
1959 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
1960 printf("%s ", name);
1964 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
1965 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1966 &name, sizeof(name),
1967 (void *)(uintptr_t)action->type,
1970 if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
1971 printf(" %s", name);
1978 /** Restrict ingress traffic to the defined flow rules. */
1980 port_flow_isolate(portid_t port_id, int set)
1982 struct rte_flow_error error;
1984 /* Poisoning to make sure PMDs update it in case of error. */
1985 memset(&error, 0x66, sizeof(error));
1986 if (rte_flow_isolate(port_id, set, &error))
1987 return port_flow_complain(&error);
1988 printf("Ingress traffic on port %u is %s to the defined flow rules\n",
1990 set ? "now restricted" : "not restricted anymore");
1995 * RX/TX ring descriptors display functions.
1998 rx_queue_id_is_invalid(queueid_t rxq_id)
2000 if (rxq_id < nb_rxq)
2002 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
2007 tx_queue_id_is_invalid(queueid_t txq_id)
2009 if (txq_id < nb_txq)
2011 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq);
2016 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
2018 struct rte_port *port = &ports[port_id];
2019 struct rte_eth_rxq_info rx_qinfo;
2022 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
2024 *ring_size = rx_qinfo.nb_desc;
2028 if (ret != -ENOTSUP)
2031 * If rte_eth_rx_queue_info_get() is not supported by this PMD, the
2032 * ring_size stored in testpmd is used for validity verification.
2033 * When the rxq is configured by rte_eth_rx_queue_setup() with nb_rx_desc
2034 * set to 0, a default value provided by the PMD is used to set up the
2035 * rxq. If that default value is also 0,
2036 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE is used to set up the rxq.
2038 if (port->nb_rx_desc[rxq_id])
2039 *ring_size = port->nb_rx_desc[rxq_id];
2040 else if (port->dev_info.default_rxportconf.ring_size)
2041 *ring_size = port->dev_info.default_rxportconf.ring_size;
2043 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2048 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
2050 struct rte_port *port = &ports[port_id];
2051 struct rte_eth_txq_info tx_qinfo;
2054 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
2056 *ring_size = tx_qinfo.nb_desc;
2060 if (ret != -ENOTSUP)
2063 * If rte_eth_tx_queue_info_get() is not supported by this PMD, the
2064 * ring_size stored in testpmd is used for validity verification.
2065 * When the txq is configured by rte_eth_tx_queue_setup() with nb_tx_desc
2066 * set to 0, a default value provided by the PMD is used to set up the
2067 * txq. If that default value is also 0,
2068 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE is used to set up the txq.
2070 if (port->nb_tx_desc[txq_id])
2071 *ring_size = port->nb_tx_desc[txq_id];
2072 else if (port->dev_info.default_txportconf.ring_size)
2073 *ring_size = port->dev_info.default_txportconf.ring_size;
2075 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2080 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
2085 ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
2089 if (rxdesc_id < ring_size)
2092 printf("Invalid RX descriptor %u (must be < ring_size=%u)\n",
2093 rxdesc_id, ring_size);
2098 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
2103 ret = get_tx_ring_size(port_id, txq_id, &ring_size);
2107 if (txdesc_id < ring_size)
2110 printf("Invalid TX descriptor %u (must be < ring_size=%u)\n",
2111 txdesc_id, ring_size);
2115 static const struct rte_memzone *
2116 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
2118 char mz_name[RTE_MEMZONE_NAMESIZE];
2119 const struct rte_memzone *mz;
2121 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
2122 port_id, q_id, ring_name);
2123 mz = rte_memzone_lookup(mz_name);
2125 printf("%s ring memory zoneof (port %d, queue %d) not"
2126 "found (zone name = %s\n",
2127 ring_name, port_id, q_id, mz_name);
2131 union igb_ring_dword {
2134 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2144 struct igb_ring_desc_32_bytes {
2145 union igb_ring_dword lo_dword;
2146 union igb_ring_dword hi_dword;
2147 union igb_ring_dword resv1;
2148 union igb_ring_dword resv2;
2151 struct igb_ring_desc_16_bytes {
2152 union igb_ring_dword lo_dword;
2153 union igb_ring_dword hi_dword;
2157 ring_rxd_display_dword(union igb_ring_dword dword)
2159 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
2160 (unsigned)dword.words.hi);
2164 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
2165 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2168 __rte_unused portid_t port_id,
2172 struct igb_ring_desc_16_bytes *ring =
2173 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2174 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2176 struct rte_eth_dev_info dev_info;
2178 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2182 if (strstr(dev_info.driver_name, "i40e") != NULL) {
2183 /* 32 bytes RX descriptor, i40e only */
2184 struct igb_ring_desc_32_bytes *ring =
2185 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
2186 ring[desc_id].lo_dword.dword =
2187 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2188 ring_rxd_display_dword(ring[desc_id].lo_dword);
2189 ring[desc_id].hi_dword.dword =
2190 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2191 ring_rxd_display_dword(ring[desc_id].hi_dword);
2192 ring[desc_id].resv1.dword =
2193 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
2194 ring_rxd_display_dword(ring[desc_id].resv1);
2195 ring[desc_id].resv2.dword =
2196 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
2197 ring_rxd_display_dword(ring[desc_id].resv2);
2202 /* 16 bytes RX descriptor */
2203 ring[desc_id].lo_dword.dword =
2204 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2205 ring_rxd_display_dword(ring[desc_id].lo_dword);
2206 ring[desc_id].hi_dword.dword =
2207 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2208 ring_rxd_display_dword(ring[desc_id].hi_dword);
2212 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
2214 struct igb_ring_desc_16_bytes *ring;
2215 struct igb_ring_desc_16_bytes txd;
2217 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2218 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2219 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2220 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
2221 (unsigned)txd.lo_dword.words.lo,
2222 (unsigned)txd.lo_dword.words.hi,
2223 (unsigned)txd.hi_dword.words.lo,
2224 (unsigned)txd.hi_dword.words.hi);
2228 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
2230 const struct rte_memzone *rx_mz;
2232 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
2234 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
2237 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
2241 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
2243 const struct rte_memzone *tx_mz;
2245 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
2247 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
2250 ring_tx_descriptor_display(tx_mz, txd_id);
2254 fwd_lcores_config_display(void)
2258 printf("List of forwarding lcores:");
2259 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
2260 printf(" %2u", fwd_lcores_cpuids[lc_id]);
2264 rxtx_config_display(void)
2269 printf(" %s packet forwarding%s packets/burst=%d\n",
2270 cur_fwd_eng->fwd_mode_name,
2271 retry_enabled == 0 ? "" : " with retry",
2274 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
2275 printf(" packet len=%u - nb packet segments=%d\n",
2276 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
2278 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
2279 nb_fwd_lcores, nb_fwd_ports);
2281 RTE_ETH_FOREACH_DEV(pid) {
2282 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
2283 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
2284 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
2285 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
2286 struct rte_eth_rxq_info rx_qinfo;
2287 struct rte_eth_txq_info tx_qinfo;
2288 uint16_t rx_free_thresh_tmp;
2289 uint16_t tx_free_thresh_tmp;
2290 uint16_t tx_rs_thresh_tmp;
2291 uint16_t nb_rx_desc_tmp;
2292 uint16_t nb_tx_desc_tmp;
2293 uint64_t offloads_tmp;
2294 uint8_t pthresh_tmp;
2295 uint8_t hthresh_tmp;
2296 uint8_t wthresh_tmp;
2299 /* per port config */
2300 printf(" port %d: RX queue number: %d Tx queue number: %d\n",
2301 (unsigned int)pid, nb_rxq, nb_txq);
2303 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
2304 ports[pid].dev_conf.rxmode.offloads,
2305 ports[pid].dev_conf.txmode.offloads);
2307 /* per rx queue config only for first queue to be less verbose */
2308 for (qid = 0; qid < 1; qid++) {
2309 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
2311 nb_rx_desc_tmp = nb_rx_desc[qid];
2312 rx_free_thresh_tmp =
2313 rx_conf[qid].rx_free_thresh;
2314 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
2315 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
2316 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
2317 offloads_tmp = rx_conf[qid].offloads;
2319 nb_rx_desc_tmp = rx_qinfo.nb_desc;
2320 rx_free_thresh_tmp =
2321 rx_qinfo.conf.rx_free_thresh;
2322 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
2323 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
2324 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
2325 offloads_tmp = rx_qinfo.conf.offloads;
2328 printf(" RX queue: %d\n", qid);
2329 printf(" RX desc=%d - RX free threshold=%d\n",
2330 nb_rx_desc_tmp, rx_free_thresh_tmp);
2331 printf(" RX threshold registers: pthresh=%d hthresh=%d "
2333 pthresh_tmp, hthresh_tmp, wthresh_tmp);
2334 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp);
2337 /* per tx queue config only for first queue to be less verbose */
2338 for (qid = 0; qid < 1; qid++) {
2339 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
2341 nb_tx_desc_tmp = nb_tx_desc[qid];
2342 tx_free_thresh_tmp =
2343 tx_conf[qid].tx_free_thresh;
2344 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
2345 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
2346 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
2347 offloads_tmp = tx_conf[qid].offloads;
2348 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
2350 nb_tx_desc_tmp = tx_qinfo.nb_desc;
2351 tx_free_thresh_tmp =
2352 tx_qinfo.conf.tx_free_thresh;
2353 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
2354 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
2355 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
2356 offloads_tmp = tx_qinfo.conf.offloads;
2357 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
2360 printf(" TX queue: %d\n", qid);
2361 printf(" TX desc=%d - TX free threshold=%d\n",
2362 nb_tx_desc_tmp, tx_free_thresh_tmp);
2363 printf(" TX threshold registers: pthresh=%d hthresh=%d "
2365 pthresh_tmp, hthresh_tmp, wthresh_tmp);
2366 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
2367 offloads_tmp, tx_rs_thresh_tmp);
2373 port_rss_reta_info(portid_t port_id,
2374 struct rte_eth_rss_reta_entry64 *reta_conf,
2375 uint16_t nb_entries)
2377 uint16_t i, idx, shift;
2380 if (port_id_is_invalid(port_id, ENABLED_WARN))
2383 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
2385 printf("Failed to get RSS RETA info, return code = %d\n", ret);
2389 for (i = 0; i < nb_entries; i++) {
2390 idx = i / RTE_RETA_GROUP_SIZE;
2391 shift = i % RTE_RETA_GROUP_SIZE;
2392 if (!(reta_conf[idx].mask & (1ULL << shift)))
2394 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
2395 i, reta_conf[idx].reta[shift]);
2400 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
2404 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
2406 struct rte_eth_rss_conf rss_conf = {0};
2407 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
2411 struct rte_eth_dev_info dev_info;
2412 uint8_t hash_key_size;
2415 if (port_id_is_invalid(port_id, ENABLED_WARN))
2418 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2422 if (dev_info.hash_key_size > 0 &&
2423 dev_info.hash_key_size <= sizeof(rss_key))
2424 hash_key_size = dev_info.hash_key_size;
2426 printf("dev_info did not provide a valid hash key size\n");
2430 /* Get RSS hash key if asked to display it */
2431 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
2432 rss_conf.rss_key_len = hash_key_size;
2433 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2437 printf("port index %d invalid\n", port_id);
2440 printf("operation not supported by device\n");
2443 printf("operation failed - diag=%d\n", diag);
2448 rss_hf = rss_conf.rss_hf;
2450 printf("RSS disabled\n");
2453 printf("RSS functions:\n ");
2454 for (i = 0; rss_type_table[i].str; i++) {
2455 if (rss_hf & rss_type_table[i].rss_type)
2456 printf("%s ", rss_type_table[i].str);
2461 printf("RSS key:\n");
2462 for (i = 0; i < hash_key_size; i++)
2463 printf("%02X", rss_key[i]);
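/*
 * Illustrative testpmd CLI usage of the function above (a sketch; the output
 * is abridged and the key value is made up for the example):
 *
 *   testpmd> show port 0 rss-hash key
 *   RSS functions:
 *    ipv4 ipv6 udp tcp
 *   RSS key:
 *   6D5A6D5A6D5A...
 */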
2468 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
2471 struct rte_eth_rss_conf rss_conf;
2475 rss_conf.rss_key = NULL;
2476 rss_conf.rss_key_len = hash_key_len;
2477 rss_conf.rss_hf = 0;
2478 for (i = 0; rss_type_table[i].str; i++) {
2479 if (!strcmp(rss_type_table[i].str, rss_type))
2480 rss_conf.rss_hf = rss_type_table[i].rss_type;
2482 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2484 rss_conf.rss_key = hash_key;
2485 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
2492 printf("port index %d invalid\n", port_id);
2495 printf("operation not supported by device\n");
2498 printf("operation failed - diag=%d\n", diag);
2504 * Set up the forwarding configuration for each logical core.
2507 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
2509 streamid_t nb_fs_per_lcore;
2517 nb_fs = cfg->nb_fwd_streams;
2518 nb_fc = cfg->nb_fwd_lcores;
2519 if (nb_fs <= nb_fc) {
2520 nb_fs_per_lcore = 1;
2523 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
2524 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
2527 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
2529 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
2530 fwd_lcores[lc_id]->stream_idx = sm_id;
2531 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
2532 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2536 * Assign extra remaining streams, if any.
2538 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
2539 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
2540 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
2541 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
2542 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
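/*
 * Worked example of the distribution above (illustrative only): with
 * nb_fwd_streams = 10 and nb_fwd_lcores = 4, nb_fs_per_lcore = 2 and
 * nb_extra = 2, so the last two lcores each take one extra stream:
 *
 *   lcore 0: stream_idx = 0, stream_nb = 2
 *   lcore 1: stream_idx = 2, stream_nb = 2
 *   lcore 2: stream_idx = 4, stream_nb = 3
 *   lcore 3: stream_idx = 7, stream_nb = 3
 */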
2547 fwd_topology_tx_port_get(portid_t rxp)
2549 static int warning_once = 1;
2551 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
2553 switch (port_topology) {
2555 case PORT_TOPOLOGY_PAIRED:
2556 if ((rxp & 0x1) == 0) {
2557 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2560 printf("\nWarning! port-topology=paired"
2561 " and odd number of forwarding ports;"
2562 " the last port will pair with"
2569 case PORT_TOPOLOGY_CHAINED:
2570 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2571 case PORT_TOPOLOGY_LOOP:
2577 simple_fwd_config_setup(void)
2581 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2582 cur_fwd_config.nb_fwd_streams =
2583 (streamid_t) cur_fwd_config.nb_fwd_ports;
2585 /* reinitialize forwarding streams */
2589 * In the simple forwarding test, the number of forwarding cores
2590 * must be lower or equal to the number of forwarding ports.
2592 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2593 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2594 cur_fwd_config.nb_fwd_lcores =
2595 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
2596 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2598 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2599 fwd_streams[i]->rx_port = fwd_ports_ids[i];
2600 fwd_streams[i]->rx_queue = 0;
2601 fwd_streams[i]->tx_port =
2602 fwd_ports_ids[fwd_topology_tx_port_get(i)];
2603 fwd_streams[i]->tx_queue = 0;
2604 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2605 fwd_streams[i]->retry_enabled = retry_enabled;
2610 * For the RSS forwarding test, all streams are distributed over the lcores.
2611 * Each stream is composed of an RX queue to poll on an RX port for input
2612 * packets, associated with a TX queue of a TX port to which forwarded packets are sent.
2615 rss_fwd_config_setup(void)
2626 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2627 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2628 cur_fwd_config.nb_fwd_streams =
2629 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2631 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2632 cur_fwd_config.nb_fwd_lcores =
2633 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2635 /* reinitialize forwarding streams */
2638 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2640 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2641 struct fwd_stream *fs;
2643 fs = fwd_streams[sm_id];
2644 txp = fwd_topology_tx_port_get(rxp);
2645 fs->rx_port = fwd_ports_ids[rxp];
2647 fs->tx_port = fwd_ports_ids[txp];
2649 fs->peer_addr = fs->tx_port;
2650 fs->retry_enabled = retry_enabled;
2652 if (rxp < nb_fwd_ports)
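/*
 * Illustrative mapping (a sketch, assuming 2 forwarding ports in the default
 * paired topology and nb_rxq = nb_txq = 2): streams are assigned across
 * ports first and queues second, giving
 *
 *   stream 0: RX P0/Q0 -> TX P1/Q0
 *   stream 1: RX P1/Q0 -> TX P0/Q0
 *   stream 2: RX P0/Q1 -> TX P1/Q1
 *   stream 3: RX P1/Q1 -> TX P0/Q1
 */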
2660 * For the DCB forwarding test, each core is assigned to one traffic class.
2662 * Each core is assigned multiple streams, each stream being composed of
2663 * an RX queue to poll on an RX port for input packets, associated with
2664 * a TX queue of a TX port to which forwarded packets are sent. All RX and
2665 * TX queues of a stream map to the same traffic class.
2666 * If VMDQ and DCB co-exist, each traffic class on the different pools shares the same core.
2670 dcb_fwd_config_setup(void)
2672 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2673 portid_t txp, rxp = 0;
2674 queueid_t txq, rxq = 0;
2676 uint16_t nb_rx_queue, nb_tx_queue;
2677 uint16_t i, j, k, sm_id = 0;
2680 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2681 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2682 cur_fwd_config.nb_fwd_streams =
2683 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2685 /* reinitialize forwarding streams */
2689 /* get the dcb info on the first RX and TX ports */
2690 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2691 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2693 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2694 fwd_lcores[lc_id]->stream_nb = 0;
2695 fwd_lcores[lc_id]->stream_idx = sm_id;
2696 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2697 /* if nb_queue is zero, this TC is
2698 * not enabled on the pool
2700 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2702 k = fwd_lcores[lc_id]->stream_nb +
2703 fwd_lcores[lc_id]->stream_idx;
2704 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2705 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2706 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2707 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2708 for (j = 0; j < nb_rx_queue; j++) {
2709 struct fwd_stream *fs;
2711 fs = fwd_streams[k + j];
2712 fs->rx_port = fwd_ports_ids[rxp];
2713 fs->rx_queue = rxq + j;
2714 fs->tx_port = fwd_ports_ids[txp];
2715 fs->tx_queue = txq + j % nb_tx_queue;
2716 fs->peer_addr = fs->tx_port;
2717 fs->retry_enabled = retry_enabled;
2719 fwd_lcores[lc_id]->stream_nb +=
2720 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2722 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2725 if (tc < rxp_dcb_info.nb_tcs)
2727 /* Restart from TC 0 on next RX port */
2729 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2731 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2734 if (rxp >= nb_fwd_ports)
2736 /* get the dcb information on next RX and TX ports */
2737 if ((rxp & 0x1) == 0)
2738 txp = (portid_t) (rxp + 1);
2740 txp = (portid_t) (rxp - 1);
2741 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2742 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2747 icmp_echo_config_setup(void)
2754 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2755 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2756 (nb_txq * nb_fwd_ports);
2758 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2759 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2760 cur_fwd_config.nb_fwd_streams =
2761 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2762 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2763 cur_fwd_config.nb_fwd_lcores =
2764 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2765 if (verbose_level > 0) {
2766 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2768 cur_fwd_config.nb_fwd_lcores,
2769 cur_fwd_config.nb_fwd_ports,
2770 cur_fwd_config.nb_fwd_streams);
2773 /* reinitialize forwarding streams */
2775 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2777 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2778 if (verbose_level > 0)
2779 printf(" core=%d: \n", lc_id);
2780 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2781 struct fwd_stream *fs;
2782 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2783 fs->rx_port = fwd_ports_ids[rxp];
2785 fs->tx_port = fs->rx_port;
2787 fs->peer_addr = fs->tx_port;
2788 fs->retry_enabled = retry_enabled;
2789 if (verbose_level > 0)
2790 printf(" stream=%d port=%d rxq=%d txq=%d\n",
2791 sm_id, fs->rx_port, fs->rx_queue,
2793 rxq = (queueid_t) (rxq + 1);
2794 if (rxq == nb_rxq) {
2796 rxp = (portid_t) (rxp + 1);
2803 fwd_config_setup(void)
2805 cur_fwd_config.fwd_eng = cur_fwd_eng;
2806 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2807 icmp_echo_config_setup();
2811 if ((nb_rxq > 1) && (nb_txq > 1)) {
2813 dcb_fwd_config_setup();
2815 rss_fwd_config_setup();
2818 simple_fwd_config_setup();
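/*
 * Illustrative examples of the selection above (a sketch; DCB is assumed to
 * be unconfigured unless stated):
 *
 *   --rxq=1 --txq=1              -> simple_fwd_config_setup()
 *   --rxq=4 --txq=4              -> rss_fwd_config_setup()
 *   --rxq=4 --txq=4, DCB enabled -> dcb_fwd_config_setup()
 *   --forward-mode=icmpecho      -> icmp_echo_config_setup()
 */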
2822 mp_alloc_to_str(uint8_t mode)
2825 case MP_ALLOC_NATIVE:
2831 case MP_ALLOC_XMEM_HUGE:
2841 pkt_fwd_config_display(struct fwd_config *cfg)
2843 struct fwd_stream *fs;
2847 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2848 "NUMA support %s, MP allocation mode: %s\n",
2849 cfg->fwd_eng->fwd_mode_name,
2850 retry_enabled == 0 ? "" : " with retry",
2851 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2852 numa_support == 1 ? "enabled" : "disabled",
2853 mp_alloc_to_str(mp_alloc_type));
2856 printf("TX retry num: %u, delay between TX retries: %uus\n",
2857 burst_tx_retry_num, burst_tx_delay_time);
2858 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2859 printf("Logical Core %u (socket %u) forwards packets on "
2861 fwd_lcores_cpuids[lc_id],
2862 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2863 fwd_lcores[lc_id]->stream_nb);
2864 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2865 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2866 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
2867 "P=%d/Q=%d (socket %u) ",
2868 fs->rx_port, fs->rx_queue,
2869 ports[fs->rx_port].socket_id,
2870 fs->tx_port, fs->tx_queue,
2871 ports[fs->tx_port].socket_id);
2872 print_ethaddr("peer=",
2873 &peer_eth_addrs[fs->peer_addr]);
2881 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2883 struct rte_ether_addr new_peer_addr;
2884 if (!rte_eth_dev_is_valid_port(port_id)) {
2885 printf("Error: Invalid port number %i\n", port_id);
2888 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
2889 printf("Error: Invalid ethernet address: %s\n", peer_addr);
2892 peer_eth_addrs[port_id] = new_peer_addr;
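/*
 * Illustrative testpmd CLI usage (a sketch; the MAC value is arbitrary):
 *
 *   testpmd> set eth-peer 0 02:00:00:00:00:01
 *
 * In forwarding modes that rewrite the destination MAC, packets sent out of
 * port 0 then use this address as the peer.
 */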
2896 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2899 unsigned int lcore_cpuid;
2904 for (i = 0; i < nb_lc; i++) {
2905 lcore_cpuid = lcorelist[i];
2906 if (! rte_lcore_is_enabled(lcore_cpuid)) {
2907 printf("lcore %u not enabled\n", lcore_cpuid);
2910 if (lcore_cpuid == rte_get_master_lcore()) {
2911 printf("lcore %u cannot be used for running "
2912 "packet forwarding: it is the master lcore, "
2913 "reserved for command line parsing only\n",
2918 fwd_lcores_cpuids[i] = lcore_cpuid;
2920 if (record_now == 0) {
2924 nb_cfg_lcores = (lcoreid_t) nb_lc;
2925 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2926 printf("previous number of forwarding cores %u - changed to "
2927 "number of configured cores %u\n",
2928 (unsigned int) nb_fwd_lcores, nb_lc);
2929 nb_fwd_lcores = (lcoreid_t) nb_lc;
2936 set_fwd_lcores_mask(uint64_t lcoremask)
2938 unsigned int lcorelist[64];
2942 if (lcoremask == 0) {
2943 printf("Invalid NULL mask of cores\n");
2947 for (i = 0; i < 64; i++) {
2948 if (! ((uint64_t)(1ULL << i) & lcoremask))
2950 lcorelist[nb_lc++] = i;
2952 return set_fwd_lcores_list(lcorelist, nb_lc);
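/*
 * Worked example of the conversion above (illustrative only, assuming the
 * selected lcores are enabled in the EAL core mask): a mask of 0x16
 * (binary 10110) selects lcores 1, 2 and 4, so this call becomes
 * set_fwd_lcores_list({1, 2, 4}, 3).
 */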
2956 set_fwd_lcores_number(uint16_t nb_lc)
2958 if (nb_lc > nb_cfg_lcores) {
2959 printf("nb fwd cores %u > %u (max. number of configured "
2960 "lcores) - ignored\n",
2961 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2964 nb_fwd_lcores = (lcoreid_t) nb_lc;
2965 printf("Number of forwarding cores set to %u\n",
2966 (unsigned int) nb_fwd_lcores);
2970 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2978 for (i = 0; i < nb_pt; i++) {
2979 port_id = (portid_t) portlist[i];
2980 if (port_id_is_invalid(port_id, ENABLED_WARN))
2983 fwd_ports_ids[i] = port_id;
2985 if (record_now == 0) {
2989 nb_cfg_ports = (portid_t) nb_pt;
2990 if (nb_fwd_ports != (portid_t) nb_pt) {
2991 printf("previous number of forwarding ports %u - changed to "
2992 "number of configured ports %u\n",
2993 (unsigned int) nb_fwd_ports, nb_pt);
2994 nb_fwd_ports = (portid_t) nb_pt;
2999 * Parse the user input and obtain the list of forwarding ports
3002 * String containing the user input. The user can specify
3003 * it in formats like 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
3004 * For example, to use all 4 available
3005 * ports in the system, the input can be 0-3 or 0,1,2,3.
3006 * To use only ports 1 and 2, the input can be 1,2 or 1-2.
3008 * The only valid separator characters are '-' and ','.
3009 * @param[out] values
3010 * This array will be filled with a list of port IDs
3011 * based on the user input
3012 * Note that duplicate entries are discarded; only the first
3013 * count entries in this array are valid port IDs and the rest
3014 * keep their default values.
3015 * @param[in] maxsize
3016 * This parameter denotes 2 things
3017 * 1) Number of elements in the values array
3018 * 2) Maximum value of each element in the values array
3020 * On success, returns total count of parsed port IDs
3021 * On failure, returns 0
3024 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
3026 unsigned int count = 0;
3030 unsigned int marked[maxsize];
3032 if (list == NULL || values == NULL)
3035 for (i = 0; i < (int)maxsize; i++)
3041 /* Skip any leading blank spaces */
3042 while (isblank(*list))
3047 value = strtol(list, &end, 10);
3048 if (errno || end == NULL)
3050 if (value < 0 || value >= (int)maxsize)
3052 while (isblank(*end))
3054 if (*end == '-' && min == INT_MAX) {
3056 } else if ((*end == ',') || (*end == '\0')) {
3060 for (i = min; i <= max; i++) {
3061 if (count < maxsize) {
3073 } while (*end != '\0');
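/*
 * Illustrative usage sketch for parse_port_list() (not part of testpmd):
 *
 *   unsigned int ids[RTE_MAX_ETHPORTS];
 *   unsigned int n = parse_port_list("0-2,5", ids, RTE_MAX_ETHPORTS);
 *
 * On success n == 4 and ids[0..3] == { 0, 1, 2, 5 }; duplicate entries in
 * the input string would be discarded.
 */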
3079 parse_fwd_portlist(const char *portlist)
3081 unsigned int portcount;
3082 unsigned int portindex[RTE_MAX_ETHPORTS];
3083 unsigned int i, valid_port_count = 0;
3085 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
3087 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
3090 * Here we verify the validity of the ports
3091 * and thereby calculate the total number of
3094 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
3095 if (rte_eth_dev_is_valid_port(portindex[i])) {
3096 portindex[valid_port_count] = portindex[i];
3101 set_fwd_ports_list(portindex, valid_port_count);
3105 set_fwd_ports_mask(uint64_t portmask)
3107 unsigned int portlist[64];
3111 if (portmask == 0) {
3112 printf("Invalid NULL mask of ports\n");
3116 RTE_ETH_FOREACH_DEV(i) {
3117 if (! ((uint64_t)(1ULL << i) & portmask))
3119 portlist[nb_pt++] = i;
3121 set_fwd_ports_list(portlist, nb_pt);
3125 set_fwd_ports_number(uint16_t nb_pt)
3127 if (nb_pt > nb_cfg_ports) {
3128 printf("nb fwd ports %u > %u (number of configured "
3129 "ports) - ignored\n",
3130 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
3133 nb_fwd_ports = (portid_t) nb_pt;
3134 printf("Number of forwarding ports set to %u\n",
3135 (unsigned int) nb_fwd_ports);
3139 port_is_forwarding(portid_t port_id)
3143 if (port_id_is_invalid(port_id, ENABLED_WARN))
3146 for (i = 0; i < nb_fwd_ports; i++) {
3147 if (fwd_ports_ids[i] == port_id)
3155 set_nb_pkt_per_burst(uint16_t nb)
3157 if (nb > MAX_PKT_BURST) {
3158 printf("nb pkt per burst: %u > %u (maximum packets per burst) "
3160 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
3163 nb_pkt_per_burst = nb;
3164 printf("Number of packets per burst set to %u\n",
3165 (unsigned int) nb_pkt_per_burst);
3169 tx_split_get_name(enum tx_pkt_split split)
3173 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3174 if (tx_split_name[i].split == split)
3175 return tx_split_name[i].name;
3181 set_tx_pkt_split(const char *name)
3185 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3186 if (strcmp(tx_split_name[i].name, name) == 0) {
3187 tx_pkt_split = tx_split_name[i].split;
3191 printf("unknown value: \"%s\"\n", name);
3195 show_tx_pkt_segments(void)
3201 split = tx_split_get_name(tx_pkt_split);
3203 printf("Number of segments: %u\n", n);
3204 printf("Segment sizes: ");
3205 for (i = 0; i != n - 1; i++)
3206 printf("%hu,", tx_pkt_seg_lengths[i]);
3207 printf("%hu\n", tx_pkt_seg_lengths[i]);
3208 printf("Split packet: %s\n", split);
3212 nb_segs_is_invalid(unsigned int nb_segs)
3219 RTE_ETH_FOREACH_DEV(port_id) {
3220 for (queue_id = 0; queue_id < nb_txq; queue_id++) {
3221 ret = get_tx_ring_size(port_id, queue_id, &ring_size);
3226 if (ring_size < nb_segs) {
3227 printf("nb segments per TX packet=%u >= "
3228 "TX queue(%u) ring_size=%u - ignored\n",
3229 nb_segs, queue_id, ring_size);
3239 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
3241 uint16_t tx_pkt_len;
3244 if (nb_segs_is_invalid(nb_segs))
3248 * Check that each segment length is greater than or equal to
3249 * the mbuf data size.
3250 * Check also that the total packet length is greater than or equal to the
3251 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
3255 for (i = 0; i < nb_segs; i++) {
3256 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
3257 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
3258 i, seg_lengths[i], (unsigned) mbuf_data_size);
3261 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
3263 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
3264 printf("total packet length=%u < %d - give up\n",
3265 (unsigned) tx_pkt_len,
3266 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
3270 for (i = 0; i < nb_segs; i++)
3271 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
3273 tx_pkt_length = tx_pkt_len;
3274 tx_pkt_nb_segs = (uint8_t) nb_segs;
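/*
 * Worked example of the checks above (illustrative only, assuming the
 * default mbuf data size): the testpmd command "set txpkts 64,64" requests
 * two segments and a total tx_pkt_length of 128 bytes, which passes the
 * minimum-length check since sizeof(struct rte_ether_hdr) + 20 + 8 =
 * 14 + 20 + 8 = 42 bytes (Ethernet + IPv4 + UDP headers with no payload).
 */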
3278 show_tx_pkt_times(void)
3280 printf("Interburst gap: %u\n", tx_pkt_times_inter);
3281 printf("Intraburst gap: %u\n", tx_pkt_times_intra);
3285 set_tx_pkt_times(unsigned int *tx_times)
3288 int offload_found = 0;
3292 static const struct rte_mbuf_dynfield desc_offs = {
3293 .name = RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
3294 .size = sizeof(uint64_t),
3295 .align = __alignof__(uint64_t),
3297 static const struct rte_mbuf_dynflag desc_flag = {
3298 .name = RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME,
3301 RTE_ETH_FOREACH_DEV(port_id) {
3302 struct rte_eth_dev_info dev_info = { 0 };
3305 ret = rte_eth_dev_info_get(port_id, &dev_info);
3306 if (ret == 0 && dev_info.tx_offload_capa &
3307 DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) {
3312 if (!offload_found) {
3313 printf("No device supporting Tx timestamp scheduling found, "
3314 "dynamic flag and field not registered\n");
3317 offset = rte_mbuf_dynfield_register(&desc_offs);
3318 if (offset < 0 && rte_errno != EEXIST)
3319 printf("Dynamic timestamp field registration error: %d",
3321 flag = rte_mbuf_dynflag_register(&desc_flag);
3322 if (flag < 0 && rte_errno != EEXIST)
3323 printf("Dynamic timestamp flag registration error: %d",
3325 tx_pkt_times_inter = tx_times[0];
3326 tx_pkt_times_intra = tx_times[1];
3330 setup_gro(const char *onoff, portid_t port_id)
3332 if (!rte_eth_dev_is_valid_port(port_id)) {
3333 printf("invalid port id %u\n", port_id);
3336 if (test_done == 0) {
3337 printf("Before enabling/disabling GRO,"
3338 " please stop forwarding first\n");
3341 if (strcmp(onoff, "on") == 0) {
3342 if (gro_ports[port_id].enable != 0) {
3343 printf("Port %u has already enabled GRO. Please"
3344 " disable it first\n", port_id);
3347 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3348 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
3349 gro_ports[port_id].param.max_flow_num =
3350 GRO_DEFAULT_FLOW_NUM;
3351 gro_ports[port_id].param.max_item_per_flow =
3352 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
3354 gro_ports[port_id].enable = 1;
3356 if (gro_ports[port_id].enable == 0) {
3357 printf("GRO is not enabled on port %u\n", port_id);
3360 gro_ports[port_id].enable = 0;
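/*
 * Illustrative testpmd CLI usage (a sketch): with forwarding stopped,
 *
 *   testpmd> set port 0 gro on
 *
 * enables TCP/IPv4 GRO on port 0 with the default flow and per-flow packet
 * limits; "set port 0 gro off" disables it again.
 */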
3365 setup_gro_flush_cycles(uint8_t cycles)
3367 if (test_done == 0) {
3368 printf("Before changing the flush interval for GRO,"
3369 " please stop forwarding first.\n");
3373 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
3374 GRO_DEFAULT_FLUSH_CYCLES) {
3375 printf("The flushing cycle must be in the range"
3376 " of 1 to %u. Reverting to the default value %u.\n",
3378 GRO_MAX_FLUSH_CYCLES,
3379 GRO_DEFAULT_FLUSH_CYCLES);
3380 cycles = GRO_DEFAULT_FLUSH_CYCLES;
3383 gro_flush_cycles = cycles;
3387 show_gro(portid_t port_id)
3389 struct rte_gro_param *param;
3390 uint32_t max_pkts_num;
3392 param = &gro_ports[port_id].param;
3394 if (!rte_eth_dev_is_valid_port(port_id)) {
3395 printf("Invalid port id %u.\n", port_id);
3398 if (gro_ports[port_id].enable) {
3399 printf("GRO type: TCP/IPv4\n");
3400 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3401 max_pkts_num = param->max_flow_num *
3402 param->max_item_per_flow;
3404 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
3405 printf("Max number of packets to perform GRO: %u\n",
3407 printf("Flushing cycles: %u\n", gro_flush_cycles);
3409 printf("GRO is not enabled on port %u.\n", port_id);
3413 setup_gso(const char *mode, portid_t port_id)
3415 if (!rte_eth_dev_is_valid_port(port_id)) {
3416 printf("invalid port id %u\n", port_id);
3419 if (strcmp(mode, "on") == 0) {
3420 if (test_done == 0) {
3421 printf("before enabling GSO,"
3422 " please stop forwarding first\n");
3425 gso_ports[port_id].enable = 1;
3426 } else if (strcmp(mode, "off") == 0) {
3427 if (test_done == 0) {
3428 printf("before disabling GSO,"
3429 " please stop forwarding first\n");
3432 gso_ports[port_id].enable = 0;
3437 list_pkt_forwarding_modes(void)
3439 static char fwd_modes[128] = "";
3440 const char *separator = "|";
3441 struct fwd_engine *fwd_eng;
3444 if (strlen (fwd_modes) == 0) {
3445 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3446 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3447 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3448 strncat(fwd_modes, separator,
3449 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3451 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3458 list_pkt_forwarding_retry_modes(void)
3460 static char fwd_modes[128] = "";
3461 const char *separator = "|";
3462 struct fwd_engine *fwd_eng;
3465 if (strlen(fwd_modes) == 0) {
3466 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3467 if (fwd_eng == &rx_only_engine)
3469 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3471 strlen(fwd_modes) - 1);
3472 strncat(fwd_modes, separator,
3474 strlen(fwd_modes) - 1);
3476 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3483 set_pkt_forwarding_mode(const char *fwd_mode_name)
3485 struct fwd_engine *fwd_eng;
3489 while ((fwd_eng = fwd_engines[i]) != NULL) {
3490 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
3491 printf("Set %s packet forwarding mode%s\n",
3493 retry_enabled == 0 ? "" : " with retry");
3494 cur_fwd_eng = fwd_eng;
3499 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
3503 add_rx_dump_callbacks(portid_t portid)
3505 struct rte_eth_dev_info dev_info;
3509 if (port_id_is_invalid(portid, ENABLED_WARN))
3512 ret = eth_dev_info_get_print_err(portid, &dev_info);
3516 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3517 if (!ports[portid].rx_dump_cb[queue])
3518 ports[portid].rx_dump_cb[queue] =
3519 rte_eth_add_rx_callback(portid, queue,
3520 dump_rx_pkts, NULL);
3524 add_tx_dump_callbacks(portid_t portid)
3526 struct rte_eth_dev_info dev_info;
3530 if (port_id_is_invalid(portid, ENABLED_WARN))
3533 ret = eth_dev_info_get_print_err(portid, &dev_info);
3537 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3538 if (!ports[portid].tx_dump_cb[queue])
3539 ports[portid].tx_dump_cb[queue] =
3540 rte_eth_add_tx_callback(portid, queue,
3541 dump_tx_pkts, NULL);
3545 remove_rx_dump_callbacks(portid_t portid)
3547 struct rte_eth_dev_info dev_info;
3551 if (port_id_is_invalid(portid, ENABLED_WARN))
3554 ret = eth_dev_info_get_print_err(portid, &dev_info);
3558 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3559 if (ports[portid].rx_dump_cb[queue]) {
3560 rte_eth_remove_rx_callback(portid, queue,
3561 ports[portid].rx_dump_cb[queue]);
3562 ports[portid].rx_dump_cb[queue] = NULL;
3567 remove_tx_dump_callbacks(portid_t portid)
3569 struct rte_eth_dev_info dev_info;
3573 if (port_id_is_invalid(portid, ENABLED_WARN))
3576 ret = eth_dev_info_get_print_err(portid, &dev_info);
3580 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3581 if (ports[portid].tx_dump_cb[queue]) {
3582 rte_eth_remove_tx_callback(portid, queue,
3583 ports[portid].tx_dump_cb[queue]);
3584 ports[portid].tx_dump_cb[queue] = NULL;
3589 configure_rxtx_dump_callbacks(uint16_t verbose)
3593 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3594 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
3598 RTE_ETH_FOREACH_DEV(portid)
3600 if (verbose == 1 || verbose > 2)
3601 add_rx_dump_callbacks(portid);
3603 remove_rx_dump_callbacks(portid);
3605 add_tx_dump_callbacks(portid);
3607 remove_tx_dump_callbacks(portid);
3612 set_verbose_level(uint16_t vb_level)
3614 printf("Change verbose level from %u to %u\n",
3615 (unsigned int) verbose_level, (unsigned int) vb_level);
3616 verbose_level = vb_level;
3617 configure_rxtx_dump_callbacks(verbose_level);
3621 vlan_extend_set(portid_t port_id, int on)
3625 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3627 if (port_id_is_invalid(port_id, ENABLED_WARN))
3630 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3633 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
3634 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3636 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
3637 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3640 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3642 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
3643 "diag=%d\n", port_id, on, diag);
3646 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3650 rx_vlan_strip_set(portid_t port_id, int on)
3654 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3656 if (port_id_is_invalid(port_id, ENABLED_WARN))
3659 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3662 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
3663 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3665 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
3666 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3669 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3671 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
3672 "diag=%d\n", port_id, on, diag);
3675 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3679 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
3683 if (port_id_is_invalid(port_id, ENABLED_WARN))
3686 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
3688 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
3689 "diag=%d\n", port_id, queue_id, on, diag);
3693 rx_vlan_filter_set(portid_t port_id, int on)
3697 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3699 if (port_id_is_invalid(port_id, ENABLED_WARN))
3702 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3705 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
3706 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3708 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
3709 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3712 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3714 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
3715 "diag=%d\n", port_id, on, diag);
3718 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3722 rx_vlan_qinq_strip_set(portid_t port_id, int on)
3726 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3728 if (port_id_is_invalid(port_id, ENABLED_WARN))
3731 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3734 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
3735 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3737 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
3738 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3741 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3743 printf("%s(port_pi=%d, on=%d) failed "
3744 "diag=%d\n", __func__, port_id, on, diag);
3747 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3751 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
3755 if (port_id_is_invalid(port_id, ENABLED_WARN))
3757 if (vlan_id_is_invalid(vlan_id))
3759 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
3762 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n",
3764 port_id, vlan_id, on, diag);
3769 rx_vlan_all_filter_set(portid_t port_id, int on)
3773 if (port_id_is_invalid(port_id, ENABLED_WARN))
3775 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
3776 if (rx_vft_set(port_id, vlan_id, on))
3782 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
3786 if (port_id_is_invalid(port_id, ENABLED_WARN))
3789 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
3793 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n",
3795 port_id, vlan_type, tp_id, diag);
3799 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
3801 struct rte_eth_dev_info dev_info;
3804 if (vlan_id_is_invalid(vlan_id))
3807 if (ports[port_id].dev_conf.txmode.offloads &
3808 DEV_TX_OFFLOAD_QINQ_INSERT) {
3809 printf("Error, as QinQ has been enabled.\n");
3813 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3817 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
3818 printf("Error: vlan insert is not supported by port %d\n",
3823 tx_vlan_reset(port_id);
3824 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
3825 ports[port_id].tx_vlan_id = vlan_id;
3829 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
3831 struct rte_eth_dev_info dev_info;
3834 if (vlan_id_is_invalid(vlan_id))
3836 if (vlan_id_is_invalid(vlan_id_outer))
3839 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3843 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
3844 printf("Error: qinq insert not supported by port %d\n",
3849 tx_vlan_reset(port_id);
3850 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
3851 DEV_TX_OFFLOAD_QINQ_INSERT);
3852 ports[port_id].tx_vlan_id = vlan_id;
3853 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
3857 tx_vlan_reset(portid_t port_id)
3859 ports[port_id].dev_conf.txmode.offloads &=
3860 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
3861 DEV_TX_OFFLOAD_QINQ_INSERT);
3862 ports[port_id].tx_vlan_id = 0;
3863 ports[port_id].tx_vlan_id_outer = 0;
3867 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
3869 if (port_id_is_invalid(port_id, ENABLED_WARN))
3872 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
3876 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
3879 uint8_t existing_mapping_found = 0;
3881 if (port_id_is_invalid(port_id, ENABLED_WARN))
3884 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
3887 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
3888 printf("map_value not in required range 0..%d\n",
3889 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
3893 if (!is_rx) { /*then tx*/
3894 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3895 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3896 (tx_queue_stats_mappings[i].queue_id == queue_id)) {
3897 tx_queue_stats_mappings[i].stats_counter_id = map_value;
3898 existing_mapping_found = 1;
3902 if (!existing_mapping_found) { /* A new additional mapping... */
3903 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
3904 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
3905 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
3906 nb_tx_queue_stats_mappings++;
3910 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3911 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3912 (rx_queue_stats_mappings[i].queue_id == queue_id)) {
3913 rx_queue_stats_mappings[i].stats_counter_id = map_value;
3914 existing_mapping_found = 1;
3918 if (!existing_mapping_found) { /* A new additional mapping... */
3919 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
3920 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
3921 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
3922 nb_rx_queue_stats_mappings++;
3928 set_xstats_hide_zero(uint8_t on_off)
3930 xstats_hide_zero = on_off;
3934 set_record_core_cycles(uint8_t on_off)
3936 record_core_cycles = on_off;
3940 set_record_burst_stats(uint8_t on_off)
3942 record_burst_stats = on_off;
3946 print_fdir_mask(struct rte_eth_fdir_masks *mask)
3948 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
3950 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3951 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
3952 " tunnel_id: 0x%08x",
3953 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
3954 rte_be_to_cpu_32(mask->tunnel_id_mask));
3955 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3956 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
3957 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
3958 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
3960 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
3961 rte_be_to_cpu_16(mask->src_port_mask),
3962 rte_be_to_cpu_16(mask->dst_port_mask));
3964 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3965 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
3966 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
3967 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
3968 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
3970 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3971 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
3972 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
3973 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
3974 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
3981 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3983 struct rte_eth_flex_payload_cfg *cfg;
3986 for (i = 0; i < flex_conf->nb_payloads; i++) {
3987 cfg = &flex_conf->flex_set[i];
3988 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
3990 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
3991 printf("\n L2_PAYLOAD: ");
3992 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
3993 printf("\n L3_PAYLOAD: ");
3994 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
3995 printf("\n L4_PAYLOAD: ");
3997 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
3998 for (j = 0; j < num; j++)
3999 printf(" %-5u", cfg->src_offset[j]);
4005 flowtype_to_str(uint16_t flow_type)
4007 struct flow_type_info {
4013 static struct flow_type_info flowtype_str_table[] = {
4014 {"raw", RTE_ETH_FLOW_RAW},
4015 {"ipv4", RTE_ETH_FLOW_IPV4},
4016 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
4017 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
4018 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
4019 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
4020 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
4021 {"ipv6", RTE_ETH_FLOW_IPV6},
4022 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
4023 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
4024 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
4025 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
4026 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
4027 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
4028 {"port", RTE_ETH_FLOW_PORT},
4029 {"vxlan", RTE_ETH_FLOW_VXLAN},
4030 {"geneve", RTE_ETH_FLOW_GENEVE},
4031 {"nvgre", RTE_ETH_FLOW_NVGRE},
4032 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
4035 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
4036 if (flowtype_str_table[i].ftype == flow_type)
4037 return flowtype_str_table[i].str;
4044 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4046 struct rte_eth_fdir_flex_mask *mask;
4050 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
4051 mask = &flex_conf->flex_mask[i];
4052 p = flowtype_to_str(mask->flow_type);
4053 printf("\n %s:\t", p ? p : "unknown");
4054 for (j = 0; j < num; j++)
4055 printf(" %02x", mask->mask[j]);
4061 print_fdir_flow_type(uint32_t flow_types_mask)
4066 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
4067 if (!(flow_types_mask & (1 << i)))
4069 p = flowtype_to_str(i);
4079 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
4080 struct rte_eth_fdir_stats *fdir_stat)
4084 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
4086 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
4087 RTE_ETH_FILTER_INFO, fdir_info);
4088 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
4089 RTE_ETH_FILTER_STATS, fdir_stat);
4093 #ifdef RTE_LIBRTE_I40E_PMD
4094 if (ret == -ENOTSUP) {
4095 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
4097 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
4100 #ifdef RTE_LIBRTE_IXGBE_PMD
4101 if (ret == -ENOTSUP) {
4102 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
4104 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
4111 printf("\n FDIR is not supported on port %-2d\n",
4115 printf("programming error: (%s)\n", strerror(-ret));
4122 fdir_get_infos(portid_t port_id)
4124 struct rte_eth_fdir_stats fdir_stat;
4125 struct rte_eth_fdir_info fdir_info;
4127 static const char *fdir_stats_border = "########################";
4129 if (port_id_is_invalid(port_id, ENABLED_WARN))
4132 memset(&fdir_info, 0, sizeof(fdir_info));
4133 memset(&fdir_stat, 0, sizeof(fdir_stat));
4134 if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
4137 printf("\n %s FDIR info for port %-2d %s\n",
4138 fdir_stats_border, port_id, fdir_stats_border);
4140 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
4141 printf(" PERFECT\n");
4142 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
4143 printf(" PERFECT-MAC-VLAN\n");
4144 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4145 printf(" PERFECT-TUNNEL\n");
4146 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
4147 printf(" SIGNATURE\n");
4149 printf(" DISABLE\n");
4150 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
4151 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
4152 printf(" SUPPORTED FLOW TYPE: ");
4153 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
4155 printf(" FLEX PAYLOAD INFO:\n");
4156 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
4157 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
4158 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
4159 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
4160 fdir_info.flex_payload_unit,
4161 fdir_info.max_flex_payload_segment_num,
4162 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
4164 print_fdir_mask(&fdir_info.mask);
4165 if (fdir_info.flex_conf.nb_payloads > 0) {
4166 printf(" FLEX PAYLOAD SRC OFFSET:");
4167 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4169 if (fdir_info.flex_conf.nb_flexmasks > 0) {
4170 printf(" FLEX MASK CFG:");
4171 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4173 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
4174 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
4175 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
4176 fdir_info.guarant_spc, fdir_info.best_spc);
4177 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
4178 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
4179 " add: %-10"PRIu64" remove: %"PRIu64"\n"
4180 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
4181 fdir_stat.collision, fdir_stat.free,
4182 fdir_stat.maxhash, fdir_stat.maxlen,
4183 fdir_stat.add, fdir_stat.remove,
4184 fdir_stat.f_add, fdir_stat.f_remove);
4185 printf(" %s############################%s\n",
4186 fdir_stats_border, fdir_stats_border);
4190 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
4192 struct rte_port *port;
4193 struct rte_eth_fdir_flex_conf *flex_conf;
4196 port = &ports[port_id];
4197 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4198 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
4199 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
4204 if (i >= RTE_ETH_FLOW_MAX) {
4205 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
4206 idx = flex_conf->nb_flexmasks;
4207 flex_conf->nb_flexmasks++;
4209 printf("The flex mask table is full. Cannot set flex"
4210 " mask for flow_type(%u).\n", cfg->flow_type);
4214 rte_memcpy(&flex_conf->flex_mask[idx],
4216 sizeof(struct rte_eth_fdir_flex_mask));
4220 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
4222 struct rte_port *port;
4223 struct rte_eth_fdir_flex_conf *flex_conf;
4226 port = &ports[port_id];
4227 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4228 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
4229 if (cfg->type == flex_conf->flex_set[i].type) {
4234 if (i >= RTE_ETH_PAYLOAD_MAX) {
4235 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
4236 idx = flex_conf->nb_payloads;
4237 flex_conf->nb_payloads++;
4239 printf("The flex payload table is full. Cannot set"
4240 " flex payload for type(%u).\n", cfg->type);
4244 rte_memcpy(&flex_conf->flex_set[idx],
4246 sizeof(struct rte_eth_flex_payload_cfg));
4251 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
4253 #ifdef RTE_LIBRTE_IXGBE_PMD
4257 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
4259 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
4263 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
4264 is_rx ? "rx" : "tx", port_id, diag);
4267 printf("VF %s setting not supported for port %d\n",
4268 is_rx ? "Rx" : "Tx", port_id);
4274 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
4277 struct rte_eth_link link;
4280 if (port_id_is_invalid(port_id, ENABLED_WARN))
4282 ret = eth_link_get_nowait_print_err(port_id, &link);
4285 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
4286 rate > link.link_speed) {
4287 printf("Invalid rate value:%u bigger than link speed: %u\n",
4288 rate, link.link_speed);
4291 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
4294 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
4300 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
4302 int diag = -ENOTSUP;
4306 RTE_SET_USED(q_msk);
4308 #ifdef RTE_LIBRTE_IXGBE_PMD
4309 if (diag == -ENOTSUP)
4310 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
4313 #ifdef RTE_LIBRTE_BNXT_PMD
4314 if (diag == -ENOTSUP)
4315 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
4320 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
4326 * Functions to manage the set of filtered Multicast MAC addresses.
4328 * A pool of filtered multicast MAC addresses is associated with each port.
4329 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
4330 * The address of the pool and the number of valid multicast MAC addresses
4331 * recorded in the pool are stored in the fields "mc_addr_pool" and
4332 * "mc_addr_nb" of the "rte_port" data structure.
4334 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
4335 * a contiguous array of multicast MAC addresses to be supplied.
4336 * To comply with this constraint, the set of multicast addresses recorded
4337 * into the pool are systematically compacted at the beginning of the pool.
4338 * Hence, when a multicast address is removed from the pool, all following
4339 * addresses, if any, are copied back to keep the set contiguous.
4341 #define MCAST_POOL_INC 32
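/*
 * Worked example of the pool management described above (illustrative
 * only): adding the first multicast address reallocates the pool to
 * MCAST_POOL_INC (32) entries and adding the 33rd grows it to 64.
 * Removing the address at index 1 out of 3 recorded addresses moves the
 * entry at index 2 down to index 1 so the array stays contiguous for
 * rte_eth_dev_set_mc_addr_list().
 */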
4344 mcast_addr_pool_extend(struct rte_port *port)
4346 struct rte_ether_addr *mc_pool;
4347 size_t mc_pool_size;
4350 * If a free entry is available at the end of the pool, just
4351 * increment the number of recorded multicast addresses.
4353 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
4359 * [re]allocate a pool with MCAST_POOL_INC more entries.
4360 * The previous test guarantees that port->mc_addr_nb is a multiple
4361 * of MCAST_POOL_INC.
4363 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
4365 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
4367 if (mc_pool == NULL) {
4368 printf("allocation of pool of %u multicast addresses failed\n",
4369 port->mc_addr_nb + MCAST_POOL_INC);
4373 port->mc_addr_pool = mc_pool;
4380 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
4382 if (mcast_addr_pool_extend(port) != 0)
4384 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
4388 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
4391 if (addr_idx == port->mc_addr_nb) {
4392 /* No need to recompact the set of multicast addresses. */
4393 if (port->mc_addr_nb == 0) {
4394 /* free the pool of multicast addresses. */
4395 free(port->mc_addr_pool);
4396 port->mc_addr_pool = NULL;
4400 memmove(&port->mc_addr_pool[addr_idx],
4401 &port->mc_addr_pool[addr_idx + 1],
4402 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
4406 eth_port_multicast_addr_list_set(portid_t port_id)
4408 struct rte_port *port;
4411 port = &ports[port_id];
4412 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
4415 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
4416 port_id, port->mc_addr_nb, diag);
4422 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
4424 struct rte_port *port;
4427 if (port_id_is_invalid(port_id, ENABLED_WARN))
4430 port = &ports[port_id];
4433 * Check that the added multicast MAC address is not already recorded
4434 * in the pool of multicast addresses.
4436 for (i = 0; i < port->mc_addr_nb; i++) {
4437 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
4438 printf("multicast address already filtered by port\n");
4443 mcast_addr_pool_append(port, mc_addr);
4444 if (eth_port_multicast_addr_list_set(port_id) < 0)
4445 /* Rollback on failure, remove the address from the pool */
4446 mcast_addr_pool_remove(port, i);
4450 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
4452 struct rte_port *port;
4455 if (port_id_is_invalid(port_id, ENABLED_WARN))
4458 port = &ports[port_id];
4461 * Search the pool of multicast MAC addresses for the removed address.
4463 for (i = 0; i < port->mc_addr_nb; i++) {
4464 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
4467 if (i == port->mc_addr_nb) {
4468 printf("multicast address not filtered by port %d\n", port_id);
4472 mcast_addr_pool_remove(port, i);
4473 if (eth_port_multicast_addr_list_set(port_id) < 0)
4474 /* Rollback on failure, add the address back into the pool */
4475 mcast_addr_pool_append(port, mc_addr);
4479 port_dcb_info_display(portid_t port_id)
4481 struct rte_eth_dcb_info dcb_info;
4484 static const char *border = "================";
4486 if (port_id_is_invalid(port_id, ENABLED_WARN))
4489 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
4491 printf("\n Failed to get DCB info on port %-2d\n",
4495 printf("\n %s DCB info for port %-2d %s\n", border, port_id, border);
4496 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
4498 for (i = 0; i < dcb_info.nb_tcs; i++)
4500 printf("\n Priority : ");
4501 for (i = 0; i < dcb_info.nb_tcs; i++)
4502 printf("\t%4d", dcb_info.prio_tc[i]);
4503 printf("\n BW percent :");
4504 for (i = 0; i < dcb_info.nb_tcs; i++)
4505 printf("\t%4d%%", dcb_info.tc_bws[i]);
4506 printf("\n RXQ base : ");
4507 for (i = 0; i < dcb_info.nb_tcs; i++)
4508 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
4509 printf("\n RXQ number :");
4510 for (i = 0; i < dcb_info.nb_tcs; i++)
4511 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
4512 printf("\n TXQ base : ");
4513 for (i = 0; i < dcb_info.nb_tcs; i++)
4514 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
4515 printf("\n TXQ number :");
4516 for (i = 0; i < dcb_info.nb_tcs; i++)
4517 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
4522 open_file(const char *file_path, uint32_t *size)
4524 int fd = open(file_path, O_RDONLY);
4526 uint8_t *buf = NULL;
4534 printf("%s: Failed to open %s\n", __func__, file_path);
4538 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
4540 printf("%s: File operations failed\n", __func__);
4544 pkg_size = st_buf.st_size;
4547 printf("%s: File operations failed\n", __func__);
4551 buf = (uint8_t *)malloc(pkg_size);
4554 printf("%s: Failed to malloc memory\n", __func__);
4558 ret = read(fd, buf, pkg_size);
4561 printf("%s: File read operation failed\n", __func__);
4575 save_file(const char *file_path, uint8_t *buf, uint32_t size)
4577 FILE *fh = fopen(file_path, "wb");
4580 printf("%s: Failed to open %s\n", __func__, file_path);
4584 if (fwrite(buf, 1, size, fh) != size) {
4586 printf("%s: File write operation failed\n", __func__);
4596 close_file(uint8_t *buf)
4607 port_queue_region_info_display(portid_t port_id, void *buf)
4609 #ifdef RTE_LIBRTE_I40E_PMD
4611 struct rte_pmd_i40e_queue_regions *info =
4612 (struct rte_pmd_i40e_queue_regions *)buf;
4613 static const char *queue_region_info_stats_border = "-------";
4615 if (!info->queue_region_number)
4616 printf("no queue region has been set before\n");
4618 printf("\n %s All queue region info for port=%2d %s",
4619 queue_region_info_stats_border, port_id,
4620 queue_region_info_stats_border);
4621 printf("\n queue_region_number: %-14u \n",
4622 info->queue_region_number);
4624 for (i = 0; i < info->queue_region_number; i++) {
4625 printf("\n region_id: %-14u queue_number: %-14u "
4626 "queue_start_index: %-14u \n",
4627 info->region[i].region_id,
4628 info->region[i].queue_num,
4629 info->region[i].queue_start_index);
4631 printf(" user_priority_num is %-14u :",
4632 info->region[i].user_priority_num);
4633 for (j = 0; j < info->region[i].user_priority_num; j++)
4634 printf(" %-14u ", info->region[i].user_priority[j]);
4636 printf("\n flowtype_num is %-14u :",
4637 info->region[i].flowtype_num);
4638 for (j = 0; j < info->region[i].flowtype_num; j++)
4639 printf(" %-14u ", info->region[i].hw_flowtype[j]);
4642 RTE_SET_USED(port_id);
4650 show_macs(portid_t port_id)
4652 char buf[RTE_ETHER_ADDR_FMT_SIZE];
4653 struct rte_eth_dev_info dev_info;
4654 struct rte_ether_addr *addr;
4655 uint32_t i, num_macs = 0;
4656 struct rte_eth_dev *dev;
4658 dev = &rte_eth_devices[port_id];
4660 rte_eth_dev_info_get(port_id, &dev_info);
4662 for (i = 0; i < dev_info.max_mac_addrs; i++) {
4663 addr = &dev->data->mac_addrs[i];
4665 /* skip zero address */
4666 if (rte_is_zero_ether_addr(addr))
4672 printf("Number of MAC addresses added: %d\n", num_macs);
4674 for (i = 0; i < dev_info.max_mac_addrs; i++) {
4675 addr = &dev->data->mac_addrs[i];
4677 /* skip zero address */
4678 if (rte_is_zero_ether_addr(addr))
4681 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
4682 printf(" %s\n", buf);
4687 show_mcast_macs(portid_t port_id)
4689 char buf[RTE_ETHER_ADDR_FMT_SIZE];
4690 struct rte_ether_addr *addr;
4691 struct rte_port *port;
4694 port = &ports[port_id];
4696 printf("Number of Multicast MAC addresses added: %d\n", port->mc_addr_nb);
4698 for (i = 0; i < port->mc_addr_nb; i++) {
4699 addr = &port->mc_addr_pool[i];
4701 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
4702 printf(" %s\n", buf);