/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
#include <sys/queue.h>
#include <sys/types.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#define ETHDEV_FWVERS_LEN 32

static char *flowtype_to_str(uint16_t flow_type);

    enum tx_pkt_split split;
    .split = TX_PKT_SPLIT_OFF,
    .split = TX_PKT_SPLIT_ON,
    .split = TX_PKT_SPLIT_RND,
const struct rss_type_info rss_type_table[] = {
    { "all", ETH_RSS_IP | ETH_RSS_TCP |
        ETH_RSS_UDP | ETH_RSS_SCTP |
    { "ipv4", ETH_RSS_IPV4 },
    { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
    { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
    { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
    { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
    { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
    { "ipv6", ETH_RSS_IPV6 },
    { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
    { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
    { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
    { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
    { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
    { "l2-payload", ETH_RSS_L2_PAYLOAD },
    { "ipv6-ex", ETH_RSS_IPV6_EX },
    { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
    { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
    { "port", ETH_RSS_PORT },
    { "vxlan", ETH_RSS_VXLAN },
    { "geneve", ETH_RSS_GENEVE },
    { "nvgre", ETH_RSS_NVGRE },
    { "ip", ETH_RSS_IP },
    { "udp", ETH_RSS_UDP },
    { "tcp", ETH_RSS_TCP },
    { "sctp", ETH_RSS_SCTP },
    { "tunnel", ETH_RSS_TUNNEL },
    { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
    { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
    { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
    { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
    { "esp", ETH_RSS_ESP },
    { "ah", ETH_RSS_AH },
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
    char buf[RTE_ETHER_ADDR_FMT_SIZE];
    rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
    printf("%s%s", name, buf);
nic_stats_display(portid_t port_id)
    static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
    static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
    static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
    static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
    static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
    uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
    uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
    struct rte_eth_stats stats;
    struct rte_port *port = &ports[port_id];

    static const char *nic_stats_border = "########################";

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {

    rte_eth_stats_get(port_id, &stats);
    printf("\n %s NIC statistics for port %-2d %s\n",
           nic_stats_border, port_id, nic_stats_border);

    if ((!port->rx_queue_stats_mapping_enabled) &&
        (!port->tx_queue_stats_mapping_enabled)) {
        printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
               stats.ipackets, stats.imissed, stats.ibytes);
        printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
        printf(" RX-nombuf: %-10"PRIu64"\n",
        printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
               stats.opackets, stats.oerrors, stats.obytes);
        printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
               " RX-bytes: %10"PRIu64"\n",
               stats.ipackets, stats.ierrors, stats.ibytes);
        printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
        printf(" RX-nombuf: %10"PRIu64"\n",
        printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
               " TX-bytes: %10"PRIu64"\n",
               stats.opackets, stats.oerrors, stats.obytes);

    if (port->rx_queue_stats_mapping_enabled) {
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
            printf(" Stats reg %2d RX-packets: %10"PRIu64
                   " RX-errors: %10"PRIu64
                   " RX-bytes: %10"PRIu64"\n",
                   i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
    if (port->tx_queue_stats_mapping_enabled) {
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
            printf(" Stats reg %2d TX-packets: %10"PRIu64
                   " TX-bytes: %10"PRIu64"\n",
                   i, stats.q_opackets[i], stats.q_obytes[i]);

    diff_cycles = prev_cycles[port_id];
    prev_cycles[port_id] = rte_rdtsc();
    diff_cycles = prev_cycles[port_id] - diff_cycles;

    diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
        (stats.ipackets - prev_pkts_rx[port_id]) : 0;
    diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
        (stats.opackets - prev_pkts_tx[port_id]) : 0;
    prev_pkts_rx[port_id] = stats.ipackets;
    prev_pkts_tx[port_id] = stats.opackets;
    mpps_rx = diff_cycles > 0 ?
        diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
    mpps_tx = diff_cycles > 0 ?
        diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;

    diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
        (stats.ibytes - prev_bytes_rx[port_id]) : 0;
    diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
        (stats.obytes - prev_bytes_tx[port_id]) : 0;
    prev_bytes_rx[port_id] = stats.ibytes;
    prev_bytes_tx[port_id] = stats.obytes;
    mbps_rx = diff_cycles > 0 ?
        diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0;
    mbps_tx = diff_cycles > 0 ?
        diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0;
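    /*
     * Illustrative math (hypothetical numbers): the rates are derived
     * from the TSC delta as pps = diff_pkts * tsc_hz / diff_cycles.
     * With a 2 GHz TSC, 10,000,000 packets over 1,000,000,000 cycles
     * (0.5 s) gives 10000000 * 2000000000 / 1000000000 = 20,000,000 pps.
     * Byte rates are computed the same way and printed below as
     * bits per second (hence the "* 8").
     */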
217 printf("\n Throughput (since last show)\n");
218 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
219 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
220 mpps_tx, mbps_tx * 8);
222 printf(" %s############################%s\n",
223 nic_stats_border, nic_stats_border);
nic_stats_clear(portid_t port_id)
    if (port_id_is_invalid(port_id, ENABLED_WARN)) {

    rte_eth_stats_reset(port_id);
    printf("\n NIC statistics for port %d cleared\n", port_id);
nic_xstats_display(portid_t port_id)
    struct rte_eth_xstat *xstats;
    int cnt_xstats, idx_xstat;
    struct rte_eth_xstat_name *xstats_names;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {

    printf("###### NIC extended statistics for port %-2d\n", port_id);
    if (!rte_eth_dev_is_valid_port(port_id)) {
        printf("Error: Invalid port number %i\n", port_id);

    cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
    if (cnt_xstats < 0) {
        printf("Error: Cannot get count of xstats\n");

    /* Get id-name lookup table */
    xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
    if (xstats_names == NULL) {
        printf("Cannot allocate memory for xstats lookup\n");
    if (cnt_xstats != rte_eth_xstats_get_names(
            port_id, xstats_names, cnt_xstats)) {
        printf("Error: Cannot get xstats lookup\n");

    /* Get stats themselves */
    xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
    if (xstats == NULL) {
        printf("Cannot allocate memory for xstats\n");
    if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
        printf("Error: Unable to get xstats\n");

    for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
        if (xstats_hide_zero && !xstats[idx_xstat].value)
        printf("%s: %"PRIu64"\n",
               xstats_names[idx_xstat].name,
               xstats[idx_xstat].value);
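    /*
     * The xstats API follows the sizing idiom visible above:
     * rte_eth_xstats_get_names(port, NULL, 0) only returns the number of
     * statistics, a name table of that size is fetched once, and
     * rte_eth_xstats_get() fills matching values by index. A minimal
     * sketch of the same pattern (error handling omitted):
     *
     *     int n = rte_eth_xstats_get_names(pid, NULL, 0);
     *     names = malloc(sizeof(*names) * n);
     *     vals = malloc(sizeof(*vals) * n);
     *     rte_eth_xstats_get_names(pid, names, n);
     *     rte_eth_xstats_get(pid, vals, n);
     */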
nic_xstats_clear(portid_t port_id)
    if (port_id_is_invalid(port_id, ENABLED_WARN)) {

    ret = rte_eth_xstats_reset(port_id);
        printf("%s: Error: failed to reset xstats (port %u): %s\n",
               __func__, port_id, strerror(-ret));
nic_stats_mapping_display(portid_t port_id)
    struct rte_port *port = &ports[port_id];

    static const char *nic_stats_mapping_border = "########################";

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {

    if ((!port->rx_queue_stats_mapping_enabled) &&
        (!port->tx_queue_stats_mapping_enabled)) {
        printf("Port id %d - either does not support queue statistic mapping or"
               " no queue statistic mapping set\n", port_id);

    printf("\n %s NIC statistics mapping for port %-2d %s\n",
           nic_stats_mapping_border, port_id, nic_stats_mapping_border);

    if (port->rx_queue_stats_mapping_enabled) {
        for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
            if (rx_queue_stats_mappings[i].port_id == port_id) {
                printf(" RX-queue %2d mapped to Stats Reg %2d\n",
                       rx_queue_stats_mappings[i].queue_id,
                       rx_queue_stats_mappings[i].stats_counter_id);
    if (port->tx_queue_stats_mapping_enabled) {
        for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
            if (tx_queue_stats_mappings[i].port_id == port_id) {
                printf(" TX-queue %2d mapped to Stats Reg %2d\n",
                       tx_queue_stats_mappings[i].queue_id,
                       tx_queue_stats_mappings[i].stats_counter_id);

    printf(" %s####################################%s\n",
           nic_stats_mapping_border, nic_stats_mapping_border);
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
    struct rte_eth_burst_mode mode;
    struct rte_eth_rxq_info qinfo;

    static const char *info_border = "*********************";

    rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
        printf("Failed to retrieve information for port: %u, "
               "RX queue: %hu\nerror desc: %s(%d)\n",
               port_id, queue_id, strerror(-rc), rc);

    printf("\n%s Infos for port %-2u, RX queue %-2u %s",
           info_border, port_id, queue_id, info_border);

    printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
    printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
    printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
    printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
    printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
    printf("\nRX drop packets: %s",
           (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
    printf("\nRX deferred start: %s",
           (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
    printf("\nRX scattered packets: %s",
           (qinfo.scattered_rx != 0) ? "on" : "off");
    printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

    if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
        printf("\nBurst mode: %s%s",
               mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
               " (per queue)" : "");
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
    struct rte_eth_burst_mode mode;
    struct rte_eth_txq_info qinfo;

    static const char *info_border = "*********************";

    rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
        printf("Failed to retrieve information for port: %u, "
               "TX queue: %hu\nerror desc: %s(%d)\n",
               port_id, queue_id, strerror(-rc), rc);

    printf("\n%s Infos for port %-2u, TX queue %-2u %s",
           info_border, port_id, queue_id, info_border);

    printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
    printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
    printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
    printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
    printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
    printf("\nTX deferred start: %s",
           (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
    printf("\nNumber of TXDs: %hu", qinfo.nb_desc);

    if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
        printf("\nBurst mode: %s%s",
               mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
               " (per queue)" : "");
static int bus_match_all(const struct rte_bus *bus, const void *data)

device_infos_display(const char *identifier)
    static const char *info_border = "*********************";
    struct rte_bus *start = NULL, *next;
    struct rte_dev_iterator dev_iter;
    char name[RTE_ETH_NAME_MAX_LEN];
    struct rte_ether_addr mac_addr;
    struct rte_device *dev;
    struct rte_devargs da;

    memset(&da, 0, sizeof(da));

    if (rte_devargs_parsef(&da, "%s", identifier)) {
        printf("cannot parse identifier\n");

    while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {

        if (identifier && da.bus != next)

        /* Skip buses that don't have iterate method */
        if (!next->dev_iterate)

        snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
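        /*
         * Illustrative example: for a bus named "pci", devstr becomes
         * "bus=pci", so the RTE_DEV_FOREACH() below iterates every
         * device enumerated by the PCI bus.
         */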
        RTE_DEV_FOREACH(dev, devstr, &dev_iter) {

            /* Check for matching device if identifier is present */
            strncmp(da.name, dev->name, strlen(dev->name)))
            printf("\n%s Infos for device %s %s\n",
                   info_border, dev->name, info_border);
            printf("Bus name: %s", dev->bus->name);
            printf("\nDriver name: %s", dev->driver->name);
            printf("\nDevargs: %s",
                   dev->devargs ? dev->devargs->args : "");
            printf("\nConnect to socket: %d", dev->numa_node);

            /* List ports with matching device name */
            RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
                printf("\n\tPort id: %-2d", port_id);
                if (eth_macaddr_get_print_err(port_id,
                    print_ethaddr("\n\tMAC address: ",
                rte_eth_dev_get_name_by_port(port_id, name);
                printf("\n\tDevice name: %s", name);
port_infos_display(portid_t port_id)
    struct rte_port *port;
    struct rte_ether_addr mac_addr;
    struct rte_eth_link link;
    struct rte_eth_dev_info dev_info;
    struct rte_mempool *mp;
    static const char *info_border = "*********************";
    char name[RTE_ETH_NAME_MAX_LEN];
    char fw_version[ETHDEV_FWVERS_LEN];

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {

    port = &ports[port_id];
    ret = eth_link_get_nowait_print_err(port_id, &link);
    ret = eth_dev_info_get_print_err(port_id, &dev_info);

    printf("\n%s Infos for port %-2d %s\n",
           info_border, port_id, info_border);
    if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
        print_ethaddr("MAC address: ", &mac_addr);
    rte_eth_dev_get_name_by_port(port_id, name);
    printf("\nDevice name: %s", name);
    printf("\nDriver name: %s", dev_info.driver_name);

    if (rte_eth_dev_fw_version_get(port_id, fw_version,
                                   ETHDEV_FWVERS_LEN) == 0)
        printf("\nFirmware-version: %s", fw_version);
        printf("\nFirmware-version: %s", "not available");

    if (dev_info.device->devargs && dev_info.device->devargs->args)
        printf("\nDevargs: %s", dev_info.device->devargs->args);
    printf("\nConnect to socket: %u", port->socket_id);

    if (port_numa[port_id] != NUMA_NO_CONFIG) {
        mp = mbuf_pool_find(port_numa[port_id]);
            printf("\nmemory allocation on the socket: %d",
        printf("\nmemory allocation on the socket: %u", port->socket_id);

    printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
    printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
    printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
           ("full-duplex") : ("half-duplex"));

    if (!rte_eth_dev_get_mtu(port_id, &mtu))
        printf("MTU: %u\n", mtu);

    printf("Promiscuous mode: %s\n",
           rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
    printf("Allmulticast mode: %s\n",
           rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
    printf("Maximum number of MAC addresses: %u\n",
           (unsigned int)(port->dev_info.max_mac_addrs));
    printf("Maximum number of MAC addresses of hash filtering: %u\n",
           (unsigned int)(port->dev_info.max_hash_mac_addrs));

    vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
    if (vlan_offload >= 0) {
        printf("VLAN offload:\n");
        if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
            printf(" strip on, ");
            printf(" strip off, ");

        if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
            printf("filter on, ");
            printf("filter off, ");

        if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
            printf("extend on, ");
            printf("extend off, ");

        if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
            printf("qinq strip on\n");
            printf("qinq strip off\n");

    if (dev_info.hash_key_size > 0)
        printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
    if (dev_info.reta_size > 0)
        printf("Redirection table size: %u\n", dev_info.reta_size);
    if (!dev_info.flow_type_rss_offloads)
        printf("No RSS offload flow type is supported.\n");

        printf("Supported RSS offload flow types:\n");
        for (i = RTE_ETH_FLOW_UNKNOWN + 1;
             i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
            if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
            p = flowtype_to_str(i);
                printf(" user defined %d\n", i);

    printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
    printf("Maximum configurable length of RX packet: %u\n",
           dev_info.max_rx_pktlen);
    printf("Maximum configurable size of LRO aggregated packet: %u\n",
           dev_info.max_lro_pkt_size);
    if (dev_info.max_vfs)
        printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
    if (dev_info.max_vmdq_pools)
        printf("Maximum number of VMDq pools: %u\n",
               dev_info.max_vmdq_pools);

    printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
    printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
    printf("Max possible number of RXDs per queue: %hu\n",
           dev_info.rx_desc_lim.nb_max);
    printf("Min possible number of RXDs per queue: %hu\n",
           dev_info.rx_desc_lim.nb_min);
    printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

    printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
    printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
    printf("Max possible number of TXDs per queue: %hu\n",
           dev_info.tx_desc_lim.nb_max);
    printf("Min possible number of TXDs per queue: %hu\n",
           dev_info.tx_desc_lim.nb_min);
    printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
    printf("Max segment number per packet: %hu\n",
           dev_info.tx_desc_lim.nb_seg_max);
    printf("Max segment number per MTU/TSO: %hu\n",
           dev_info.tx_desc_lim.nb_mtu_seg_max);

    /* Show switch info only if valid switch domain and port id are set */
    if (dev_info.switch_info.domain_id !=
        RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
        if (dev_info.switch_info.name)
            printf("Switch name: %s\n", dev_info.switch_info.name);
        printf("Switch domain Id: %u\n",
               dev_info.switch_info.domain_id);
        printf("Switch Port Id: %u\n",
               dev_info.switch_info.port_id);
port_summary_header_display(void)
    uint16_t port_number;

    port_number = rte_eth_dev_count_avail();
    printf("Number of available ports: %i\n", port_number);
    printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
           "Driver", "Status", "Link");

port_summary_display(portid_t port_id)
    struct rte_ether_addr mac_addr;
    struct rte_eth_link link;
    struct rte_eth_dev_info dev_info;
    char name[RTE_ETH_NAME_MAX_LEN];

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {

    ret = eth_link_get_nowait_print_err(port_id, &link);
    ret = eth_dev_info_get_print_err(port_id, &dev_info);

    rte_eth_dev_get_name_by_port(port_id, name);
    ret = eth_macaddr_get_print_err(port_id, &mac_addr);

    printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
           port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
           mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
           mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
           dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
           (unsigned int) link.link_speed);
port_offload_cap_display(portid_t port_id)
    struct rte_eth_dev_info dev_info;
    static const char *info_border = "************";

    if (port_id_is_invalid(port_id, ENABLED_WARN))

    ret = eth_dev_info_get_print_err(port_id, &dev_info);

    printf("\n%s Port %d supported offload features: %s\n",
           info_border, port_id, info_border);

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
        printf("VLAN stripped: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_VLAN_STRIP)

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
        printf("Double VLANs stripped: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_QINQ_STRIP)

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
        printf("RX IPv4 checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_IPV4_CKSUM)

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
        printf("RX UDP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_UDP_CKSUM)

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
        printf("RX TCP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_TCP_CKSUM)

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
        printf("RX SCTP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_SCTP_CKSUM)

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
        printf("RX Outer IPv4 checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
        printf("RX Outer UDP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
        printf("Large receive offload: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_TCP_LRO)

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
        printf("HW timestamp: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_TIMESTAMP)

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
        printf("Rx Keep CRC: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_KEEP_CRC)

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
        printf("RX offload security: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_SECURITY)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
        printf("VLAN insert: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_VLAN_INSERT)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
        printf("Double VLANs insert: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_QINQ_INSERT)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
        printf("TX IPv4 checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_IPV4_CKSUM)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
        printf("TX UDP checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_UDP_CKSUM)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
        printf("TX TCP checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_TCP_CKSUM)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
        printf("TX SCTP checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_SCTP_CKSUM)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
        printf("TX Outer IPv4 checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
        printf("TX TCP segmentation: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_TCP_TSO)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
        printf("TX UDP segmentation: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_UDP_TSO)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
        printf("TSO for VXLAN tunnel packet: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_VXLAN_TNL_TSO)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
        printf("TSO for GRE tunnel packet: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_GRE_TNL_TSO)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
        printf("TSO for IPIP tunnel packet: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_IPIP_TNL_TSO)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
        printf("TSO for GENEVE tunnel packet: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_GENEVE_TNL_TSO)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
        printf("IP tunnel TSO: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_IP_TNL_TSO)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
        printf("UDP tunnel TSO: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_UDP_TNL_TSO)

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
        printf("TX Outer UDP checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
port_id_is_invalid(portid_t port_id, enum print_warning warning)
    if (port_id == (portid_t)RTE_PORT_ALL)

    RTE_ETH_FOREACH_DEV(pid)

    if (warning == ENABLED_WARN)
        printf("Invalid port %d\n", port_id);

void print_valid_ports(void)
    printf("The valid ports array is [");
    RTE_ETH_FOREACH_DEV(pid) {

vlan_id_is_invalid(uint16_t vlan_id)
    printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
    const struct rte_pci_device *pci_dev;
    const struct rte_bus *bus;

    if (reg_off & 0x3) {
        printf("Port register offset 0x%X not aligned on a 4-byte "

    if (!ports[port_id].dev_info.device) {
        printf("Invalid device\n");

    bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
    if (bus && !strcmp(bus->name, "pci")) {
        pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
        printf("Not a PCI device\n");

    pci_len = pci_dev->mem_resource[0].len;
    if (reg_off >= pci_len) {
        printf("Port %d: register offset %u (0x%X) out of port PCI "
               "resource (length=%"PRIu64")\n",
               port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
reg_bit_pos_is_invalid(uint8_t bit_pos)
    printf("Invalid bit position %d (must be <= 31)\n", bit_pos);

#define display_port_and_reg_off(port_id, reg_off) \
    printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
    display_port_and_reg_off(port_id, (unsigned)reg_off);
    printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
    if (port_id_is_invalid(port_id, ENABLED_WARN))
    if (port_reg_off_is_invalid(port_id, reg_off))
    if (reg_bit_pos_is_invalid(bit_x))
    reg_v = port_id_pci_reg_read(port_id, reg_off);
    display_port_and_reg_off(port_id, (unsigned)reg_off);
    printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));

port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
                           uint8_t bit1_pos, uint8_t bit2_pos)
    if (port_id_is_invalid(port_id, ENABLED_WARN))
    if (port_reg_off_is_invalid(port_id, reg_off))
    if (reg_bit_pos_is_invalid(bit1_pos))
    if (reg_bit_pos_is_invalid(bit2_pos))
    if (bit1_pos > bit2_pos)
        l_bit = bit2_pos, h_bit = bit1_pos;
        l_bit = bit1_pos, h_bit = bit2_pos;

    reg_v = port_id_pci_reg_read(port_id, reg_off);
    reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
    display_port_and_reg_off(port_id, (unsigned)reg_off);
    printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
           ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
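    /*
     * Worked example (hypothetical values): the register value is shifted
     * right by l_bit and masked to (1 << (h_bit - l_bit + 1)) - 1 above,
     * so for a register holding 0xAB and bits [4, 7] requested, the
     * printed field is (0xAB >> 4) & 0xF = 0xA.
     */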
port_reg_display(portid_t port_id, uint32_t reg_off)
    if (port_id_is_invalid(port_id, ENABLED_WARN))
    if (port_reg_off_is_invalid(port_id, reg_off))
    reg_v = port_id_pci_reg_read(port_id, reg_off);
    display_port_reg_value(port_id, reg_off, reg_v);

port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
    if (port_id_is_invalid(port_id, ENABLED_WARN))
    if (port_reg_off_is_invalid(port_id, reg_off))
    if (reg_bit_pos_is_invalid(bit_pos))
        printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);

    reg_v = port_id_pci_reg_read(port_id, reg_off);
        reg_v &= ~(1 << bit_pos);
        reg_v |= (1 << bit_pos);
    port_id_pci_reg_write(port_id, reg_off, reg_v);
    display_port_reg_value(port_id, reg_off, reg_v);
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
                       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
    if (port_id_is_invalid(port_id, ENABLED_WARN))
    if (port_reg_off_is_invalid(port_id, reg_off))
    if (reg_bit_pos_is_invalid(bit1_pos))
    if (reg_bit_pos_is_invalid(bit2_pos))
    if (bit1_pos > bit2_pos)
        l_bit = bit2_pos, h_bit = bit1_pos;
        l_bit = bit1_pos, h_bit = bit2_pos;

    if ((h_bit - l_bit) < 31)
        max_v = (1 << (h_bit - l_bit + 1)) - 1;

    if (value > max_v) {
        printf("Invalid value %u (0x%x) must be <= %u (0x%x)\n",
               (unsigned)value, (unsigned)value,
               (unsigned)max_v, (unsigned)max_v);

    reg_v = port_id_pci_reg_read(port_id, reg_off);
    reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
    reg_v |= (value << l_bit); /* Set changed bits */
    port_id_pci_reg_write(port_id, reg_off, reg_v);
    display_port_reg_value(port_id, reg_off, reg_v);
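    /*
     * Read-modify-write example (hypothetical values): with the register
     * at 0xFFFF and a request to set bits [4, 7] to 0x3, max_v is 0xF, so
     * reg_v &= ~(0xF << 4) clears the field (giving 0xFF0F) and
     * reg_v |= (0x3 << 4) writes the new value, giving 0xFF3F.
     */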
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
    if (port_id_is_invalid(port_id, ENABLED_WARN))
    if (port_reg_off_is_invalid(port_id, reg_off))
    port_id_pci_reg_write(port_id, reg_off, reg_v);
    display_port_reg_value(port_id, reg_off, reg_v);
port_mtu_set(portid_t port_id, uint16_t mtu)
    struct rte_port *rte_port = &ports[port_id];
    struct rte_eth_dev_info dev_info;
    uint16_t eth_overhead;

    if (port_id_is_invalid(port_id, ENABLED_WARN))

    ret = eth_dev_info_get_print_err(port_id, &dev_info);

    if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
        printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
               mtu, dev_info.min_mtu, dev_info.max_mtu);

    diag = rte_eth_dev_set_mtu(port_id, mtu);
        dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
        /*
         * The Ethernet overhead in the driver is equal to the difference
         * between max_rx_pktlen and max_mtu in rte_eth_dev_info when the
         * device supports jumbo frames.
         */
        eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
        if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) {
            rte_port->dev_conf.rxmode.offloads |=
                DEV_RX_OFFLOAD_JUMBO_FRAME;
            rte_port->dev_conf.rxmode.max_rx_pkt_len =
            rte_port->dev_conf.rxmode.offloads &=
                ~DEV_RX_OFFLOAD_JUMBO_FRAME;
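    /*
     * Example with plain Ethernet (assumed driver values): if
     * max_rx_pktlen is 1518 and max_mtu is 1500, eth_overhead is 18 bytes
     * (14-byte header + 4-byte CRC), so any MTU above
     * RTE_ETHER_MAX_LEN - 18 = 1500 enables DEV_RX_OFFLOAD_JUMBO_FRAME
     * and grows max_rx_pkt_len accordingly.
     */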
1267 printf("Set MTU failed. diag=%d\n", diag);
1270 /* Generic flow management functions. */
1272 /** Generate a port_flow entry from attributes/pattern/actions. */
1273 static struct port_flow *
1274 port_flow_new(const struct rte_flow_attr *attr,
1275 const struct rte_flow_item *pattern,
1276 const struct rte_flow_action *actions,
1277 struct rte_flow_error *error)
1279 const struct rte_flow_conv_rule rule = {
1281 .pattern_ro = pattern,
1282 .actions_ro = actions,
1284 struct port_flow *pf;
1287 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
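    /*
     * rte_flow_conv() is called twice, a common sizing idiom: the call
     * above, with a NULL destination and size 0, only returns the number
     * of bytes needed to hold the converted rule; the entry is then
     * over-allocated by that amount and a second call performs the copy
     * into pf->rule. A minimal sketch of the pattern (error handling
     * omitted):
     *
     *     len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, err);
     *     pf = calloc(1, offsetof(struct port_flow, rule) + len);
     *     rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, len, &rule, err);
     */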
    pf = calloc(1, offsetof(struct port_flow, rule) + ret);
        (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
    if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
/** Print a message out of a flow error. */
port_flow_complain(struct rte_flow_error *error)
    static const char *const errstrlist[] = {
        [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
        [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
        [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
        [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
        [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
        [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
        [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
        [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
        [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
        [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
        [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
        [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
        [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
        [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
        [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
        [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
        [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",

    int err = rte_errno;

    if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
        !errstrlist[error->type])
        errstr = "unknown type";
        errstr = errstrlist[error->type];
    printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
           error->type, errstr,
           error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
                                    error->cause), buf) : "",
           error->message ? error->message : "(no stated reason)",
/** Validate flow rule. */
port_flow_validate(portid_t port_id,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item *pattern,
                   const struct rte_flow_action *actions)
    struct rte_flow_error error;

    /* Poisoning to make sure PMDs update it in case of error. */
    memset(&error, 0x11, sizeof(error));
    if (rte_flow_validate(port_id, attr, pattern, actions, &error))
        return port_flow_complain(&error);
    printf("Flow rule validated\n");

/** Create flow rule. */
port_flow_create(portid_t port_id,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item *pattern,
                 const struct rte_flow_action *actions)
    struct rte_flow *flow;
    struct rte_port *port;
    struct port_flow *pf;
    struct rte_flow_error error;

    /* Poisoning to make sure PMDs update it in case of error. */
    memset(&error, 0x22, sizeof(error));
    flow = rte_flow_create(port_id, attr, pattern, actions, &error);
        return port_flow_complain(&error);
    port = &ports[port_id];
    if (port->flow_list) {
        if (port->flow_list->id == UINT32_MAX) {
            printf("Highest rule ID is already assigned, delete"
            rte_flow_destroy(port_id, flow, NULL);
        id = port->flow_list->id + 1;
    pf = port_flow_new(attr, pattern, actions, &error);
        rte_flow_destroy(port_id, flow, NULL);
        return port_flow_complain(&error);
    pf->next = port->flow_list;
    port->flow_list = pf;
    printf("Flow rule #%u created\n", pf->id);
/** Destroy a number of flow rules. */
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
    struct rte_port *port;
    struct port_flow **tmp;

    if (port_id_is_invalid(port_id, ENABLED_WARN) ||
        port_id == (portid_t)RTE_PORT_ALL)
    port = &ports[port_id];
    tmp = &port->flow_list;
        for (i = 0; i != n; ++i) {
            struct rte_flow_error error;
            struct port_flow *pf = *tmp;

            if (rule[i] != pf->id)
            /* Poisoning to make sure PMDs update it in case of error. */
            memset(&error, 0x33, sizeof(error));
            if (rte_flow_destroy(port_id, pf->flow, &error)) {
                ret = port_flow_complain(&error);
            printf("Flow rule #%u destroyed\n", pf->id);
        tmp = &(*tmp)->next;
/** Remove all flow rules. */
port_flow_flush(portid_t port_id)
    struct rte_flow_error error;
    struct rte_port *port;

    /* Poisoning to make sure PMDs update it in case of error. */
    memset(&error, 0x44, sizeof(error));
    if (rte_flow_flush(port_id, &error)) {
        ret = port_flow_complain(&error);
    if (port_id_is_invalid(port_id, DISABLED_WARN) ||
        port_id == (portid_t)RTE_PORT_ALL)
    port = &ports[port_id];
    while (port->flow_list) {
        struct port_flow *pf = port->flow_list->next;

        free(port->flow_list);
        port->flow_list = pf;
/** Dump all flow rules. */
port_flow_dump(portid_t port_id, const char *file_name)
    FILE *file = stdout;
    struct rte_flow_error error;

    if (file_name && strlen(file_name)) {
        file = fopen(file_name, "w");
            printf("Failed to create file %s: %s\n", file_name,
    ret = rte_flow_dev_dump(port_id, file, &error);
        port_flow_complain(&error);
        printf("Failed to dump flow: %s\n", strerror(-ret));
        printf("Flow dump finished\n");
    if (file_name && strlen(file_name))
/** Query a flow rule. */
port_flow_query(portid_t port_id, uint32_t rule,
                const struct rte_flow_action *action)
    struct rte_flow_error error;
    struct rte_port *port;
    struct port_flow *pf;
        struct rte_flow_query_count count;

    if (port_id_is_invalid(port_id, ENABLED_WARN) ||
        port_id == (portid_t)RTE_PORT_ALL)
    port = &ports[port_id];
    for (pf = port->flow_list; pf; pf = pf->next)
        printf("Flow rule #%u not found\n", rule);
    ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
                        &name, sizeof(name),
                        (void *)(uintptr_t)action->type, &error);
        return port_flow_complain(&error);
    switch (action->type) {
    case RTE_FLOW_ACTION_TYPE_COUNT:
        printf("Cannot query action type %d (%s)\n",
               action->type, name);
    /* Poisoning to make sure PMDs update it in case of error. */
    memset(&error, 0x55, sizeof(error));
    memset(&query, 0, sizeof(query));
    if (rte_flow_query(port_id, pf->flow, action, &query, &error))
        return port_flow_complain(&error);
    switch (action->type) {
    case RTE_FLOW_ACTION_TYPE_COUNT:
               " hits: %" PRIu64 "\n"
               " bytes: %" PRIu64 "\n",
               query.count.hits_set,
               query.count.bytes_set,
        printf("Cannot display result for action type %d (%s)\n",
               action->type, name);
/** List flow rules. */
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
    struct rte_port *port;
    struct port_flow *pf;
    struct port_flow *list = NULL;

    if (port_id_is_invalid(port_id, ENABLED_WARN) ||
        port_id == (portid_t)RTE_PORT_ALL)
    port = &ports[port_id];
    if (!port->flow_list)
    /* Sort flows by group, priority and ID. */
    for (pf = port->flow_list; pf != NULL; pf = pf->next) {
        struct port_flow **tmp;
        const struct rte_flow_attr *curr = pf->rule.attr;

        /* Filter out unwanted groups. */
        for (i = 0; i != n; ++i)
            if (curr->group == group[i])
        for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
            const struct rte_flow_attr *comp = (*tmp)->rule.attr;

            if (curr->group > comp->group ||
                (curr->group == comp->group &&
                 curr->priority > comp->priority) ||
                (curr->group == comp->group &&
                 curr->priority == comp->priority &&
                 pf->id > (*tmp)->id))
    printf("ID\tGroup\tPrio\tAttr\tRule\n");
    for (pf = list; pf != NULL; pf = pf->tmp) {
        const struct rte_flow_item *item = pf->rule.pattern;
        const struct rte_flow_action *action = pf->rule.actions;

        printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
               pf->rule.attr->group,
               pf->rule.attr->priority,
               pf->rule.attr->ingress ? 'i' : '-',
               pf->rule.attr->egress ? 'e' : '-',
               pf->rule.attr->transfer ? 't' : '-');
        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
            if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
                              &name, sizeof(name),
                              (void *)(uintptr_t)item->type,
            if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
                printf("%s ", name);
        while (action->type != RTE_FLOW_ACTION_TYPE_END) {
            if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
                              &name, sizeof(name),
                              (void *)(uintptr_t)action->type,
            if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
                printf(" %s", name);
/** Restrict ingress traffic to the defined flow rules. */
port_flow_isolate(portid_t port_id, int set)
    struct rte_flow_error error;

    /* Poisoning to make sure PMDs update it in case of error. */
    memset(&error, 0x66, sizeof(error));
    if (rte_flow_isolate(port_id, set, &error))
        return port_flow_complain(&error);
    printf("Ingress traffic on port %u is %s to the defined flow rules\n",
           set ? "now restricted" : "not restricted anymore");
 * RX/TX ring descriptors display functions.
rx_queue_id_is_invalid(queueid_t rxq_id)
    if (rxq_id < nb_rxq)
    printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);

tx_queue_id_is_invalid(queueid_t txq_id)
    if (txq_id < nb_txq)
    printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);

rx_desc_id_is_invalid(uint16_t rxdesc_id)
    if (rxdesc_id < nb_rxd)
    printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",

tx_desc_id_is_invalid(uint16_t txdesc_id)
    if (txdesc_id < nb_txd)
    printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
    char mz_name[RTE_MEMZONE_NAMESIZE];
    const struct rte_memzone *mz;

    snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
             port_id, q_id, ring_name);
    mz = rte_memzone_lookup(mz_name);
        printf("%s ring memory zone (port %d, queue %d) not found "
               "(zone name = %s)\n",
               ring_name, port_id, q_id, mz_name);
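/*
 * Memzone names follow the "eth_p<port>_q<queue>_<ring>" convention built
 * above; e.g. the RX ring of port 0, queue 2 would be looked up as
 * "eth_p0_q2_rx_ring" (illustrative values; callers pass "rx_ring" or
 * "tx_ring" as ring_name).
 */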
union igb_ring_dword {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

struct igb_ring_desc_32_bytes {
    union igb_ring_dword lo_dword;
    union igb_ring_dword hi_dword;
    union igb_ring_dword resv1;
    union igb_ring_dword resv2;

struct igb_ring_desc_16_bytes {
    union igb_ring_dword lo_dword;
    union igb_ring_dword hi_dword;

ring_rxd_display_dword(union igb_ring_dword dword)
    printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
           (unsigned)dword.words.hi);
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
                           __rte_unused portid_t port_id,
    struct igb_ring_desc_16_bytes *ring =
        (struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
    struct rte_eth_dev_info dev_info;

    ret = eth_dev_info_get_print_err(port_id, &dev_info);

    if (strstr(dev_info.driver_name, "i40e") != NULL) {
        /* 32-byte RX descriptor, i40e only */
        struct igb_ring_desc_32_bytes *ring =
            (struct igb_ring_desc_32_bytes *)ring_mz->addr;
        ring[desc_id].lo_dword.dword =
            rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
        ring_rxd_display_dword(ring[desc_id].lo_dword);
        ring[desc_id].hi_dword.dword =
            rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
        ring_rxd_display_dword(ring[desc_id].hi_dword);
        ring[desc_id].resv1.dword =
            rte_le_to_cpu_64(ring[desc_id].resv1.dword);
        ring_rxd_display_dword(ring[desc_id].resv1);
        ring[desc_id].resv2.dword =
            rte_le_to_cpu_64(ring[desc_id].resv2.dword);
        ring_rxd_display_dword(ring[desc_id].resv2);

    /* 16-byte RX descriptor */
    ring[desc_id].lo_dword.dword =
        rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
    ring_rxd_display_dword(ring[desc_id].lo_dword);
    ring[desc_id].hi_dword.dword =
        rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
    ring_rxd_display_dword(ring[desc_id].hi_dword);
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
    struct igb_ring_desc_16_bytes *ring;
    struct igb_ring_desc_16_bytes txd;

    ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
    txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
    txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
    printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
           (unsigned)txd.lo_dword.words.lo,
           (unsigned)txd.lo_dword.words.hi,
           (unsigned)txd.hi_dword.words.lo,
           (unsigned)txd.hi_dword.words.hi);
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
    const struct rte_memzone *rx_mz;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
    if (rx_queue_id_is_invalid(rxq_id))
    if (rx_desc_id_is_invalid(rxd_id))
    rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
    ring_rx_descriptor_display(rx_mz, port_id, rxd_id);

tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
    const struct rte_memzone *tx_mz;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
    if (tx_queue_id_is_invalid(txq_id))
    if (tx_desc_id_is_invalid(txd_id))
    tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
    ring_tx_descriptor_display(tx_mz, txd_id);
fwd_lcores_config_display(void)
    printf("List of forwarding lcores:");
    for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
        printf(" %2u", fwd_lcores_cpuids[lc_id]);
rxtx_config_display(void)
    printf(" %s packet forwarding%s packets/burst=%d\n",
           cur_fwd_eng->fwd_mode_name,
           retry_enabled == 0 ? "" : " with retry",

    if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
        printf(" packet len=%u - nb packet segments=%d\n",
               (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

    printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
           nb_fwd_lcores, nb_fwd_ports);

    RTE_ETH_FOREACH_DEV(pid) {
        struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
        struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
        uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
        uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
        uint16_t nb_rx_desc_tmp;
        uint16_t nb_tx_desc_tmp;
        struct rte_eth_rxq_info rx_qinfo;
        struct rte_eth_txq_info tx_qinfo;

        /* per port config */
        printf(" port %d: RX queue number: %d Tx queue number: %d\n",
               (unsigned int)pid, nb_rxq, nb_txq);

        printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
               ports[pid].dev_conf.rxmode.offloads,
               ports[pid].dev_conf.txmode.offloads);

        /* per-RX-queue config, shown only for the first queue to keep
         * the output short */
        for (qid = 0; qid < 1; qid++) {
            rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
                nb_rx_desc_tmp = nb_rx_desc[qid];
                nb_rx_desc_tmp = rx_qinfo.nb_desc;

            printf(" RX queue: %d\n", qid);
            printf(" RX desc=%d - RX free threshold=%d\n",
                   nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
            printf(" RX threshold registers: pthresh=%d hthresh=%d "
                   rx_conf[qid].rx_thresh.pthresh,
                   rx_conf[qid].rx_thresh.hthresh,
                   rx_conf[qid].rx_thresh.wthresh);
            printf(" RX Offloads=0x%"PRIx64"\n",
                   rx_conf[qid].offloads);

        /* per-TX-queue config, shown only for the first queue to keep
         * the output short */
        for (qid = 0; qid < 1; qid++) {
            rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
                nb_tx_desc_tmp = nb_tx_desc[qid];
                nb_tx_desc_tmp = tx_qinfo.nb_desc;

            printf(" TX queue: %d\n", qid);
            printf(" TX desc=%d - TX free threshold=%d\n",
                   nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
            printf(" TX threshold registers: pthresh=%d hthresh=%d "
                   tx_conf[qid].tx_thresh.pthresh,
                   tx_conf[qid].tx_thresh.hthresh,
                   tx_conf[qid].tx_thresh.wthresh);
            printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
                   tx_conf[qid].offloads, tx_conf[qid].tx_rs_thresh);
port_rss_reta_info(portid_t port_id,
                   struct rte_eth_rss_reta_entry64 *reta_conf,
                   uint16_t nb_entries)
    uint16_t i, idx, shift;

    if (port_id_is_invalid(port_id, ENABLED_WARN))

    ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
        printf("Failed to get RSS RETA info, return code = %d\n", ret);

    for (i = 0; i < nb_entries; i++) {
        idx = i / RTE_RETA_GROUP_SIZE;
        shift = i % RTE_RETA_GROUP_SIZE;
        if (!(reta_conf[idx].mask & (1ULL << shift)))
        printf("RSS RETA configuration: hash index=%u, queue=%u\n",
               i, reta_conf[idx].reta[shift]);
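        /*
         * Each rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64)
         * RETA entries: global index i maps to group i / 64 and slot
         * i % 64, so entry 70 lives in reta_conf[1].reta[6]
         * (illustrative index).
         */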
 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
    struct rte_eth_rss_conf rss_conf = {0};
    uint8_t rss_key[RSS_HASH_KEY_LENGTH];
    struct rte_eth_dev_info dev_info;
    uint8_t hash_key_size;

    if (port_id_is_invalid(port_id, ENABLED_WARN))

    ret = eth_dev_info_get_print_err(port_id, &dev_info);

    if (dev_info.hash_key_size > 0 &&
        dev_info.hash_key_size <= sizeof(rss_key))
        hash_key_size = dev_info.hash_key_size;
        printf("dev_info did not provide a valid hash key size\n");

    /* Get RSS hash key if asked to display it */
    rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
    rss_conf.rss_key_len = hash_key_size;
    diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
            printf("port index %d invalid\n", port_id);
            printf("operation not supported by device\n");
            printf("operation failed - diag=%d\n", diag);

    rss_hf = rss_conf.rss_hf;
        printf("RSS disabled\n");

    printf("RSS functions:\n ");
    for (i = 0; rss_type_table[i].str; i++) {
        if (rss_hf & rss_type_table[i].rss_type)
            printf("%s ", rss_type_table[i].str);

    printf("RSS key:\n");
    for (i = 0; i < hash_key_size; i++)
        printf("%02X", rss_key[i]);
port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
    struct rte_eth_rss_conf rss_conf;

    rss_conf.rss_key = NULL;
    rss_conf.rss_key_len = hash_key_len;
    rss_conf.rss_hf = 0;
    for (i = 0; rss_type_table[i].str; i++) {
        if (!strcmp(rss_type_table[i].str, rss_type))
            rss_conf.rss_hf = rss_type_table[i].rss_type;
    diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
        rss_conf.rss_key = hash_key;
        diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
            printf("port index %d invalid\n", port_id);
            printf("operation not supported by device\n");
            printf("operation failed - diag=%d\n", diag);
 * Set up the forwarding configuration for each logical core.
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
    streamid_t nb_fs_per_lcore;

    nb_fs = cfg->nb_fwd_streams;
    nb_fc = cfg->nb_fwd_lcores;
    if (nb_fs <= nb_fc) {
        nb_fs_per_lcore = 1;
        nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
        nb_extra = (lcoreid_t) (nb_fs % nb_fc);

    nb_lc = (lcoreid_t) (nb_fc - nb_extra);

    for (lc_id = 0; lc_id < nb_lc; lc_id++) {
        fwd_lcores[lc_id]->stream_idx = sm_id;
        fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
        sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);

    /*
     * Assign extra remaining streams, if any.
     */
    nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
    for (lc_id = 0; lc_id < nb_extra; lc_id++) {
        fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
        fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
        sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
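    /*
     * Distribution example (hypothetical numbers): with nb_fs = 10
     * streams and nb_fc = 4 lcores, nb_fs_per_lcore = 10 / 4 = 2 and
     * nb_extra = 10 % 4 = 2, so the first nb_lc = 2 lcores receive two
     * streams each and the remaining two lcores receive three each
     * (2*2 + 2*3 = 10).
     */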
fwd_topology_tx_port_get(portid_t rxp)
    static int warning_once = 1;

    RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);

    switch (port_topology) {
    case PORT_TOPOLOGY_PAIRED:
        if ((rxp & 0x1) == 0) {
            if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
            printf("\nWarning! port-topology=paired"
                   " and an odd number of forwarding ports;"
                   " the last port will pair with"
    case PORT_TOPOLOGY_CHAINED:
        return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
    case PORT_TOPOLOGY_LOOP:
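/*
 * Topology summary (derived from the switch above): "paired" forwards
 * between port couples (0<->1, 2<->3, ...), "chained" forwards each port
 * to the next one modulo the port count (0->1->...->0), and "loop" keeps
 * RX and TX on the same port.
 */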
simple_fwd_config_setup(void)
    cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
    cur_fwd_config.nb_fwd_streams =
        (streamid_t) cur_fwd_config.nb_fwd_ports;

    /* reinitialize forwarding streams */

    /*
     * In the simple forwarding test, the number of forwarding cores
     * must be less than or equal to the number of forwarding ports.
     */
    cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
    if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
        cur_fwd_config.nb_fwd_lcores =
            (lcoreid_t) cur_fwd_config.nb_fwd_ports;
    setup_fwd_config_of_each_lcore(&cur_fwd_config);

    for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
        fwd_streams[i]->rx_port = fwd_ports_ids[i];
        fwd_streams[i]->rx_queue = 0;
        fwd_streams[i]->tx_port =
            fwd_ports_ids[fwd_topology_tx_port_get(i)];
        fwd_streams[i]->tx_queue = 0;
        fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
        fwd_streams[i]->retry_enabled = retry_enabled;
 * For the RSS forwarding test, all streams are distributed over the lcores.
 * Each stream is composed of an RX queue to poll on an RX port for input
 * packets, associated with a TX queue of a TX port to send forwarded packets.
rss_fwd_config_setup(void)
    cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
    cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
    cur_fwd_config.nb_fwd_streams =
        (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

    if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
        cur_fwd_config.nb_fwd_lcores =
            (lcoreid_t)cur_fwd_config.nb_fwd_streams;

    /* reinitialize forwarding streams */

    setup_fwd_config_of_each_lcore(&cur_fwd_config);

    for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
        struct fwd_stream *fs;

        fs = fwd_streams[sm_id];
        txp = fwd_topology_tx_port_get(rxp);
        fs->rx_port = fwd_ports_ids[rxp];
        fs->tx_port = fwd_ports_ids[txp];
        fs->peer_addr = fs->tx_port;
        fs->retry_enabled = retry_enabled;

        if (rxp < nb_fwd_ports)
 * For the DCB forwarding test, each core is assigned to one traffic class.
 * Each core is assigned a multi-stream, each stream being composed of
 * an RX queue to poll on an RX port for input packets, associated with
 * a TX queue of a TX port where to send forwarded packets. All RX and
 * TX queues are mapped to the same traffic class.
 * If VMDQ and DCB co-exist, each traffic class on different POOLs shares
dcb_fwd_config_setup(void)
    struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
    portid_t txp, rxp = 0;
    queueid_t txq, rxq = 0;
    uint16_t nb_rx_queue, nb_tx_queue;
    uint16_t i, j, k, sm_id = 0;

    cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
    cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
    cur_fwd_config.nb_fwd_streams =
        (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);

    /* reinitialize forwarding streams */

    /* get the DCB info on the first RX and TX ports */
    (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
    (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

    for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
        fwd_lcores[lc_id]->stream_nb = 0;
        fwd_lcores[lc_id]->stream_idx = sm_id;
        for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
            /* if nb_queue is zero, this TC is not enabled
             * on the POOL
             */
            if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
            k = fwd_lcores[lc_id]->stream_nb +
                fwd_lcores[lc_id]->stream_idx;
            rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
            txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
            nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
            nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
            for (j = 0; j < nb_rx_queue; j++) {
                struct fwd_stream *fs;

                fs = fwd_streams[k + j];
                fs->rx_port = fwd_ports_ids[rxp];
                fs->rx_queue = rxq + j;
                fs->tx_port = fwd_ports_ids[txp];
                fs->tx_queue = txq + j % nb_tx_queue;
                fs->peer_addr = fs->tx_port;
                fs->retry_enabled = retry_enabled;
            fwd_lcores[lc_id]->stream_nb +=
                rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
        sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

        if (tc < rxp_dcb_info.nb_tcs)
        /* Restart from TC 0 on next RX port */
        if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
            (rxp + ((nb_ports >> 1) / nb_fwd_ports));
        if (rxp >= nb_fwd_ports)
        /* get the DCB information on next RX and TX ports */
        if ((rxp & 0x1) == 0)
            txp = (portid_t) (rxp + 1);
            txp = (portid_t) (rxp - 1);
        rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
        rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2313 icmp_echo_config_setup(void)
2320 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2321 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2322 (nb_txq * nb_fwd_ports);
2324 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2325 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2326 cur_fwd_config.nb_fwd_streams =
2327 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2328 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2329 cur_fwd_config.nb_fwd_lcores =
2330 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2331 if (verbose_level > 0) {
2332 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2334 cur_fwd_config.nb_fwd_lcores,
2335 cur_fwd_config.nb_fwd_ports,
2336 cur_fwd_config.nb_fwd_streams);
2339 /* reinitialize forwarding streams */
2341 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2343 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2344 if (verbose_level > 0)
2345 printf(" core=%d: \n", lc_id);
2346 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2347 struct fwd_stream *fs;
2348 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2349 fs->rx_port = fwd_ports_ids[rxp];
2351 fs->tx_port = fs->rx_port;
2353 fs->peer_addr = fs->tx_port;
2354 fs->retry_enabled = retry_enabled;
2355 if (verbose_level > 0)
2356 printf(" stream=%d port=%d rxq=%d txq=%d\n",
2357 sm_id, fs->rx_port, fs->rx_queue,
2359 rxq = (queueid_t) (rxq + 1);
2360 if (rxq == nb_rxq) {
2362 rxp = (portid_t) (rxp + 1);
2368 #if defined RTE_LIBRTE_PMD_SOFTNIC
2370 softnic_fwd_config_setup(void)
2372 struct rte_port *port;
2373 portid_t pid, softnic_portid;
2375 uint8_t softnic_enable = 0;
2377 RTE_ETH_FOREACH_DEV(pid) {
2379 const char *driver = port->dev_info.driver_name;
2381 if (strcmp(driver, "net_softnic") == 0) {
2382 softnic_portid = pid;
2388 if (softnic_enable == 0) {
2389 printf("Softnic mode not configured(%s)!\n", __func__);
2393 cur_fwd_config.nb_fwd_ports = 1;
2394 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;
2396 /* Re-initialize forwarding streams */
* In the softnic forwarding test, the number of forwarding cores
* is set to one; the remaining cores are used for softnic packet processing.
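*
* For instance (illustrative), when testpmd runs with 4 lcores, only one
* of them polls the softnic port's queues below; the others are left to
* the softnic PMD for its packet processing.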
2403 cur_fwd_config.nb_fwd_lcores = 1;
2404 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2406 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
2407 fwd_streams[i]->rx_port = softnic_portid;
2408 fwd_streams[i]->rx_queue = i;
2409 fwd_streams[i]->tx_port = softnic_portid;
2410 fwd_streams[i]->tx_queue = i;
2411 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2412 fwd_streams[i]->retry_enabled = retry_enabled;
2418 fwd_config_setup(void)
2420 cur_fwd_config.fwd_eng = cur_fwd_eng;
2421 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2422 icmp_echo_config_setup();
2426 #if defined RTE_LIBRTE_PMD_SOFTNIC
2427 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
2428 softnic_fwd_config_setup();
if ((nb_rxq > 1) && (nb_txq > 1)) {
2435 dcb_fwd_config_setup();
2437 rss_fwd_config_setup();
2440 simple_fwd_config_setup();
2444 mp_alloc_to_str(uint8_t mode)
2447 case MP_ALLOC_NATIVE:
2453 case MP_ALLOC_XMEM_HUGE:
2463 pkt_fwd_config_display(struct fwd_config *cfg)
2465 struct fwd_stream *fs;
2469 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2470 "NUMA support %s, MP allocation mode: %s\n",
2471 cfg->fwd_eng->fwd_mode_name,
2472 retry_enabled == 0 ? "" : " with retry",
2473 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2474 numa_support == 1 ? "enabled" : "disabled",
2475 mp_alloc_to_str(mp_alloc_type));
2478 printf("TX retry num: %u, delay between TX retries: %uus\n",
2479 burst_tx_retry_num, burst_tx_delay_time);
2480 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2481 printf("Logical Core %u (socket %u) forwards packets on "
2483 fwd_lcores_cpuids[lc_id],
2484 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2485 fwd_lcores[lc_id]->stream_nb);
2486 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2487 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2488 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
2489 "P=%d/Q=%d (socket %u) ",
2490 fs->rx_port, fs->rx_queue,
2491 ports[fs->rx_port].socket_id,
2492 fs->tx_port, fs->tx_queue,
2493 ports[fs->tx_port].socket_id);
2494 print_ethaddr("peer=",
2495 &peer_eth_addrs[fs->peer_addr]);
2503 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2505 struct rte_ether_addr new_peer_addr;
2506 if (!rte_eth_dev_is_valid_port(port_id)) {
2507 printf("Error: Invalid port number %i\n", port_id);
2510 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
2511 printf("Error: Invalid ethernet address: %s\n", peer_addr);
2514 peer_eth_addrs[port_id] = new_peer_addr;
2518 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2521 unsigned int lcore_cpuid;
2526 for (i = 0; i < nb_lc; i++) {
2527 lcore_cpuid = lcorelist[i];
if (!rte_lcore_is_enabled(lcore_cpuid)) {
2529 printf("lcore %u not enabled\n", lcore_cpuid);
2532 if (lcore_cpuid == rte_get_master_lcore()) {
2533 printf("lcore %u cannot be masked on for running "
2534 "packet forwarding, which is the master lcore "
2535 "and reserved for command line parsing only\n",
2540 fwd_lcores_cpuids[i] = lcore_cpuid;
2542 if (record_now == 0) {
2546 nb_cfg_lcores = (lcoreid_t) nb_lc;
2547 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2548 printf("previous number of forwarding cores %u - changed to "
2549 "number of configured cores %u\n",
2550 (unsigned int) nb_fwd_lcores, nb_lc);
2551 nb_fwd_lcores = (lcoreid_t) nb_lc;
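/*
* Example (illustrative): lcoremask 0x6 selects lcores 1 and 2, which is
* equivalent to calling set_fwd_lcores_list() with the list {1, 2}.
*/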
2558 set_fwd_lcores_mask(uint64_t lcoremask)
2560 unsigned int lcorelist[64];
2564 if (lcoremask == 0) {
2565 printf("Invalid NULL mask of cores\n");
2569 for (i = 0; i < 64; i++) {
if (!((uint64_t)(1ULL << i) & lcoremask))
2572 lcorelist[nb_lc++] = i;
2574 return set_fwd_lcores_list(lcorelist, nb_lc);
2578 set_fwd_lcores_number(uint16_t nb_lc)
2580 if (nb_lc > nb_cfg_lcores) {
2581 printf("nb fwd cores %u > %u (max. number of configured "
2582 "lcores) - ignored\n",
2583 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2586 nb_fwd_lcores = (lcoreid_t) nb_lc;
2587 printf("Number of forwarding cores set to %u\n",
2588 (unsigned int) nb_fwd_lcores);
2592 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2600 for (i = 0; i < nb_pt; i++) {
2601 port_id = (portid_t) portlist[i];
2602 if (port_id_is_invalid(port_id, ENABLED_WARN))
2605 fwd_ports_ids[i] = port_id;
2607 if (record_now == 0) {
2611 nb_cfg_ports = (portid_t) nb_pt;
2612 if (nb_fwd_ports != (portid_t) nb_pt) {
2613 printf("previous number of forwarding ports %u - changed to "
2614 "number of configured ports %u\n",
2615 (unsigned int) nb_fwd_ports, nb_pt);
2616 nb_fwd_ports = (portid_t) nb_pt;
2621 * Parse the user input and obtain the list of forwarding ports
2624 * String containing the user input. User can specify
2625 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
* For example, to use all 4 available ports in the system,
* the input can be 0-3 or 0,1,2,3.
* To use only ports 1 and 2, the input is 1,2.
* The only valid separators are '-' and ','.
2631 * @param[out] values
2632 * This array will be filled with a list of port IDs
2633 * based on the user input
* Note that duplicate entries are discarded; only the first
* count entries in this array are port IDs, and the rest
* keep their default values
2637 * @param[in] maxsize
* This parameter denotes two things:
* 1) the number of elements in the values array
* 2) the maximum value of each element in the values array
2642 * On success, returns total count of parsed port IDs
2643 * On failure, returns 0
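*
* Example (illustrative): parse_port_list("0-2,5", values, 8) fills
* values[0..3] with {0, 1, 2, 5} and returns 4.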
2646 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
2648 unsigned int count = 0;
2652 unsigned int marked[maxsize];
2654 if (list == NULL || values == NULL)
2657 for (i = 0; i < (int)maxsize; i++)
/* Remove blank spaces, if any */
2664 while (isblank(*list))
2669 value = strtol(list, &end, 10);
2670 if (errno || end == NULL)
2672 if (value < 0 || value >= (int)maxsize)
2674 while (isblank(*end))
2676 if (*end == '-' && min == INT_MAX) {
2678 } else if ((*end == ',') || (*end == '\0')) {
2682 for (i = min; i <= max; i++) {
2683 if (count < maxsize) {
2695 } while (*end != '\0');
2701 parse_fwd_portlist(const char *portlist)
2703 unsigned int portcount;
2704 unsigned int portindex[RTE_MAX_ETHPORTS];
2705 unsigned int i, valid_port_count = 0;
2707 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
2709 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
* Here we verify the validity of the ports
* and thereby calculate the total number of valid ports.
2716 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
2717 if (rte_eth_dev_is_valid_port(portindex[i])) {
2718 portindex[valid_port_count] = portindex[i];
2723 set_fwd_ports_list(portindex, valid_port_count);
2727 set_fwd_ports_mask(uint64_t portmask)
2729 unsigned int portlist[64];
2733 if (portmask == 0) {
2734 printf("Invalid NULL mask of ports\n");
2738 RTE_ETH_FOREACH_DEV(i) {
if (!((uint64_t)(1ULL << i) & portmask))
2741 portlist[nb_pt++] = i;
2743 set_fwd_ports_list(portlist, nb_pt);
2747 set_fwd_ports_number(uint16_t nb_pt)
2749 if (nb_pt > nb_cfg_ports) {
2750 printf("nb fwd ports %u > %u (number of configured "
2751 "ports) - ignored\n",
2752 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2755 nb_fwd_ports = (portid_t) nb_pt;
2756 printf("Number of forwarding ports set to %u\n",
2757 (unsigned int) nb_fwd_ports);
2761 port_is_forwarding(portid_t port_id)
2765 if (port_id_is_invalid(port_id, ENABLED_WARN))
2768 for (i = 0; i < nb_fwd_ports; i++) {
2769 if (fwd_ports_ids[i] == port_id)
2777 set_nb_pkt_per_burst(uint16_t nb)
2779 if (nb > MAX_PKT_BURST) {
2780 printf("nb pkt per burst: %u > %u (maximum packet per burst) "
2782 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2785 nb_pkt_per_burst = nb;
2786 printf("Number of packets per burst set to %u\n",
2787 (unsigned int) nb_pkt_per_burst);
2791 tx_split_get_name(enum tx_pkt_split split)
2795 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2796 if (tx_split_name[i].split == split)
2797 return tx_split_name[i].name;
2803 set_tx_pkt_split(const char *name)
2807 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2808 if (strcmp(tx_split_name[i].name, name) == 0) {
2809 tx_pkt_split = tx_split_name[i].split;
2813 printf("unknown value: \"%s\"\n", name);
2817 show_tx_pkt_segments(void)
2823 split = tx_split_get_name(tx_pkt_split);
2825 printf("Number of segments: %u\n", n);
2826 printf("Segment sizes: ");
2827 for (i = 0; i != n - 1; i++)
2828 printf("%hu,", tx_pkt_seg_lengths[i]);
2829 printf("%hu\n", tx_pkt_seg_lengths[i]);
2830 printf("Split packet: %s\n", split);
2834 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2836 uint16_t tx_pkt_len;
2839 if (nb_segs >= (unsigned) nb_txd) {
2840 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
2841 nb_segs, (unsigned int) nb_txd);
* Check that each segment length does not exceed the mbuf
* data size.
* Check also that the total packet length is greater than or equal to
* the size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
* 20 + 8).
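*
* For example (illustrative), the testpmd command "set txpkts 64,128"
* requests two segments totalling 192 bytes, which satisfies both
* checks with the default mbuf data size.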
2853 for (i = 0; i < nb_segs; i++) {
2854 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2855 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2856 i, seg_lengths[i], (unsigned) mbuf_data_size);
2859 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2861 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
2862 printf("total packet length=%u < %d - give up\n",
2863 (unsigned) tx_pkt_len,
2864 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
2868 for (i = 0; i < nb_segs; i++)
2869 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2871 tx_pkt_length = tx_pkt_len;
2872 tx_pkt_nb_segs = (uint8_t) nb_segs;
2876 setup_gro(const char *onoff, portid_t port_id)
2878 if (!rte_eth_dev_is_valid_port(port_id)) {
2879 printf("invalid port id %u\n", port_id);
2882 if (test_done == 0) {
2883 printf("Before enable/disable GRO,"
2884 " please stop forwarding first\n");
2887 if (strcmp(onoff, "on") == 0) {
2888 if (gro_ports[port_id].enable != 0) {
2889 printf("Port %u has enabled GRO. Please"
2890 " disable GRO first\n", port_id);
2893 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2894 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
2895 gro_ports[port_id].param.max_flow_num =
2896 GRO_DEFAULT_FLOW_NUM;
2897 gro_ports[port_id].param.max_item_per_flow =
2898 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
2900 gro_ports[port_id].enable = 1;
2902 if (gro_ports[port_id].enable == 0) {
2903 printf("Port %u has disabled GRO\n", port_id);
2906 gro_ports[port_id].enable = 0;
2911 setup_gro_flush_cycles(uint8_t cycles)
2913 if (test_done == 0) {
2914 printf("Before change flush interval for GRO,"
2915 " please stop forwarding first.\n");
2919 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
2920 GRO_DEFAULT_FLUSH_CYCLES) {
2921 printf("The flushing cycle be in the range"
2922 " of 1 to %u. Revert to the default"
2924 GRO_MAX_FLUSH_CYCLES,
2925 GRO_DEFAULT_FLUSH_CYCLES);
2926 cycles = GRO_DEFAULT_FLUSH_CYCLES;
2929 gro_flush_cycles = cycles;
2933 show_gro(portid_t port_id)
2935 struct rte_gro_param *param;
2936 uint32_t max_pkts_num;
2938 param = &gro_ports[port_id].param;
2940 if (!rte_eth_dev_is_valid_port(port_id)) {
2941 printf("Invalid port id %u.\n", port_id);
2944 if (gro_ports[port_id].enable) {
2945 printf("GRO type: TCP/IPv4\n");
2946 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2947 max_pkts_num = param->max_flow_num *
2948 param->max_item_per_flow;
2950 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
2951 printf("Max number of packets to perform GRO: %u\n",
2953 printf("Flushing cycles: %u\n", gro_flush_cycles);
2955 printf("Port %u doesn't enable GRO.\n", port_id);
2959 setup_gso(const char *mode, portid_t port_id)
2961 if (!rte_eth_dev_is_valid_port(port_id)) {
2962 printf("invalid port id %u\n", port_id);
2965 if (strcmp(mode, "on") == 0) {
2966 if (test_done == 0) {
2967 printf("before enabling GSO,"
2968 " please stop forwarding first\n");
2971 gso_ports[port_id].enable = 1;
2972 } else if (strcmp(mode, "off") == 0) {
2973 if (test_done == 0) {
2974 printf("before disabling GSO,"
2975 " please stop forwarding first\n");
2978 gso_ports[port_id].enable = 0;
2983 list_pkt_forwarding_modes(void)
2985 static char fwd_modes[128] = "";
2986 const char *separator = "|";
2987 struct fwd_engine *fwd_eng;
if (strlen(fwd_modes) == 0) {
2991 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2992 strncat(fwd_modes, fwd_eng->fwd_mode_name,
2993 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2994 strncat(fwd_modes, separator,
2995 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2997 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3004 list_pkt_forwarding_retry_modes(void)
3006 static char fwd_modes[128] = "";
3007 const char *separator = "|";
3008 struct fwd_engine *fwd_eng;
3011 if (strlen(fwd_modes) == 0) {
3012 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3013 if (fwd_eng == &rx_only_engine)
3015 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3017 strlen(fwd_modes) - 1);
3018 strncat(fwd_modes, separator,
3020 strlen(fwd_modes) - 1);
3022 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3029 set_pkt_forwarding_mode(const char *fwd_mode_name)
3031 struct fwd_engine *fwd_eng;
3035 while ((fwd_eng = fwd_engines[i]) != NULL) {
if (strcmp(fwd_eng->fwd_mode_name, fwd_mode_name) == 0) {
3037 printf("Set %s packet forwarding mode%s\n",
3039 retry_enabled == 0 ? "" : " with retry");
3040 cur_fwd_eng = fwd_eng;
3045 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
3049 add_rx_dump_callbacks(portid_t portid)
3051 struct rte_eth_dev_info dev_info;
3055 if (port_id_is_invalid(portid, ENABLED_WARN))
3058 ret = eth_dev_info_get_print_err(portid, &dev_info);
3062 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3063 if (!ports[portid].rx_dump_cb[queue])
3064 ports[portid].rx_dump_cb[queue] =
3065 rte_eth_add_rx_callback(portid, queue,
3066 dump_rx_pkts, NULL);
3070 add_tx_dump_callbacks(portid_t portid)
3072 struct rte_eth_dev_info dev_info;
3076 if (port_id_is_invalid(portid, ENABLED_WARN))
3079 ret = eth_dev_info_get_print_err(portid, &dev_info);
3083 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3084 if (!ports[portid].tx_dump_cb[queue])
3085 ports[portid].tx_dump_cb[queue] =
3086 rte_eth_add_tx_callback(portid, queue,
3087 dump_tx_pkts, NULL);
3091 remove_rx_dump_callbacks(portid_t portid)
3093 struct rte_eth_dev_info dev_info;
3097 if (port_id_is_invalid(portid, ENABLED_WARN))
3100 ret = eth_dev_info_get_print_err(portid, &dev_info);
3104 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3105 if (ports[portid].rx_dump_cb[queue]) {
3106 rte_eth_remove_rx_callback(portid, queue,
3107 ports[portid].rx_dump_cb[queue]);
3108 ports[portid].rx_dump_cb[queue] = NULL;
3113 remove_tx_dump_callbacks(portid_t portid)
3115 struct rte_eth_dev_info dev_info;
3119 if (port_id_is_invalid(portid, ENABLED_WARN))
3122 ret = eth_dev_info_get_print_err(portid, &dev_info);
3126 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3127 if (ports[portid].tx_dump_cb[queue]) {
3128 rte_eth_remove_tx_callback(portid, queue,
3129 ports[portid].tx_dump_cb[queue]);
3130 ports[portid].tx_dump_cb[queue] = NULL;
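/*
* Verbosity levels (illustrative summary): 1 dumps RX packets only,
* 2 dumps TX packets only, and 3 or more dumps both directions.
*/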
3135 configure_rxtx_dump_callbacks(uint16_t verbose)
3139 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3140 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
3144 RTE_ETH_FOREACH_DEV(portid)
3146 if (verbose == 1 || verbose > 2)
3147 add_rx_dump_callbacks(portid);
3149 remove_rx_dump_callbacks(portid);
3151 add_tx_dump_callbacks(portid);
3153 remove_tx_dump_callbacks(portid);
3158 set_verbose_level(uint16_t vb_level)
3160 printf("Change verbose level from %u to %u\n",
3161 (unsigned int) verbose_level, (unsigned int) vb_level);
3162 verbose_level = vb_level;
3163 configure_rxtx_dump_callbacks(verbose_level);
3167 vlan_extend_set(portid_t port_id, int on)
3171 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3173 if (port_id_is_invalid(port_id, ENABLED_WARN))
3176 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3179 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
3180 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3182 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
3183 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3186 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3188 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
3189 "diag=%d\n", port_id, on, diag);
3190 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3194 rx_vlan_strip_set(portid_t port_id, int on)
3198 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3200 if (port_id_is_invalid(port_id, ENABLED_WARN))
3203 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3206 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
3207 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3209 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
3210 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3213 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3215 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
3216 "diag=%d\n", port_id, on, diag);
3217 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3221 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
3225 if (port_id_is_invalid(port_id, ENABLED_WARN))
3228 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
3230 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
3231 "diag=%d\n", port_id, queue_id, on, diag);
3235 rx_vlan_filter_set(portid_t port_id, int on)
3239 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3241 if (port_id_is_invalid(port_id, ENABLED_WARN))
3244 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3247 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
3248 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3250 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
3251 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3254 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3256 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
3257 "diag=%d\n", port_id, on, diag);
3258 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3262 rx_vlan_qinq_strip_set(portid_t port_id, int on)
3266 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3268 if (port_id_is_invalid(port_id, ENABLED_WARN))
3271 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3274 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
3275 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3277 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
3278 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3281 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3283 printf("%s(port_pi=%d, on=%d) failed "
3284 "diag=%d\n", __func__, port_id, on, diag);
3285 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3289 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
3293 if (port_id_is_invalid(port_id, ENABLED_WARN))
3295 if (vlan_id_is_invalid(vlan_id))
3297 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
3300 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
3302 port_id, vlan_id, on, diag);
3307 rx_vlan_all_filter_set(portid_t port_id, int on)
3311 if (port_id_is_invalid(port_id, ENABLED_WARN))
3313 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
3314 if (rx_vft_set(port_id, vlan_id, on))
3320 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
3324 if (port_id_is_invalid(port_id, ENABLED_WARN))
3327 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
3331 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed "
3333 port_id, vlan_type, tp_id, diag);
3337 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
3339 struct rte_eth_dev_info dev_info;
3342 if (port_id_is_invalid(port_id, ENABLED_WARN))
3344 if (vlan_id_is_invalid(vlan_id))
3347 if (ports[port_id].dev_conf.txmode.offloads &
3348 DEV_TX_OFFLOAD_QINQ_INSERT) {
3349 printf("Error, as QinQ has been enabled.\n");
3353 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3357 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
3358 printf("Error: vlan insert is not supported by port %d\n",
3363 tx_vlan_reset(port_id);
3364 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
3365 ports[port_id].tx_vlan_id = vlan_id;
3369 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
3371 struct rte_eth_dev_info dev_info;
3374 if (port_id_is_invalid(port_id, ENABLED_WARN))
3376 if (vlan_id_is_invalid(vlan_id))
3378 if (vlan_id_is_invalid(vlan_id_outer))
3381 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3385 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
3386 printf("Error: qinq insert not supported by port %d\n",
3391 tx_vlan_reset(port_id);
3392 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
3393 DEV_TX_OFFLOAD_QINQ_INSERT);
3394 ports[port_id].tx_vlan_id = vlan_id;
3395 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
3399 tx_vlan_reset(portid_t port_id)
3401 if (port_id_is_invalid(port_id, ENABLED_WARN))
3403 ports[port_id].dev_conf.txmode.offloads &=
3404 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
3405 DEV_TX_OFFLOAD_QINQ_INSERT);
3406 ports[port_id].tx_vlan_id = 0;
3407 ports[port_id].tx_vlan_id_outer = 0;
3411 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
3413 if (port_id_is_invalid(port_id, ENABLED_WARN))
3416 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
3420 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
3423 uint8_t existing_mapping_found = 0;
3425 if (port_id_is_invalid(port_id, ENABLED_WARN))
3428 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
3431 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
3432 printf("map_value not in required range 0..%d\n",
3433 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
if (!is_rx) { /* then TX */
3438 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3439 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3440 (tx_queue_stats_mappings[i].queue_id == queue_id)) {
3441 tx_queue_stats_mappings[i].stats_counter_id = map_value;
3442 existing_mapping_found = 1;
3446 if (!existing_mapping_found) { /* A new additional mapping... */
3447 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
3448 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
3449 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
3450 nb_tx_queue_stats_mappings++;
3454 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3455 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3456 (rx_queue_stats_mappings[i].queue_id == queue_id)) {
3457 rx_queue_stats_mappings[i].stats_counter_id = map_value;
3458 existing_mapping_found = 1;
3462 if (!existing_mapping_found) { /* A new additional mapping... */
3463 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
3464 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
3465 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
3466 nb_rx_queue_stats_mappings++;
3472 set_xstats_hide_zero(uint8_t on_off)
3474 xstats_hide_zero = on_off;
3478 print_fdir_mask(struct rte_eth_fdir_masks *mask)
3480 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
3482 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3483 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
3484 " tunnel_id: 0x%08x",
3485 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
3486 rte_be_to_cpu_32(mask->tunnel_id_mask));
3487 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3488 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
3489 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
3490 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
3492 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
3493 rte_be_to_cpu_16(mask->src_port_mask),
3494 rte_be_to_cpu_16(mask->dst_port_mask));
3496 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3497 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
3498 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
3499 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
3500 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
3502 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3503 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
3504 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
3505 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
3506 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
3513 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3515 struct rte_eth_flex_payload_cfg *cfg;
3518 for (i = 0; i < flex_conf->nb_payloads; i++) {
3519 cfg = &flex_conf->flex_set[i];
3520 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
3522 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
3523 printf("\n L2_PAYLOAD: ");
3524 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
3525 printf("\n L3_PAYLOAD: ");
3526 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
3527 printf("\n L4_PAYLOAD: ");
3529 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
3530 for (j = 0; j < num; j++)
3531 printf(" %-5u", cfg->src_offset[j]);
3537 flowtype_to_str(uint16_t flow_type)
3539 struct flow_type_info {
3545 static struct flow_type_info flowtype_str_table[] = {
3546 {"raw", RTE_ETH_FLOW_RAW},
3547 {"ipv4", RTE_ETH_FLOW_IPV4},
3548 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
3549 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
3550 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
3551 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
3552 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
3553 {"ipv6", RTE_ETH_FLOW_IPV6},
3554 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
3555 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
3556 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
3557 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
3558 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
3559 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
3560 {"port", RTE_ETH_FLOW_PORT},
3561 {"vxlan", RTE_ETH_FLOW_VXLAN},
3562 {"geneve", RTE_ETH_FLOW_GENEVE},
3563 {"nvgre", RTE_ETH_FLOW_NVGRE},
3564 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
3567 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
3568 if (flowtype_str_table[i].ftype == flow_type)
3569 return flowtype_str_table[i].str;
3576 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3578 struct rte_eth_fdir_flex_mask *mask;
3582 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
3583 mask = &flex_conf->flex_mask[i];
3584 p = flowtype_to_str(mask->flow_type);
3585 printf("\n %s:\t", p ? p : "unknown");
3586 for (j = 0; j < num; j++)
3587 printf(" %02x", mask->mask[j]);
3593 print_fdir_flow_type(uint32_t flow_types_mask)
3598 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3599 if (!(flow_types_mask & (1 << i)))
3601 p = flowtype_to_str(i);
3611 fdir_get_infos(portid_t port_id)
3613 struct rte_eth_fdir_stats fdir_stat;
3614 struct rte_eth_fdir_info fdir_info;
3617 static const char *fdir_stats_border = "########################";
3619 if (port_id_is_invalid(port_id, ENABLED_WARN))
3621 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
3623 printf("\n FDIR is not supported on port %-2d\n",
3628 memset(&fdir_info, 0, sizeof(fdir_info));
3629 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3630 RTE_ETH_FILTER_INFO, &fdir_info);
3631 memset(&fdir_stat, 0, sizeof(fdir_stat));
3632 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3633 RTE_ETH_FILTER_STATS, &fdir_stat);
3634 printf("\n %s FDIR infos for port %-2d %s\n",
3635 fdir_stats_border, port_id, fdir_stats_border);
3637 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
3638 printf(" PERFECT\n");
3639 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
3640 printf(" PERFECT-MAC-VLAN\n");
3641 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3642 printf(" PERFECT-TUNNEL\n");
3643 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
3644 printf(" SIGNATURE\n");
3646 printf(" DISABLE\n");
3647 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
3648 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
3649 printf(" SUPPORTED FLOW TYPE: ");
3650 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
3652 printf(" FLEX PAYLOAD INFO:\n");
3653 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
3654 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
3655 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
3656 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
3657 fdir_info.flex_payload_unit,
3658 fdir_info.max_flex_payload_segment_num,
3659 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
3661 print_fdir_mask(&fdir_info.mask);
3662 if (fdir_info.flex_conf.nb_payloads > 0) {
3663 printf(" FLEX PAYLOAD SRC OFFSET:");
3664 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3666 if (fdir_info.flex_conf.nb_flexmasks > 0) {
3667 printf(" FLEX MASK CFG:");
3668 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3670 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
3671 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
3672 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
3673 fdir_info.guarant_spc, fdir_info.best_spc);
3674 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
3675 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
3676 " add: %-10"PRIu64" remove: %"PRIu64"\n"
3677 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
3678 fdir_stat.collision, fdir_stat.free,
3679 fdir_stat.maxhash, fdir_stat.maxlen,
3680 fdir_stat.add, fdir_stat.remove,
3681 fdir_stat.f_add, fdir_stat.f_remove);
3682 printf(" %s############################%s\n",
3683 fdir_stats_border, fdir_stats_border);
3687 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3689 struct rte_port *port;
3690 struct rte_eth_fdir_flex_conf *flex_conf;
3693 port = &ports[port_id];
3694 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3695 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3696 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3701 if (i >= RTE_ETH_FLOW_MAX) {
3702 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3703 idx = flex_conf->nb_flexmasks;
3704 flex_conf->nb_flexmasks++;
3706 printf("The flex mask table is full. Can not set flex"
3707 " mask for flow_type(%u).", cfg->flow_type);
3711 rte_memcpy(&flex_conf->flex_mask[idx],
3713 sizeof(struct rte_eth_fdir_flex_mask));
3717 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3719 struct rte_port *port;
3720 struct rte_eth_fdir_flex_conf *flex_conf;
3723 port = &ports[port_id];
3724 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3725 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3726 if (cfg->type == flex_conf->flex_set[i].type) {
3731 if (i >= RTE_ETH_PAYLOAD_MAX) {
3732 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3733 idx = flex_conf->nb_payloads;
3734 flex_conf->nb_payloads++;
3736 printf("The flex payload table is full. Can not set"
3737 " flex payload for type(%u).", cfg->type);
3741 rte_memcpy(&flex_conf->flex_set[idx],
3743 sizeof(struct rte_eth_flex_payload_cfg));
3748 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3750 #ifdef RTE_LIBRTE_IXGBE_PMD
3754 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3756 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3760 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3761 is_rx ? "rx" : "tx", port_id, diag);
3764 printf("VF %s setting not supported for port %d\n",
3765 is_rx ? "Rx" : "Tx", port_id);
3771 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3774 struct rte_eth_link link;
3777 if (port_id_is_invalid(port_id, ENABLED_WARN))
3779 ret = eth_link_get_nowait_print_err(port_id, &link);
3782 if (rate > link.link_speed) {
3783 printf("Invalid rate value:%u bigger than link speed: %u\n",
3784 rate, link.link_speed);
3787 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3790 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3796 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
3798 int diag = -ENOTSUP;
3802 RTE_SET_USED(q_msk);
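/* Try each PMD-specific rate-limit API in turn until one accepts the port. */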
3804 #ifdef RTE_LIBRTE_IXGBE_PMD
3805 if (diag == -ENOTSUP)
3806 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
3809 #ifdef RTE_LIBRTE_BNXT_PMD
3810 if (diag == -ENOTSUP)
3811 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
3816 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
3822 * Functions to manage the set of filtered Multicast MAC addresses.
3824 * A pool of filtered multicast MAC addresses is associated with each port.
3825 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3826 * The address of the pool and the number of valid multicast MAC addresses
3827 * recorded in the pool are stored in the fields "mc_addr_pool" and
3828 * "mc_addr_nb" of the "rte_port" data structure.
* The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
* that it be supplied a contiguous array of multicast MAC addresses.
* To comply with this constraint, the set of multicast addresses recorded
* in the pool is systematically compacted at the beginning of the pool.
3834 * Hence, when a multicast address is removed from the pool, all following
3835 * addresses, if any, are copied back to keep the set contiguous.
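*
* For example (illustrative), removing the address at index 1 from a
* pool of three moves the entry at index 2 down to index 1, keeping the
* remaining set contiguous for rte_eth_dev_set_mc_addr_list().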
3837 #define MCAST_POOL_INC 32
3840 mcast_addr_pool_extend(struct rte_port *port)
3842 struct rte_ether_addr *mc_pool;
3843 size_t mc_pool_size;
3846 * If a free entry is available at the end of the pool, just
3847 * increment the number of recorded multicast addresses.
3849 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3855 * [re]allocate a pool with MCAST_POOL_INC more entries.
3856 * The previous test guarantees that port->mc_addr_nb is a multiple
3857 * of MCAST_POOL_INC.
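*
* For example (illustrative), with MCAST_POOL_INC == 32, appending the
* 33rd address reallocates the pool from 32 to 64 entries.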
3859 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
3861 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
3863 if (mc_pool == NULL) {
3864 printf("allocation of pool of %u multicast addresses failed\n",
3865 port->mc_addr_nb + MCAST_POOL_INC);
3869 port->mc_addr_pool = mc_pool;
3876 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
3878 if (mcast_addr_pool_extend(port) != 0)
3880 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
3884 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
3887 if (addr_idx == port->mc_addr_nb) {
/* No need to recompact the set of multicast addresses. */
3889 if (port->mc_addr_nb == 0) {
3890 /* free the pool of multicast addresses. */
3891 free(port->mc_addr_pool);
3892 port->mc_addr_pool = NULL;
3896 memmove(&port->mc_addr_pool[addr_idx],
3897 &port->mc_addr_pool[addr_idx + 1],
3898 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
3902 eth_port_multicast_addr_list_set(portid_t port_id)
3904 struct rte_port *port;
3907 port = &ports[port_id];
3908 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3911 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
3912 port_id, port->mc_addr_nb, diag);
3918 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
3920 struct rte_port *port;
3923 if (port_id_is_invalid(port_id, ENABLED_WARN))
3926 port = &ports[port_id];
3929 * Check that the added multicast MAC address is not already recorded
3930 * in the pool of multicast addresses.
3932 for (i = 0; i < port->mc_addr_nb; i++) {
3933 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3934 printf("multicast address already filtered by port\n");
3939 mcast_addr_pool_append(port, mc_addr);
3940 if (eth_port_multicast_addr_list_set(port_id) < 0)
3941 /* Rollback on failure, remove the address from the pool */
3942 mcast_addr_pool_remove(port, i);
3946 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
3948 struct rte_port *port;
3951 if (port_id_is_invalid(port_id, ENABLED_WARN))
3954 port = &ports[port_id];
3957 * Search the pool of multicast MAC addresses for the removed address.
3959 for (i = 0; i < port->mc_addr_nb; i++) {
3960 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
3963 if (i == port->mc_addr_nb) {
3964 printf("multicast address not filtered by port %d\n", port_id);
3968 mcast_addr_pool_remove(port, i);
3969 if (eth_port_multicast_addr_list_set(port_id) < 0)
3970 /* Rollback on failure, add the address back into the pool */
3971 mcast_addr_pool_append(port, mc_addr);
3975 port_dcb_info_display(portid_t port_id)
3977 struct rte_eth_dcb_info dcb_info;
3980 static const char *border = "================";
3982 if (port_id_is_invalid(port_id, ENABLED_WARN))
3985 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
3987 printf("\n Failed to get dcb infos on port %-2d\n",
3991 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
3992 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
3994 for (i = 0; i < dcb_info.nb_tcs; i++)
3996 printf("\n Priority : ");
3997 for (i = 0; i < dcb_info.nb_tcs; i++)
3998 printf("\t%4d", dcb_info.prio_tc[i]);
3999 printf("\n BW percent :");
4000 for (i = 0; i < dcb_info.nb_tcs; i++)
4001 printf("\t%4d%%", dcb_info.tc_bws[i]);
4002 printf("\n RXQ base : ");
4003 for (i = 0; i < dcb_info.nb_tcs; i++)
4004 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
4005 printf("\n RXQ number :");
4006 for (i = 0; i < dcb_info.nb_tcs; i++)
4007 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
4008 printf("\n TXQ base : ");
4009 for (i = 0; i < dcb_info.nb_tcs; i++)
4010 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
4011 printf("\n TXQ number :");
4012 for (i = 0; i < dcb_info.nb_tcs; i++)
4013 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
4018 open_file(const char *file_path, uint32_t *size)
4020 int fd = open(file_path, O_RDONLY);
4022 uint8_t *buf = NULL;
4030 printf("%s: Failed to open %s\n", __func__, file_path);
4034 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
4036 printf("%s: File operations failed\n", __func__);
4040 pkg_size = st_buf.st_size;
4043 printf("%s: File operations failed\n", __func__);
4047 buf = (uint8_t *)malloc(pkg_size);
4050 printf("%s: Failed to malloc memory\n", __func__);
4054 ret = read(fd, buf, pkg_size);
4057 printf("%s: File read operation failed\n", __func__);
4071 save_file(const char *file_path, uint8_t *buf, uint32_t size)
4073 FILE *fh = fopen(file_path, "wb");
4076 printf("%s: Failed to open %s\n", __func__, file_path);
4080 if (fwrite(buf, 1, size, fh) != size) {
4082 printf("%s: File write operation failed\n", __func__);
4092 close_file(uint8_t *buf)
4103 port_queue_region_info_display(portid_t port_id, void *buf)
4105 #ifdef RTE_LIBRTE_I40E_PMD
4107 struct rte_pmd_i40e_queue_regions *info =
4108 (struct rte_pmd_i40e_queue_regions *)buf;
4109 static const char *queue_region_info_stats_border = "-------";
4111 if (!info->queue_region_number)
4112 printf("there is no region has been set before");
4114 printf("\n %s All queue region info for port=%2d %s",
4115 queue_region_info_stats_border, port_id,
4116 queue_region_info_stats_border);
4117 printf("\n queue_region_number: %-14u \n",
4118 info->queue_region_number);
4120 for (i = 0; i < info->queue_region_number; i++) {
4121 printf("\n region_id: %-14u queue_number: %-14u "
4122 "queue_start_index: %-14u \n",
4123 info->region[i].region_id,
4124 info->region[i].queue_num,
4125 info->region[i].queue_start_index);
4127 printf(" user_priority_num is %-14u :",
4128 info->region[i].user_priority_num);
4129 for (j = 0; j < info->region[i].user_priority_num; j++)
4130 printf(" %-14u ", info->region[i].user_priority[j]);
4132 printf("\n flowtype_num is %-14u :",
4133 info->region[i].flowtype_num);
4134 for (j = 0; j < info->region[i].flowtype_num; j++)
4135 printf(" %-14u ", info->region[i].hw_flowtype[j]);
4138 RTE_SET_USED(port_id);
4146 show_macs(portid_t port_id)
4148 char buf[RTE_ETHER_ADDR_FMT_SIZE];
4149 struct rte_eth_dev_info dev_info;
4150 struct rte_ether_addr *addr;
4151 uint32_t i, num_macs = 0;
4152 struct rte_eth_dev *dev;
4154 dev = &rte_eth_devices[port_id];
4156 rte_eth_dev_info_get(port_id, &dev_info);
4158 for (i = 0; i < dev_info.max_mac_addrs; i++) {
4159 addr = &dev->data->mac_addrs[i];
4161 /* skip zero address */
4162 if (rte_is_zero_ether_addr(addr))
4168 printf("Number of MAC address added: %d\n", num_macs);
4170 for (i = 0; i < dev_info.max_mac_addrs; i++) {
4171 addr = &dev->data->mac_addrs[i];
4173 /* skip zero address */
4174 if (rte_is_zero_ether_addr(addr))
4177 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
4178 printf(" %s\n", buf);
4183 show_mcast_macs(portid_t port_id)
4185 char buf[RTE_ETHER_ADDR_FMT_SIZE];
4186 struct rte_ether_addr *addr;
4187 struct rte_port *port;
4190 port = &ports[port_id];
4192 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb);
4194 for (i = 0; i < port->mc_addr_nb; i++) {
4195 addr = &port->mc_addr_pool[i];
4197 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
4198 printf(" %s\n", buf);