1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2013-2014 6WIND S.A.
13 #include <sys/queue.h>
14 #include <sys/types.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
34 #include <rte_interrupts.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
41 #include <rte_errno.h>
42 #ifdef RTE_LIBRTE_IXGBE_PMD
43 #include <rte_pmd_ixgbe.h>
45 #ifdef RTE_LIBRTE_I40E_PMD
46 #include <rte_pmd_i40e.h>
48 #ifdef RTE_LIBRTE_BNXT_PMD
49 #include <rte_pmd_bnxt.h>
55 #define ETHDEV_FWVERS_LEN 32
57 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
58 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
60 #define CLOCK_TYPE_ID CLOCK_MONOTONIC
63 #define NS_PER_SEC 1E9
65 static char *flowtype_to_str(uint16_t flow_type);
68 enum tx_pkt_split split;
72 .split = TX_PKT_SPLIT_OFF,
76 .split = TX_PKT_SPLIT_ON,
80 .split = TX_PKT_SPLIT_RND,
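/* Map the RSS type names accepted on the command line to their ETH_RSS_* flag values. */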
85 const struct rss_type_info rss_type_table[] = {
86 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
87 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
88 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
91 { "eth", ETH_RSS_ETH },
92 { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
93 { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
94 { "vlan", ETH_RSS_VLAN },
95 { "s-vlan", ETH_RSS_S_VLAN },
96 { "c-vlan", ETH_RSS_C_VLAN },
97 { "ipv4", ETH_RSS_IPV4 },
98 { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
99 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
100 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
101 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
102 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
103 { "ipv6", ETH_RSS_IPV6 },
104 { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
105 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
106 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
107 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
108 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
109 { "l2-payload", ETH_RSS_L2_PAYLOAD },
110 { "ipv6-ex", ETH_RSS_IPV6_EX },
111 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
112 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
113 { "port", ETH_RSS_PORT },
114 { "vxlan", ETH_RSS_VXLAN },
115 { "geneve", ETH_RSS_GENEVE },
116 { "nvgre", ETH_RSS_NVGRE },
117 { "ip", ETH_RSS_IP },
118 { "udp", ETH_RSS_UDP },
119 { "tcp", ETH_RSS_TCP },
120 { "sctp", ETH_RSS_SCTP },
121 { "tunnel", ETH_RSS_TUNNEL },
122 { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
123 { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
124 { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
125 { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
126 { "esp", ETH_RSS_ESP },
127 { "ah", ETH_RSS_AH },
128 { "l2tpv3", ETH_RSS_L2TPV3 },
129 { "pfcp", ETH_RSS_PFCP },
130 { "pppoe", ETH_RSS_PPPOE },
131 { "gtpu", ETH_RSS_GTPU },
136 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
138 char buf[RTE_ETHER_ADDR_FMT_SIZE];
139 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
140 printf("%s%s", name, buf);
144 nic_stats_display(portid_t port_id)
146 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
147 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
148 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
149 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
150 static uint64_t prev_ns[RTE_MAX_ETHPORTS];
151 struct timespec cur_time;
152 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
154 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
155 struct rte_eth_stats stats;
156 struct rte_port *port = &ports[port_id];
159 static const char *nic_stats_border = "########################";
161 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
165 rte_eth_stats_get(port_id, &stats);
166 printf("\n %s NIC statistics for port %-2d %s\n",
167 nic_stats_border, port_id, nic_stats_border);
169 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
170 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
172 stats.ipackets, stats.imissed, stats.ibytes);
173 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
174 printf(" RX-nombuf: %-10"PRIu64"\n",
176 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
178 stats.opackets, stats.oerrors, stats.obytes);
181 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
182 " RX-bytes: %10"PRIu64"\n",
183 stats.ipackets, stats.ierrors, stats.ibytes);
184 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
185 printf(" RX-nombuf: %10"PRIu64"\n",
187 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
188 " TX-bytes: %10"PRIu64"\n",
189 stats.opackets, stats.oerrors, stats.obytes);
192 if (port->rx_queue_stats_mapping_enabled) {
194 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
195 printf(" Stats reg %2d RX-packets: %10"PRIu64
196 " RX-errors: %10"PRIu64
197 " RX-bytes: %10"PRIu64"\n",
198 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
201 if (port->tx_queue_stats_mapping_enabled) {
203 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
204 printf(" Stats reg %2d TX-packets: %10"PRIu64
205 " TX-bytes: %10"PRIu64"\n",
206 i, stats.q_opackets[i], stats.q_obytes[i]);
211 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
214 ns = cur_time.tv_sec * NS_PER_SEC;
215 ns += cur_time.tv_nsec;
217 if (prev_ns[port_id] != 0)
218 diff_ns = ns - prev_ns[port_id];
219 prev_ns[port_id] = ns;
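/*
 * Throughput below is computed from the counter deltas against the previous
 * snapshot, scaled to per-second rates using the elapsed time in nanoseconds.
 */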
222 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
223 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
224 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
225 (stats.opackets - prev_pkts_tx[port_id]) : 0;
226 prev_pkts_rx[port_id] = stats.ipackets;
227 prev_pkts_tx[port_id] = stats.opackets;
228 mpps_rx = diff_ns > 0 ?
229 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
230 mpps_tx = diff_ns > 0 ?
231 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;
233 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
234 (stats.ibytes - prev_bytes_rx[port_id]) : 0;
235 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
236 (stats.obytes - prev_bytes_tx[port_id]) : 0;
237 prev_bytes_rx[port_id] = stats.ibytes;
238 prev_bytes_tx[port_id] = stats.obytes;
239 mbps_rx = diff_ns > 0 ?
240 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
241 mbps_tx = diff_ns > 0 ?
242 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;
244 printf("\n Throughput (since last show)\n");
245 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
246 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
247 mpps_tx, mbps_tx * 8);
249 printf(" %s############################%s\n",
250 nic_stats_border, nic_stats_border);
254 nic_stats_clear(portid_t port_id)
258 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
263 ret = rte_eth_stats_reset(port_id);
265 printf("%s: Error: failed to reset stats (port %u): %s\n",
266 __func__, port_id, strerror(-ret));
270 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
274 printf("%s: Error: failed to get stats (port %u): %s\n",
275 __func__, port_id, strerror(ret));
278 printf("\n NIC statistics for port %d cleared\n", port_id);
282 nic_xstats_display(portid_t port_id)
284 struct rte_eth_xstat *xstats;
285 int cnt_xstats, idx_xstat;
286 struct rte_eth_xstat_name *xstats_names;
288 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
292 printf("###### NIC extended statistics for port %-2d\n", port_id);
293 if (!rte_eth_dev_is_valid_port(port_id)) {
294 printf("Error: Invalid port number %i\n", port_id);
299 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
300 if (cnt_xstats < 0) {
301 printf("Error: Cannot get count of xstats\n");
305 /* Get id-name lookup table */
306 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
307 if (xstats_names == NULL) {
308 printf("Cannot allocate memory for xstats lookup\n");
311 if (cnt_xstats != rte_eth_xstats_get_names(
312 port_id, xstats_names, cnt_xstats)) {
313 printf("Error: Cannot get xstats lookup\n");
318 /* Get stats themselves */
319 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
320 if (xstats == NULL) {
321 printf("Cannot allocate memory for xstats\n");
325 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
326 printf("Error: Unable to get xstats\n");
333 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
334 if (xstats_hide_zero && !xstats[idx_xstat].value)
336 printf("%s: %"PRIu64"\n",
337 xstats_names[idx_xstat].name,
338 xstats[idx_xstat].value);
345 nic_xstats_clear(portid_t port_id)
349 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
354 ret = rte_eth_xstats_reset(port_id);
356 printf("%s: Error: failed to reset xstats (port %u): %s\n",
357 __func__, port_id, strerror(-ret));
361 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
365 printf("%s: Error: failed to get stats (port %u): %s\n",
366 __func__, port_id, strerror(ret));
372 nic_stats_mapping_display(portid_t port_id)
374 struct rte_port *port = &ports[port_id];
377 static const char *nic_stats_mapping_border = "########################";
379 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
384 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
385 printf("Port id %d - either does not support queue statistics mapping or"
386 " no queue statistics mapping is set\n", port_id);
390 printf("\n %s NIC statistics mapping for port %-2d %s\n",
391 nic_stats_mapping_border, port_id, nic_stats_mapping_border);
393 if (port->rx_queue_stats_mapping_enabled) {
394 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
395 if (rx_queue_stats_mappings[i].port_id == port_id) {
396 printf(" RX-queue %2d mapped to Stats Reg %2d\n",
397 rx_queue_stats_mappings[i].queue_id,
398 rx_queue_stats_mappings[i].stats_counter_id);
405 if (port->tx_queue_stats_mapping_enabled) {
406 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
407 if (tx_queue_stats_mappings[i].port_id == port_id) {
408 printf(" TX-queue %2d mapped to Stats Reg %2d\n",
409 tx_queue_stats_mappings[i].queue_id,
410 tx_queue_stats_mappings[i].stats_counter_id);
415 printf(" %s####################################%s\n",
416 nic_stats_mapping_border, nic_stats_mapping_border);
420 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
422 struct rte_eth_burst_mode mode;
423 struct rte_eth_rxq_info qinfo;
425 static const char *info_border = "*********************";
427 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
429 printf("Failed to retrieve information for port: %u, "
430 "RX queue: %hu\nerror desc: %s(%d)\n",
431 port_id, queue_id, strerror(-rc), rc);
435 printf("\n%s Infos for port %-2u, RX queue %-2u %s",
436 info_border, port_id, queue_id, info_border);
438 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
439 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
440 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
441 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
442 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
443 printf("\nRX drop packets: %s",
444 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
445 printf("\nRX deferred start: %s",
446 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
447 printf("\nRX scattered packets: %s",
448 (qinfo.scattered_rx != 0) ? "on" : "off");
449 printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
451 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
452 printf("\nBurst mode: %s%s",
454 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
455 " (per queue)" : "");
461 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
463 struct rte_eth_burst_mode mode;
464 struct rte_eth_txq_info qinfo;
466 static const char *info_border = "*********************";
468 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
470 printf("Failed to retrieve information for port: %u, "
471 "TX queue: %hu\nerror desc: %s(%d)\n",
472 port_id, queue_id, strerror(-rc), rc);
476 printf("\n%s Infos for port %-2u, TX queue %-2u %s",
477 info_border, port_id, queue_id, info_border);
479 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
480 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
481 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
482 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
483 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
484 printf("\nTX deferred start: %s",
485 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
486 printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
488 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
489 printf("\nBurst mode: %s%s",
491 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
492 " (per queue)" : "");
497 static int bus_match_all(const struct rte_bus *bus, const void *data)
505 device_infos_display(const char *identifier)
507 static const char *info_border = "*********************";
508 struct rte_bus *start = NULL, *next;
509 struct rte_dev_iterator dev_iter;
510 char name[RTE_ETH_NAME_MAX_LEN];
511 struct rte_ether_addr mac_addr;
512 struct rte_device *dev;
513 struct rte_devargs da;
517 memset(&da, 0, sizeof(da));
521 if (rte_devargs_parsef(&da, "%s", identifier)) {
522 printf("cannot parse identifier\n");
529 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
532 if (identifier && da.bus != next)
535 /* Skip buses that don't have iterate method */
536 if (!next->dev_iterate)
539 snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
540 RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
544 /* Check for matching device if identifier is present */
546 strncmp(da.name, dev->name, strlen(dev->name)))
548 printf("\n%s Infos for device %s %s\n",
549 info_border, dev->name, info_border);
550 printf("Bus name: %s", dev->bus->name);
551 printf("\nDriver name: %s", dev->driver->name);
552 printf("\nDevargs: %s",
553 dev->devargs ? dev->devargs->args : "");
554 printf("\nConnect to socket: %d", dev->numa_node);
557 /* List ports with matching device name */
558 RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
559 printf("\n\tPort id: %-2d", port_id);
560 if (eth_macaddr_get_print_err(port_id,
562 print_ethaddr("\n\tMAC address: ",
564 rte_eth_dev_get_name_by_port(port_id, name);
565 printf("\n\tDevice name: %s", name);
573 port_infos_display(portid_t port_id)
575 struct rte_port *port;
576 struct rte_ether_addr mac_addr;
577 struct rte_eth_link link;
578 struct rte_eth_dev_info dev_info;
580 struct rte_mempool * mp;
581 static const char *info_border = "*********************";
583 char name[RTE_ETH_NAME_MAX_LEN];
585 char fw_version[ETHDEV_FWVERS_LEN];
587 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
591 port = &ports[port_id];
592 ret = eth_link_get_nowait_print_err(port_id, &link);
596 ret = eth_dev_info_get_print_err(port_id, &dev_info);
600 printf("\n%s Infos for port %-2d %s\n",
601 info_border, port_id, info_border);
602 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
603 print_ethaddr("MAC address: ", &mac_addr);
604 rte_eth_dev_get_name_by_port(port_id, name);
605 printf("\nDevice name: %s", name);
606 printf("\nDriver name: %s", dev_info.driver_name);
608 if (rte_eth_dev_fw_version_get(port_id, fw_version,
609 ETHDEV_FWVERS_LEN) == 0)
610 printf("\nFirmware-version: %s", fw_version);
612 printf("\nFirmware-version: %s", "not available");
614 if (dev_info.device->devargs && dev_info.device->devargs->args)
615 printf("\nDevargs: %s", dev_info.device->devargs->args);
616 printf("\nConnect to socket: %u", port->socket_id);
618 if (port_numa[port_id] != NUMA_NO_CONFIG) {
619 mp = mbuf_pool_find(port_numa[port_id]);
621 printf("\nmemory allocation on the socket: %d",
624 printf("\nmemory allocation on the socket: %u", port->socket_id);
626 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
627 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
628 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
629 ("full-duplex") : ("half-duplex"));
631 if (!rte_eth_dev_get_mtu(port_id, &mtu))
632 printf("MTU: %u\n", mtu);
634 printf("Promiscuous mode: %s\n",
635 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
636 printf("Allmulticast mode: %s\n",
637 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
638 printf("Maximum number of MAC addresses: %u\n",
639 (unsigned int)(port->dev_info.max_mac_addrs));
640 printf("Maximum number of MAC addresses of hash filtering: %u\n",
641 (unsigned int)(port->dev_info.max_hash_mac_addrs));
643 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
644 if (vlan_offload >= 0) {
645 printf("VLAN offload:\n");
646 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
647 printf(" strip on, ");
649 printf(" strip off, ");
651 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
652 printf("filter on, ");
654 printf("filter off, ");
656 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
657 printf("extend on, ");
659 printf("extend off, ");
661 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
662 printf("qinq strip on\n");
664 printf("qinq strip off\n");
667 if (dev_info.hash_key_size > 0)
668 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
669 if (dev_info.reta_size > 0)
670 printf("Redirection table size: %u\n", dev_info.reta_size);
671 if (!dev_info.flow_type_rss_offloads)
672 printf("No RSS offload flow type is supported.\n");
677 printf("Supported RSS offload flow types:\n");
678 for (i = RTE_ETH_FLOW_UNKNOWN + 1;
679 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
680 if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
682 p = flowtype_to_str(i);
686 printf(" user defined %d\n", i);
690 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
691 printf("Maximum configurable length of RX packet: %u\n",
692 dev_info.max_rx_pktlen);
693 printf("Maximum configurable size of LRO aggregated packet: %u\n",
694 dev_info.max_lro_pkt_size);
695 if (dev_info.max_vfs)
696 printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
697 if (dev_info.max_vmdq_pools)
698 printf("Maximum number of VMDq pools: %u\n",
699 dev_info.max_vmdq_pools);
701 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
702 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
703 printf("Max possible number of RXDs per queue: %hu\n",
704 dev_info.rx_desc_lim.nb_max);
705 printf("Min possible number of RXDs per queue: %hu\n",
706 dev_info.rx_desc_lim.nb_min);
707 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
709 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
710 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
711 printf("Max possible number of TXDs per queue: %hu\n",
712 dev_info.tx_desc_lim.nb_max);
713 printf("Min possible number of TXDs per queue: %hu\n",
714 dev_info.tx_desc_lim.nb_min);
715 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
716 printf("Max segment number per packet: %hu\n",
717 dev_info.tx_desc_lim.nb_seg_max);
718 printf("Max segment number per MTU/TSO: %hu\n",
719 dev_info.tx_desc_lim.nb_mtu_seg_max);
721 /* Show switch info only if valid switch domain and port id is set */
722 if (dev_info.switch_info.domain_id !=
723 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
724 if (dev_info.switch_info.name)
725 printf("Switch name: %s\n", dev_info.switch_info.name);
727 printf("Switch domain Id: %u\n",
728 dev_info.switch_info.domain_id);
729 printf("Switch Port Id: %u\n",
730 dev_info.switch_info.port_id);
735 port_summary_header_display(void)
737 uint16_t port_number;
739 port_number = rte_eth_dev_count_avail();
740 printf("Number of available ports: %i\n", port_number);
741 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
742 "Driver", "Status", "Link");
746 port_summary_display(portid_t port_id)
748 struct rte_ether_addr mac_addr;
749 struct rte_eth_link link;
750 struct rte_eth_dev_info dev_info;
751 char name[RTE_ETH_NAME_MAX_LEN];
754 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
759 ret = eth_link_get_nowait_print_err(port_id, &link);
763 ret = eth_dev_info_get_print_err(port_id, &dev_info);
767 rte_eth_dev_get_name_by_port(port_id, name);
768 ret = eth_macaddr_get_print_err(port_id, &mac_addr);
772 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
773 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
774 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
775 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
776 dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
777 (unsigned int) link.link_speed);
781 port_offload_cap_display(portid_t port_id)
783 struct rte_eth_dev_info dev_info;
784 static const char *info_border = "************";
787 if (port_id_is_invalid(port_id, ENABLED_WARN))
790 ret = eth_dev_info_get_print_err(port_id, &dev_info);
794 printf("\n%s Port %d supported offload features: %s\n",
795 info_border, port_id, info_border);
797 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
798 printf("VLAN stripped: ");
799 if (ports[port_id].dev_conf.rxmode.offloads &
800 DEV_RX_OFFLOAD_VLAN_STRIP)
806 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
807 printf("Double VLANs stripped: ");
808 if (ports[port_id].dev_conf.rxmode.offloads &
809 DEV_RX_OFFLOAD_QINQ_STRIP)
815 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
816 printf("RX IPv4 checksum: ");
817 if (ports[port_id].dev_conf.rxmode.offloads &
818 DEV_RX_OFFLOAD_IPV4_CKSUM)
824 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
825 printf("RX UDP checksum: ");
826 if (ports[port_id].dev_conf.rxmode.offloads &
827 DEV_RX_OFFLOAD_UDP_CKSUM)
833 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
834 printf("RX TCP checksum: ");
835 if (ports[port_id].dev_conf.rxmode.offloads &
836 DEV_RX_OFFLOAD_TCP_CKSUM)
842 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
843 printf("RX SCTP checksum: ");
844 if (ports[port_id].dev_conf.rxmode.offloads &
845 DEV_RX_OFFLOAD_SCTP_CKSUM)
851 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
852 printf("RX Outer IPv4 checksum: ");
853 if (ports[port_id].dev_conf.rxmode.offloads &
854 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
860 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
861 printf("RX Outer UDP checksum: ");
862 if (ports[port_id].dev_conf.rxmode.offloads &
863 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
869 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
870 printf("Large receive offload: ");
871 if (ports[port_id].dev_conf.rxmode.offloads &
872 DEV_RX_OFFLOAD_TCP_LRO)
878 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
879 printf("HW timestamp: ");
880 if (ports[port_id].dev_conf.rxmode.offloads &
881 DEV_RX_OFFLOAD_TIMESTAMP)
887 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
888 printf("Rx Keep CRC: ");
889 if (ports[port_id].dev_conf.rxmode.offloads &
890 DEV_RX_OFFLOAD_KEEP_CRC)
896 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
897 printf("RX offload security: ");
898 if (ports[port_id].dev_conf.rxmode.offloads &
899 DEV_RX_OFFLOAD_SECURITY)
905 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
906 printf("VLAN insert: ");
907 if (ports[port_id].dev_conf.txmode.offloads &
908 DEV_TX_OFFLOAD_VLAN_INSERT)
914 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
915 printf("Double VLANs insert: ");
916 if (ports[port_id].dev_conf.txmode.offloads &
917 DEV_TX_OFFLOAD_QINQ_INSERT)
923 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
924 printf("TX IPv4 checksum: ");
925 if (ports[port_id].dev_conf.txmode.offloads &
926 DEV_TX_OFFLOAD_IPV4_CKSUM)
932 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
933 printf("TX UDP checksum: ");
934 if (ports[port_id].dev_conf.txmode.offloads &
935 DEV_TX_OFFLOAD_UDP_CKSUM)
941 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
942 printf("TX TCP checksum: ");
943 if (ports[port_id].dev_conf.txmode.offloads &
944 DEV_TX_OFFLOAD_TCP_CKSUM)
950 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
951 printf("TX SCTP checksum: ");
952 if (ports[port_id].dev_conf.txmode.offloads &
953 DEV_TX_OFFLOAD_SCTP_CKSUM)
959 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
960 printf("TX Outer IPv4 checksum: ");
961 if (ports[port_id].dev_conf.txmode.offloads &
962 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
968 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
969 printf("TX TCP segmentation: ");
970 if (ports[port_id].dev_conf.txmode.offloads &
971 DEV_TX_OFFLOAD_TCP_TSO)
977 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
978 printf("TX UDP segmentation: ");
979 if (ports[port_id].dev_conf.txmode.offloads &
980 DEV_TX_OFFLOAD_UDP_TSO)
986 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
987 printf("TSO for VXLAN tunnel packet: ");
988 if (ports[port_id].dev_conf.txmode.offloads &
989 DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
995 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
996 printf("TSO for GRE tunnel packet: ");
997 if (ports[port_id].dev_conf.txmode.offloads &
998 DEV_TX_OFFLOAD_GRE_TNL_TSO)
1004 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
1005 printf("TSO for IPIP tunnel packet: ");
1006 if (ports[port_id].dev_conf.txmode.offloads &
1007 DEV_TX_OFFLOAD_IPIP_TNL_TSO)
1013 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
1014 printf("TSO for GENEVE tunnel packet: ");
1015 if (ports[port_id].dev_conf.txmode.offloads &
1016 DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
1022 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
1023 printf("IP tunnel TSO: ");
1024 if (ports[port_id].dev_conf.txmode.offloads &
1025 DEV_TX_OFFLOAD_IP_TNL_TSO)
1031 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
1032 printf("UDP tunnel TSO: ");
1033 if (ports[port_id].dev_conf.txmode.offloads &
1034 DEV_TX_OFFLOAD_UDP_TNL_TSO)
1040 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
1041 printf("TX Outer UDP checksum: ");
1042 if (ports[port_id].dev_conf.txmode.offloads &
1043 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
1052 port_id_is_invalid(portid_t port_id, enum print_warning warning)
1056 if (port_id == (portid_t)RTE_PORT_ALL)
1059 RTE_ETH_FOREACH_DEV(pid)
1063 if (warning == ENABLED_WARN)
1064 printf("Invalid port %d\n", port_id);
1069 void print_valid_ports(void)
1073 printf("The valid ports array is [");
1074 RTE_ETH_FOREACH_DEV(pid) {
1081 vlan_id_is_invalid(uint16_t vlan_id)
1085 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
1090 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
1092 const struct rte_pci_device *pci_dev;
1093 const struct rte_bus *bus;
1096 if (reg_off & 0x3) {
1097 printf("Port register offset 0x%X not aligned on a 4-byte "
1103 if (!ports[port_id].dev_info.device) {
1104 printf("Invalid device\n");
1108 bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
1109 if (bus && !strcmp(bus->name, "pci")) {
1110 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
1112 printf("Not a PCI device\n");
1116 pci_len = pci_dev->mem_resource[0].len;
1117 if (reg_off >= pci_len) {
1118 printf("Port %d: register offset %u (0x%X) out of port PCI "
1119 "resource (length=%"PRIu64")\n",
1120 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
1127 reg_bit_pos_is_invalid(uint8_t bit_pos)
1131 printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
1135 #define display_port_and_reg_off(port_id, reg_off) \
1136 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
1139 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1141 display_port_and_reg_off(port_id, (unsigned)reg_off);
1142 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
1146 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
1151 if (port_id_is_invalid(port_id, ENABLED_WARN))
1153 if (port_reg_off_is_invalid(port_id, reg_off))
1155 if (reg_bit_pos_is_invalid(bit_x))
1157 reg_v = port_id_pci_reg_read(port_id, reg_off);
1158 display_port_and_reg_off(port_id, (unsigned)reg_off);
1159 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
1163 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
1164 uint8_t bit1_pos, uint8_t bit2_pos)
1170 if (port_id_is_invalid(port_id, ENABLED_WARN))
1172 if (port_reg_off_is_invalid(port_id, reg_off))
1174 if (reg_bit_pos_is_invalid(bit1_pos))
1176 if (reg_bit_pos_is_invalid(bit2_pos))
1178 if (bit1_pos > bit2_pos)
1179 l_bit = bit2_pos, h_bit = bit1_pos;
1181 l_bit = bit1_pos, h_bit = bit2_pos;
1183 reg_v = port_id_pci_reg_read(port_id, reg_off);
1186 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
1187 display_port_and_reg_off(port_id, (unsigned)reg_off);
1188 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
1189 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
1193 port_reg_display(portid_t port_id, uint32_t reg_off)
1197 if (port_id_is_invalid(port_id, ENABLED_WARN))
1199 if (port_reg_off_is_invalid(port_id, reg_off))
1201 reg_v = port_id_pci_reg_read(port_id, reg_off);
1202 display_port_reg_value(port_id, reg_off, reg_v);
1206 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
1211 if (port_id_is_invalid(port_id, ENABLED_WARN))
1213 if (port_reg_off_is_invalid(port_id, reg_off))
1215 if (reg_bit_pos_is_invalid(bit_pos))
1218 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
1221 reg_v = port_id_pci_reg_read(port_id, reg_off);
1223 reg_v &= ~(1 << bit_pos);
1225 reg_v |= (1 << bit_pos);
1226 port_id_pci_reg_write(port_id, reg_off, reg_v);
1227 display_port_reg_value(port_id, reg_off, reg_v);
1231 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
1232 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
1239 if (port_id_is_invalid(port_id, ENABLED_WARN))
1241 if (port_reg_off_is_invalid(port_id, reg_off))
1243 if (reg_bit_pos_is_invalid(bit1_pos))
1245 if (reg_bit_pos_is_invalid(bit2_pos))
1247 if (bit1_pos > bit2_pos)
1248 l_bit = bit2_pos, h_bit = bit1_pos;
1250 l_bit = bit1_pos, h_bit = bit2_pos;
1252 if ((h_bit - l_bit) < 31)
1253 max_v = (1 << (h_bit - l_bit + 1)) - 1;
1257 if (value > max_v) {
1258 printf("Invalid value %u (0x%x) must be <= %u (0x%x)\n",
1259 (unsigned)value, (unsigned)value,
1260 (unsigned)max_v, (unsigned)max_v);
1263 reg_v = port_id_pci_reg_read(port_id, reg_off);
1264 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1265 reg_v |= (value << l_bit); /* Set changed bits */
1266 port_id_pci_reg_write(port_id, reg_off, reg_v);
1267 display_port_reg_value(port_id, reg_off, reg_v);
1271 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1273 if (port_id_is_invalid(port_id, ENABLED_WARN))
1275 if (port_reg_off_is_invalid(port_id, reg_off))
1277 port_id_pci_reg_write(port_id, reg_off, reg_v);
1278 display_port_reg_value(port_id, reg_off, reg_v);
1282 port_mtu_set(portid_t port_id, uint16_t mtu)
1285 struct rte_port *rte_port = &ports[port_id];
1286 struct rte_eth_dev_info dev_info;
1287 uint16_t eth_overhead;
1290 if (port_id_is_invalid(port_id, ENABLED_WARN))
1293 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1297 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
1298 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
1299 mtu, dev_info.min_mtu, dev_info.max_mtu);
1302 diag = rte_eth_dev_set_mtu(port_id, mtu);
1304 printf("Set MTU failed. diag=%d\n", diag);
1305 else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1307 * Ether overhead in driver is equal to the difference of
1308 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
1309 * device supports jumbo frame.
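 * For example, with a typical 18-byte overhead (Ethernet header plus CRC),
 * any MTU above 1500 bytes enables the jumbo frame offload.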
1311 eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
1312 if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) {
1313 rte_port->dev_conf.rxmode.offloads |=
1314 DEV_RX_OFFLOAD_JUMBO_FRAME;
1315 rte_port->dev_conf.rxmode.max_rx_pkt_len =
1318 rte_port->dev_conf.rxmode.offloads &=
1319 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1323 /* Generic flow management functions. */
1325 /** Generate a port_flow entry from attributes/pattern/actions. */
1326 static struct port_flow *
1327 port_flow_new(const struct rte_flow_attr *attr,
1328 const struct rte_flow_item *pattern,
1329 const struct rte_flow_action *actions,
1330 struct rte_flow_error *error)
1332 const struct rte_flow_conv_rule rule = {
1334 .pattern_ro = pattern,
1335 .actions_ro = actions,
1337 struct port_flow *pf;
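/* First query the storage size needed for the rule, then allocate and copy it. */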
1340 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1343 pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1346 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1350 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1357 /** Print a message out of a flow error. */
1359 port_flow_complain(struct rte_flow_error *error)
1361 static const char *const errstrlist[] = {
1362 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1363 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1364 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1365 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1366 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1367 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1368 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1369 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1370 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1371 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1372 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1373 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1374 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1375 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1376 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1377 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1378 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1382 int err = rte_errno;
1384 if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1385 !errstrlist[error->type])
1386 errstr = "unknown type";
1388 errstr = errstrlist[error->type];
1389 printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
1390 error->type, errstr,
1391 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1392 error->cause), buf) : "",
1393 error->message ? error->message : "(no stated reason)",
1399 rss_config_display(struct rte_flow_action_rss *rss_conf)
1403 if (rss_conf == NULL) {
1404 printf("Invalid rule\n");
1410 if (rss_conf->queue_num == 0)
1412 for (i = 0; i < rss_conf->queue_num; i++)
1413 printf("%d\n", rss_conf->queue[i]);
1415 printf(" function: ");
1416 switch (rss_conf->func) {
1417 case RTE_ETH_HASH_FUNCTION_DEFAULT:
1418 printf("default\n");
1420 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1421 printf("toeplitz\n");
1423 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1424 printf("simple_xor\n");
1426 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1427 printf("symmetric_toeplitz\n");
1430 printf("Unknown function\n");
1434 printf(" types:\n");
1435 if (rss_conf->types == 0) {
1439 for (i = 0; rss_type_table[i].str; i++) {
1440 if ((rss_conf->types &
1441 rss_type_table[i].rss_type) ==
1442 rss_type_table[i].rss_type &&
1443 rss_type_table[i].rss_type != 0)
1444 printf(" %s\n", rss_type_table[i].str);
1448 /** Validate flow rule. */
1450 port_flow_validate(portid_t port_id,
1451 const struct rte_flow_attr *attr,
1452 const struct rte_flow_item *pattern,
1453 const struct rte_flow_action *actions)
1455 struct rte_flow_error error;
1457 /* Poisoning to make sure PMDs update it in case of error. */
1458 memset(&error, 0x11, sizeof(error));
1459 if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1460 return port_flow_complain(&error);
1461 printf("Flow rule validated\n");
1465 /** Update age action context by port_flow pointer. */
1467 update_age_action_context(const struct rte_flow_action *actions,
1468 struct port_flow *pf)
1470 struct rte_flow_action_age *age = NULL;
1472 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1473 switch (actions->type) {
1474 case RTE_FLOW_ACTION_TYPE_AGE:
1475 age = (struct rte_flow_action_age *)
1476 (uintptr_t)actions->conf;
1485 /** Create flow rule. */
1487 port_flow_create(portid_t port_id,
1488 const struct rte_flow_attr *attr,
1489 const struct rte_flow_item *pattern,
1490 const struct rte_flow_action *actions)
1492 struct rte_flow *flow;
1493 struct rte_port *port;
1494 struct port_flow *pf;
1496 struct rte_flow_error error;
1498 port = &ports[port_id];
1499 if (port->flow_list) {
1500 if (port->flow_list->id == UINT32_MAX) {
1501 printf("Highest rule ID is already assigned, delete"
1505 id = port->flow_list->id + 1;
1507 pf = port_flow_new(attr, pattern, actions, &error);
1509 return port_flow_complain(&error);
1510 update_age_action_context(actions, pf);
1511 /* Poisoning to make sure PMDs update it in case of error. */
1512 memset(&error, 0x22, sizeof(error));
1513 flow = rte_flow_create(port_id, attr, pattern, actions, &error);
1516 return port_flow_complain(&error);
1518 pf->next = port->flow_list;
1521 port->flow_list = pf;
1522 printf("Flow rule #%u created\n", pf->id);
1526 /** Destroy a number of flow rules. */
1528 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
1530 struct rte_port *port;
1531 struct port_flow **tmp;
1535 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1536 port_id == (portid_t)RTE_PORT_ALL)
1538 port = &ports[port_id];
1539 tmp = &port->flow_list;
1543 for (i = 0; i != n; ++i) {
1544 struct rte_flow_error error;
1545 struct port_flow *pf = *tmp;
1547 if (rule[i] != pf->id)
1550 * Poisoning to make sure PMDs update it in case
1553 memset(&error, 0x33, sizeof(error));
1554 if (rte_flow_destroy(port_id, pf->flow, &error)) {
1555 ret = port_flow_complain(&error);
1558 printf("Flow rule #%u destroyed\n", pf->id);
1564 tmp = &(*tmp)->next;
1570 /** Remove all flow rules. */
1572 port_flow_flush(portid_t port_id)
1574 struct rte_flow_error error;
1575 struct rte_port *port;
1578 /* Poisoning to make sure PMDs update it in case of error. */
1579 memset(&error, 0x44, sizeof(error));
1580 if (rte_flow_flush(port_id, &error)) {
1581 ret = port_flow_complain(&error);
1582 if (port_id_is_invalid(port_id, DISABLED_WARN) ||
1583 port_id == (portid_t)RTE_PORT_ALL)
1586 port = &ports[port_id];
1587 while (port->flow_list) {
1588 struct port_flow *pf = port->flow_list->next;
1590 free(port->flow_list);
1591 port->flow_list = pf;
1596 /** Dump all flow rules. */
1598 port_flow_dump(portid_t port_id, const char *file_name)
1601 FILE *file = stdout;
1602 struct rte_flow_error error;
1604 if (file_name && strlen(file_name)) {
1605 file = fopen(file_name, "w");
1607 printf("Failed to create file %s: %s\n", file_name,
1612 ret = rte_flow_dev_dump(port_id, file, &error);
1614 port_flow_complain(&error);
1615 printf("Failed to dump flow: %s\n", strerror(-ret));
1617 printf("Flow dump finished\n");
1618 if (file_name && strlen(file_name))
1623 /** Query a flow rule. */
1625 port_flow_query(portid_t port_id, uint32_t rule,
1626 const struct rte_flow_action *action)
1628 struct rte_flow_error error;
1629 struct rte_port *port;
1630 struct port_flow *pf;
1633 struct rte_flow_query_count count;
1634 struct rte_flow_action_rss rss_conf;
1638 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1639 port_id == (portid_t)RTE_PORT_ALL)
1641 port = &ports[port_id];
1642 for (pf = port->flow_list; pf; pf = pf->next)
1646 printf("Flow rule #%u not found\n", rule);
1649 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1650 &name, sizeof(name),
1651 (void *)(uintptr_t)action->type, &error);
1653 return port_flow_complain(&error);
1654 switch (action->type) {
1655 case RTE_FLOW_ACTION_TYPE_COUNT:
1656 case RTE_FLOW_ACTION_TYPE_RSS:
1659 printf("Cannot query action type %d (%s)\n",
1660 action->type, name);
1663 /* Poisoning to make sure PMDs update it in case of error. */
1664 memset(&error, 0x55, sizeof(error));
1665 memset(&query, 0, sizeof(query));
1666 if (rte_flow_query(port_id, pf->flow, action, &query, &error))
1667 return port_flow_complain(&error);
1668 switch (action->type) {
1669 case RTE_FLOW_ACTION_TYPE_COUNT:
1673 " hits: %" PRIu64 "\n"
1674 " bytes: %" PRIu64 "\n",
1676 query.count.hits_set,
1677 query.count.bytes_set,
1681 case RTE_FLOW_ACTION_TYPE_RSS:
1682 rss_config_display(&query.rss_conf);
1685 printf("Cannot display result for action type %d (%s)\n",
1686 action->type, name);
1692 /** List all aged flows and optionally destroy them. */
1694 port_flow_aged(portid_t port_id, uint8_t destroy)
1697 int nb_context, total = 0, idx;
1698 struct rte_flow_error error;
1699 struct port_flow *pf;
1701 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1702 port_id == (portid_t)RTE_PORT_ALL)
1704 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
1705 printf("Port %u total aged flows: %d\n", port_id, total);
1707 port_flow_complain(&error);
1712 contexts = malloc(sizeof(void *) * total);
1713 if (contexts == NULL) {
1714 printf("Cannot allocate contexts for aged flow\n");
1717 printf("ID\tGroup\tPrio\tAttr\n");
1718 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
1719 if (nb_context != total) {
1720 printf("Port:%d get aged flows count(%d) != total(%d)\n",
1721 port_id, nb_context, total);
1725 for (idx = 0; idx < nb_context; idx++) {
1726 pf = (struct port_flow *)contexts[idx];
1728 printf("Error: got NULL context in port %u\n", port_id);
1731 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t\n",
1733 pf->rule.attr->group,
1734 pf->rule.attr->priority,
1735 pf->rule.attr->ingress ? 'i' : '-',
1736 pf->rule.attr->egress ? 'e' : '-',
1737 pf->rule.attr->transfer ? 't' : '-');
1745 for (idx = 0; idx < nb_context; idx++) {
1746 pf = (struct port_flow *)contexts[idx];
1750 ret = port_flow_destroy(port_id, 1, &flow_id);
1754 printf("%d flows destroyed\n", total);
1759 /** List flow rules. */
1761 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
1763 struct rte_port *port;
1764 struct port_flow *pf;
1765 struct port_flow *list = NULL;
1768 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1769 port_id == (portid_t)RTE_PORT_ALL)
1771 port = &ports[port_id];
1772 if (!port->flow_list)
1774 /* Sort flows by group, priority and ID. */
1775 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
1776 struct port_flow **tmp;
1777 const struct rte_flow_attr *curr = pf->rule.attr;
1780 /* Filter out unwanted groups. */
1781 for (i = 0; i != n; ++i)
1782 if (curr->group == group[i])
1787 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
1788 const struct rte_flow_attr *comp = (*tmp)->rule.attr;
1790 if (curr->group > comp->group ||
1791 (curr->group == comp->group &&
1792 curr->priority > comp->priority) ||
1793 (curr->group == comp->group &&
1794 curr->priority == comp->priority &&
1795 pf->id > (*tmp)->id))
1802 printf("ID\tGroup\tPrio\tAttr\tRule\n");
1803 for (pf = list; pf != NULL; pf = pf->tmp) {
1804 const struct rte_flow_item *item = pf->rule.pattern;
1805 const struct rte_flow_action *action = pf->rule.actions;
1808 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
1810 pf->rule.attr->group,
1811 pf->rule.attr->priority,
1812 pf->rule.attr->ingress ? 'i' : '-',
1813 pf->rule.attr->egress ? 'e' : '-',
1814 pf->rule.attr->transfer ? 't' : '-');
1815 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1816 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
1817 &name, sizeof(name),
1818 (void *)(uintptr_t)item->type,
1821 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
1822 printf("%s ", name);
1826 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
1827 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1828 &name, sizeof(name),
1829 (void *)(uintptr_t)action->type,
1832 if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
1833 printf(" %s", name);
1840 /** Restrict ingress traffic to the defined flow rules. */
1842 port_flow_isolate(portid_t port_id, int set)
1844 struct rte_flow_error error;
1846 /* Poisoning to make sure PMDs update it in case of error. */
1847 memset(&error, 0x66, sizeof(error));
1848 if (rte_flow_isolate(port_id, set, &error))
1849 return port_flow_complain(&error);
1850 printf("Ingress traffic on port %u is %s to the defined flow rules\n",
1852 set ? "now restricted" : "not restricted anymore");
1857 * RX/TX ring descriptors display functions.
1860 rx_queue_id_is_invalid(queueid_t rxq_id)
1862 if (rxq_id < nb_rxq)
1864 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
1869 tx_queue_id_is_invalid(queueid_t txq_id)
1871 if (txq_id < nb_txq)
1873 printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
1878 rx_desc_id_is_invalid(uint16_t rxdesc_id)
1880 if (rxdesc_id < nb_rxd)
1882 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
1888 tx_desc_id_is_invalid(uint16_t txdesc_id)
1890 if (txdesc_id < nb_txd)
1892 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
1897 static const struct rte_memzone *
1898 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
1900 char mz_name[RTE_MEMZONE_NAMESIZE];
1901 const struct rte_memzone *mz;
1903 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
1904 port_id, q_id, ring_name);
1905 mz = rte_memzone_lookup(mz_name);
1907 printf("%s ring memory zone of (port %d, queue %d) not "
1908 "found (zone name = %s)\n",
1909 ring_name, port_id, q_id, mz_name);
1913 union igb_ring_dword {
1916 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1926 struct igb_ring_desc_32_bytes {
1927 union igb_ring_dword lo_dword;
1928 union igb_ring_dword hi_dword;
1929 union igb_ring_dword resv1;
1930 union igb_ring_dword resv2;
1933 struct igb_ring_desc_16_bytes {
1934 union igb_ring_dword lo_dword;
1935 union igb_ring_dword hi_dword;
1939 ring_rxd_display_dword(union igb_ring_dword dword)
1941 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
1942 (unsigned)dword.words.hi);
1946 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
1947 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1950 __rte_unused portid_t port_id,
1954 struct igb_ring_desc_16_bytes *ring =
1955 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1956 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1958 struct rte_eth_dev_info dev_info;
1960 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1964 if (strstr(dev_info.driver_name, "i40e") != NULL) {
1965 /* 32 bytes RX descriptor, i40e only */
1966 struct igb_ring_desc_32_bytes *ring =
1967 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
1968 ring[desc_id].lo_dword.dword =
1969 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1970 ring_rxd_display_dword(ring[desc_id].lo_dword);
1971 ring[desc_id].hi_dword.dword =
1972 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1973 ring_rxd_display_dword(ring[desc_id].hi_dword);
1974 ring[desc_id].resv1.dword =
1975 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
1976 ring_rxd_display_dword(ring[desc_id].resv1);
1977 ring[desc_id].resv2.dword =
1978 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
1979 ring_rxd_display_dword(ring[desc_id].resv2);
1984 /* 16 bytes RX descriptor */
1985 ring[desc_id].lo_dword.dword =
1986 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1987 ring_rxd_display_dword(ring[desc_id].lo_dword);
1988 ring[desc_id].hi_dword.dword =
1989 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1990 ring_rxd_display_dword(ring[desc_id].hi_dword);
1994 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
1996 struct igb_ring_desc_16_bytes *ring;
1997 struct igb_ring_desc_16_bytes txd;
1999 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2000 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2001 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2002 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
2003 (unsigned)txd.lo_dword.words.lo,
2004 (unsigned)txd.lo_dword.words.hi,
2005 (unsigned)txd.hi_dword.words.lo,
2006 (unsigned)txd.hi_dword.words.hi);
2010 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
2012 const struct rte_memzone *rx_mz;
2014 if (port_id_is_invalid(port_id, ENABLED_WARN))
2016 if (rx_queue_id_is_invalid(rxq_id))
2018 if (rx_desc_id_is_invalid(rxd_id))
2020 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
2023 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
2027 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
2029 const struct rte_memzone *tx_mz;
2031 if (port_id_is_invalid(port_id, ENABLED_WARN))
2033 if (tx_queue_id_is_invalid(txq_id))
2035 if (tx_desc_id_is_invalid(txd_id))
2037 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
2040 ring_tx_descriptor_display(tx_mz, txd_id);
2044 fwd_lcores_config_display(void)
2048 printf("List of forwarding lcores:");
2049 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
2050 printf(" %2u", fwd_lcores_cpuids[lc_id]);
2054 rxtx_config_display(void)
2059 printf(" %s packet forwarding%s packets/burst=%d\n",
2060 cur_fwd_eng->fwd_mode_name,
2061 retry_enabled == 0 ? "" : " with retry",
2064 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
2065 printf(" packet len=%u - nb packet segments=%d\n",
2066 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
2068 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
2069 nb_fwd_lcores, nb_fwd_ports);
2071 RTE_ETH_FOREACH_DEV(pid) {
2072 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
2073 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
2074 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
2075 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
2076 uint16_t nb_rx_desc_tmp;
2077 uint16_t nb_tx_desc_tmp;
2078 struct rte_eth_rxq_info rx_qinfo;
2079 struct rte_eth_txq_info tx_qinfo;
2082 /* per port config */
2083 printf(" port %d: RX queue number: %d Tx queue number: %d\n",
2084 (unsigned int)pid, nb_rxq, nb_txq);
2086 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
2087 ports[pid].dev_conf.rxmode.offloads,
2088 ports[pid].dev_conf.txmode.offloads);
2090 /* per Rx queue config, displayed only for the first queue to keep output brief */
2091 for (qid = 0; qid < 1; qid++) {
2092 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
2094 nb_rx_desc_tmp = nb_rx_desc[qid];
2096 nb_rx_desc_tmp = rx_qinfo.nb_desc;
2098 printf(" RX queue: %d\n", qid);
2099 printf(" RX desc=%d - RX free threshold=%d\n",
2100 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
2101 printf(" RX threshold registers: pthresh=%d hthresh=%d "
2103 rx_conf[qid].rx_thresh.pthresh,
2104 rx_conf[qid].rx_thresh.hthresh,
2105 rx_conf[qid].rx_thresh.wthresh);
2106 printf(" RX Offloads=0x%"PRIx64"\n",
2107 rx_conf[qid].offloads);
2110 /* per Tx queue config, displayed only for the first queue to keep output brief */
2111 for (qid = 0; qid < 1; qid++) {
2112 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
2114 nb_tx_desc_tmp = nb_tx_desc[qid];
2116 nb_tx_desc_tmp = tx_qinfo.nb_desc;
2118 printf(" TX queue: %d\n", qid);
2119 printf(" TX desc=%d - TX free threshold=%d\n",
2120 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
2121 printf(" TX threshold registers: pthresh=%d hthresh=%d "
2123 tx_conf[qid].tx_thresh.pthresh,
2124 tx_conf[qid].tx_thresh.hthresh,
2125 tx_conf[qid].tx_thresh.wthresh);
2126 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
2127 tx_conf[qid].offloads, tx_conf->tx_rs_thresh);
2133 port_rss_reta_info(portid_t port_id,
2134 struct rte_eth_rss_reta_entry64 *reta_conf,
2135 uint16_t nb_entries)
2137 uint16_t i, idx, shift;
2140 if (port_id_is_invalid(port_id, ENABLED_WARN))
2143 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
2145 printf("Failed to get RSS RETA info, return code = %d\n", ret);
2149 for (i = 0; i < nb_entries; i++) {
2150 idx = i / RTE_RETA_GROUP_SIZE;
2151 shift = i % RTE_RETA_GROUP_SIZE;
2152 if (!(reta_conf[idx].mask & (1ULL << shift)))
2154 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
2155 i, reta_conf[idx].reta[shift]);
2160 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
2164 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
2166 struct rte_eth_rss_conf rss_conf = {0};
2167 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
2171 struct rte_eth_dev_info dev_info;
2172 uint8_t hash_key_size;
2175 if (port_id_is_invalid(port_id, ENABLED_WARN))
2178 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2182 if (dev_info.hash_key_size > 0 &&
2183 dev_info.hash_key_size <= sizeof(rss_key))
2184 hash_key_size = dev_info.hash_key_size;
2186 printf("dev_info did not provide a valid hash key size\n");
2190 /* Get RSS hash key if asked to display it */
2191 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
2192 rss_conf.rss_key_len = hash_key_size;
2193 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2197 printf("port index %d invalid\n", port_id);
2200 printf("operation not supported by device\n");
2203 printf("operation failed - diag=%d\n", diag);
2208 rss_hf = rss_conf.rss_hf;
2210 printf("RSS disabled\n");
2213 printf("RSS functions:\n ");
2214 for (i = 0; rss_type_table[i].str; i++) {
2215 if (rss_hf & rss_type_table[i].rss_type)
2216 printf("%s ", rss_type_table[i].str);
2221 printf("RSS key:\n");
2222 for (i = 0; i < hash_key_size; i++)
2223 printf("%02X", rss_key[i]);
2228 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
2231 struct rte_eth_rss_conf rss_conf;
2235 rss_conf.rss_key = NULL;
2236 rss_conf.rss_key_len = hash_key_len;
2237 rss_conf.rss_hf = 0;
2238 for (i = 0; rss_type_table[i].str; i++) {
2239 if (!strcmp(rss_type_table[i].str, rss_type))
2240 rss_conf.rss_hf = rss_type_table[i].rss_type;
2242 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2244 rss_conf.rss_key = hash_key;
2245 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
2252 printf("port index %d invalid\n", port_id);
2255 printf("operation not supported by device\n");
2258 printf("operation failed - diag=%d\n", diag);
2264 * Setup forwarding configuration for each logical core.
2267 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
2269 streamid_t nb_fs_per_lcore;
2277 nb_fs = cfg->nb_fwd_streams;
2278 nb_fc = cfg->nb_fwd_lcores;
2279 if (nb_fs <= nb_fc) {
2280 nb_fs_per_lcore = 1;
2283 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
2284 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
2287 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
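/*
 * e.g. 10 streams over 4 lcores: nb_fs_per_lcore = 2 and nb_extra = 2, so the
 * first 2 lcores get 2 streams each and the last 2 lcores get 3 streams each.
 */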
2289 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
2290 fwd_lcores[lc_id]->stream_idx = sm_id;
2291 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
2292 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2296 * Assign extra remaining streams, if any.
2298 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
2299 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
2300 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
2301 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
2302 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2307 fwd_topology_tx_port_get(portid_t rxp)
2309 static int warning_once = 1;
2311 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
2313 switch (port_topology) {
2315 case PORT_TOPOLOGY_PAIRED:
2316 if ((rxp & 0x1) == 0) {
2317 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2320 printf("\nWarning! port-topology=paired"
2321 " and odd forward ports number,"
2322 " the last port will pair with"
2329 case PORT_TOPOLOGY_CHAINED:
2330 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2331 case PORT_TOPOLOGY_LOOP:
2337 simple_fwd_config_setup(void)
2341 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2342 cur_fwd_config.nb_fwd_streams =
2343 (streamid_t) cur_fwd_config.nb_fwd_ports;
2345 /* reinitialize forwarding streams */
2349 * In the simple forwarding test, the number of forwarding cores
2350 * must be lower than or equal to the number of forwarding ports.
2352 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2353 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2354 cur_fwd_config.nb_fwd_lcores =
2355 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
2356 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2358 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2359 fwd_streams[i]->rx_port = fwd_ports_ids[i];
2360 fwd_streams[i]->rx_queue = 0;
2361 fwd_streams[i]->tx_port =
2362 fwd_ports_ids[fwd_topology_tx_port_get(i)];
2363 fwd_streams[i]->tx_queue = 0;
2364 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2365 fwd_streams[i]->retry_enabled = retry_enabled;
2370 * For the RSS forwarding test, all streams are distributed over the lcores. Each
2371 * stream is composed of an RX queue to poll on an RX port for input packets,
2372 * associated with a TX queue of a TX port to which forwarded packets are sent.
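 * For example, 2 forwarding ports with 4 Rx/Tx queues each yield 8 streams;
 * with 2 forwarding lcores, each lcore polls 4 of them.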
2375 rss_fwd_config_setup(void)
2386 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2387 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2388 cur_fwd_config.nb_fwd_streams =
2389 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2391 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2392 cur_fwd_config.nb_fwd_lcores =
2393 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2395 /* reinitialize forwarding streams */
2398 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2400 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2401 struct fwd_stream *fs;
2403 fs = fwd_streams[sm_id];
2404 txp = fwd_topology_tx_port_get(rxp);
2405 fs->rx_port = fwd_ports_ids[rxp];
2407 fs->tx_port = fwd_ports_ids[txp];
2409 fs->peer_addr = fs->tx_port;
2410 fs->retry_enabled = retry_enabled;
2412 if (rxp < nb_fwd_ports)
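/*
 * Illustrative sketch, not part of the original testpmd sources: the loop
 * above advances the RX port first and the RX queue only after wrapping,
 * so with 2 forwarding ports, 2 RX queues and a paired topology the stream
 * layout is assumed to come out as
 *   stream 0: RX P0/Q0 -> TX P1/Q0    stream 1: RX P1/Q0 -> TX P0/Q0
 *   stream 2: RX P0/Q1 -> TX P1/Q1    stream 3: RX P1/Q1 -> TX P0/Q1
 * The helper below (hypothetical name) prints that enumeration.
 */
#if 0
static void
rss_stream_layout_example(unsigned int nb_ports, unsigned int nb_q)
{
	unsigned int sm_id, rxp = 0, rxq = 0;

	for (sm_id = 0; sm_id < nb_ports * nb_q; sm_id++) {
		unsigned int txp = rxp ^ 0x1;	/* paired topology, even port count */

		printf("stream %u: RX P%u/Q%u -> TX P%u/Q%u\n",
			sm_id, rxp, rxq, txp, rxq);
		if (++rxp == nb_ports) {
			rxp = 0;
			rxq++;
		}
	}
}
#endif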
2420 * For the DCB forwarding test, each core is assigned to one traffic class.
2422 * Each core is assigned multiple streams; each stream is composed of
2423 * an RX queue to poll on an RX port for input packets, associated with
2424 * a TX queue of a TX port where forwarded packets are sent. All RX and
2425 * TX queues map to the same traffic class.
2426 * If VMDQ and DCB co-exist, each traffic class on different pools shares
2430 dcb_fwd_config_setup(void)
2432 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2433 portid_t txp, rxp = 0;
2434 queueid_t txq, rxq = 0;
2436 uint16_t nb_rx_queue, nb_tx_queue;
2437 uint16_t i, j, k, sm_id = 0;
2440 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2441 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2442 cur_fwd_config.nb_fwd_streams =
2443 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2445 /* reinitialize forwarding streams */
2449 /* get the dcb info on the first RX and TX ports */
2450 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2451 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2453 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2454 fwd_lcores[lc_id]->stream_nb = 0;
2455 fwd_lcores[lc_id]->stream_idx = sm_id;
2456 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2457 /* if nb_queue is zero, this TC is
2458 * not enabled on the pool
2460 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2462 k = fwd_lcores[lc_id]->stream_nb +
2463 fwd_lcores[lc_id]->stream_idx;
2464 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2465 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2466 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2467 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2468 for (j = 0; j < nb_rx_queue; j++) {
2469 struct fwd_stream *fs;
2471 fs = fwd_streams[k + j];
2472 fs->rx_port = fwd_ports_ids[rxp];
2473 fs->rx_queue = rxq + j;
2474 fs->tx_port = fwd_ports_ids[txp];
2475 fs->tx_queue = txq + j % nb_tx_queue;
2476 fs->peer_addr = fs->tx_port;
2477 fs->retry_enabled = retry_enabled;
2479 fwd_lcores[lc_id]->stream_nb +=
2480 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2482 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2485 if (tc < rxp_dcb_info.nb_tcs)
2487 /* Restart from TC 0 on next RX port */
2489 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2491 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2494 if (rxp >= nb_fwd_ports)
2496 /* get the dcb information on next RX and TX ports */
2497 if ((rxp & 0x1) == 0)
2498 txp = (portid_t) (rxp + 1);
2500 txp = (portid_t) (rxp - 1);
2501 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2502 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2507 icmp_echo_config_setup(void)
2514 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2515 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2516 (nb_txq * nb_fwd_ports);
2518 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2519 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2520 cur_fwd_config.nb_fwd_streams =
2521 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2522 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2523 cur_fwd_config.nb_fwd_lcores =
2524 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2525 if (verbose_level > 0) {
2526 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2528 cur_fwd_config.nb_fwd_lcores,
2529 cur_fwd_config.nb_fwd_ports,
2530 cur_fwd_config.nb_fwd_streams);
2533 /* reinitialize forwarding streams */
2535 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2537 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2538 if (verbose_level > 0)
2539 printf(" core=%d: \n", lc_id);
2540 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2541 struct fwd_stream *fs;
2542 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2543 fs->rx_port = fwd_ports_ids[rxp];
2545 fs->tx_port = fs->rx_port;
2547 fs->peer_addr = fs->tx_port;
2548 fs->retry_enabled = retry_enabled;
2549 if (verbose_level > 0)
2550 printf(" stream=%d port=%d rxq=%d txq=%d\n",
2551 sm_id, fs->rx_port, fs->rx_queue,
2553 rxq = (queueid_t) (rxq + 1);
2554 if (rxq == nb_rxq) {
2556 rxp = (portid_t) (rxp + 1);
2562 #if defined RTE_LIBRTE_PMD_SOFTNIC
2564 softnic_fwd_config_setup(void)
2566 struct rte_port *port;
2567 portid_t pid, softnic_portid;
2569 uint8_t softnic_enable = 0;
2571 RTE_ETH_FOREACH_DEV(pid) {
2573 const char *driver = port->dev_info.driver_name;
2575 if (strcmp(driver, "net_softnic") == 0) {
2576 softnic_portid = pid;
2582 if (softnic_enable == 0) {
2583 printf("Softnic mode not configured(%s)!\n", __func__);
2587 cur_fwd_config.nb_fwd_ports = 1;
2588 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;
2590 /* Re-initialize forwarding streams */
2594 * In the softnic forwarding test, the number of forwarding cores
2595 * is set to one and the remaining cores are used for softnic packet processing.
2597 cur_fwd_config.nb_fwd_lcores = 1;
2598 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2600 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
2601 fwd_streams[i]->rx_port = softnic_portid;
2602 fwd_streams[i]->rx_queue = i;
2603 fwd_streams[i]->tx_port = softnic_portid;
2604 fwd_streams[i]->tx_queue = i;
2605 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2606 fwd_streams[i]->retry_enabled = retry_enabled;
2612 fwd_config_setup(void)
2614 cur_fwd_config.fwd_eng = cur_fwd_eng;
2615 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2616 icmp_echo_config_setup();
2620 #if defined RTE_LIBRTE_PMD_SOFTNIC
2621 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
2622 softnic_fwd_config_setup();
2627 if ((nb_rxq > 1) && (nb_txq > 1)) {
2629 dcb_fwd_config_setup();
2631 rss_fwd_config_setup();
2634 simple_fwd_config_setup();
2638 mp_alloc_to_str(uint8_t mode)
2641 case MP_ALLOC_NATIVE:
2647 case MP_ALLOC_XMEM_HUGE:
2657 pkt_fwd_config_display(struct fwd_config *cfg)
2659 struct fwd_stream *fs;
2663 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2664 "NUMA support %s, MP allocation mode: %s\n",
2665 cfg->fwd_eng->fwd_mode_name,
2666 retry_enabled == 0 ? "" : " with retry",
2667 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2668 numa_support == 1 ? "enabled" : "disabled",
2669 mp_alloc_to_str(mp_alloc_type));
2672 printf("TX retry num: %u, delay between TX retries: %uus\n",
2673 burst_tx_retry_num, burst_tx_delay_time);
2674 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2675 printf("Logical Core %u (socket %u) forwards packets on "
2677 fwd_lcores_cpuids[lc_id],
2678 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2679 fwd_lcores[lc_id]->stream_nb);
2680 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2681 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2682 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
2683 "P=%d/Q=%d (socket %u) ",
2684 fs->rx_port, fs->rx_queue,
2685 ports[fs->rx_port].socket_id,
2686 fs->tx_port, fs->tx_queue,
2687 ports[fs->tx_port].socket_id);
2688 print_ethaddr("peer=",
2689 &peer_eth_addrs[fs->peer_addr]);
2697 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2699 struct rte_ether_addr new_peer_addr;
2700 if (!rte_eth_dev_is_valid_port(port_id)) {
2701 printf("Error: Invalid port number %i\n", port_id);
2704 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
2705 printf("Error: Invalid ethernet address: %s\n", peer_addr);
2708 peer_eth_addrs[port_id] = new_peer_addr;
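/*
 * Illustrative usage sketch, not part of the original testpmd sources:
 * override the peer MAC address used as destination for packets forwarded
 * out of port 0. The address string uses the format accepted by
 * rte_ether_unformat_addr(); the value below is an arbitrary example.
 */
#if 0
static void
set_fwd_eth_peer_usage_example(void)
{
	set_fwd_eth_peer(0, "02:00:00:00:00:01");
}
#endif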
2712 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2715 unsigned int lcore_cpuid;
2720 for (i = 0; i < nb_lc; i++) {
2721 lcore_cpuid = lcorelist[i];
2722 if (! rte_lcore_is_enabled(lcore_cpuid)) {
2723 printf("lcore %u not enabled\n", lcore_cpuid);
2726 if (lcore_cpuid == rte_get_master_lcore()) {
2727 printf("lcore %u cannot be masked on for running "
2728 "packet forwarding, which is the master lcore "
2729 "and reserved for command line parsing only\n",
2734 fwd_lcores_cpuids[i] = lcore_cpuid;
2736 if (record_now == 0) {
2740 nb_cfg_lcores = (lcoreid_t) nb_lc;
2741 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2742 printf("previous number of forwarding cores %u - changed to "
2743 "number of configured cores %u\n",
2744 (unsigned int) nb_fwd_lcores, nb_lc);
2745 nb_fwd_lcores = (lcoreid_t) nb_lc;
2752 set_fwd_lcores_mask(uint64_t lcoremask)
2754 unsigned int lcorelist[64];
2758 if (lcoremask == 0) {
2759 printf("Invalid NULL mask of cores\n");
2763 for (i = 0; i < 64; i++) {
2764 if (! ((uint64_t)(1ULL << i) & lcoremask))
2766 lcorelist[nb_lc++] = i;
2768 return set_fwd_lcores_list(lcorelist, nb_lc);
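/*
 * Illustrative sketch, not part of the original testpmd sources: the mask
 * variant above only expands set bits into an explicit lcore list before
 * delegating to set_fwd_lcores_list(). A mask of 0x6 (binary 110) selects
 * lcores 1 and 2, so the two calls below are assumed to be equivalent.
 */
#if 0
static void
fwd_lcores_mask_example(void)
{
	unsigned int lcores[] = { 1, 2 };

	set_fwd_lcores_mask(0x6);
	set_fwd_lcores_list(lcores, 2);
}
#endif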
2772 set_fwd_lcores_number(uint16_t nb_lc)
2774 if (nb_lc > nb_cfg_lcores) {
2775 printf("nb fwd cores %u > %u (max. number of configured "
2776 "lcores) - ignored\n",
2777 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2780 nb_fwd_lcores = (lcoreid_t) nb_lc;
2781 printf("Number of forwarding cores set to %u\n",
2782 (unsigned int) nb_fwd_lcores);
2786 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2794 for (i = 0; i < nb_pt; i++) {
2795 port_id = (portid_t) portlist[i];
2796 if (port_id_is_invalid(port_id, ENABLED_WARN))
2799 fwd_ports_ids[i] = port_id;
2801 if (record_now == 0) {
2805 nb_cfg_ports = (portid_t) nb_pt;
2806 if (nb_fwd_ports != (portid_t) nb_pt) {
2807 printf("previous number of forwarding ports %u - changed to "
2808 "number of configured ports %u\n",
2809 (unsigned int) nb_fwd_ports, nb_pt);
2810 nb_fwd_ports = (portid_t) nb_pt;
2815 * Parse the user input and obtain the list of forwarding ports.
2818 * String containing the user input. It can be given
2819 * in formats such as 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
2820 * For example, to use all of the 4 available ports in the
2821 * system, the input can be 0-3 or 0,1,2,3.
2822 * If only ports 1 and 2 are wanted, then the input
2824 * The only valid separators are '-' and ','.
2825 * @param[out] values
2826 * This array is filled with the list of port IDs
2827 * parsed from the user input.
2828 * Note that duplicate entries are discarded; only the first
2829 * "count" entries of this array are port IDs, and the rest
2830 * keep their default values.
2831 * @param[in] maxsize
2832 * This parameter denotes two things:
2833 * 1) the number of elements in the values array;
2834 * 2) the maximum value of each element in the values array.
2836 * On success, returns the total count of parsed port IDs.
2837 * On failure, returns 0.
2840 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
2842 unsigned int count = 0;
2846 unsigned int marked[maxsize];
2848 if (list == NULL || values == NULL)
2851 for (i = 0; i < (int)maxsize; i++)
2857 /* Remove blank spaces, if any */
2858 while (isblank(*list))
2863 value = strtol(list, &end, 10);
2864 if (errno || end == NULL)
2866 if (value < 0 || value >= (int)maxsize)
2868 while (isblank(*end))
2870 if (*end == '-' && min == INT_MAX) {
2872 } else if ((*end == ',') || (*end == '\0')) {
2876 for (i = min; i <= max; i++) {
2877 if (count < maxsize) {
2889 } while (*end != '\0');
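/*
 * Illustrative usage sketch, not part of the original testpmd sources,
 * based on the behaviour documented above: ranges and single values may be
 * mixed, and duplicate port IDs are stored only once.
 */
#if 0
static void
parse_port_list_usage_example(void)
{
	unsigned int values[RTE_MAX_ETHPORTS];
	unsigned int count;

	count = parse_port_list("0-2,5", values, RTE_MAX_ETHPORTS);
	/* count == 4; values[0..3] == 0, 1, 2, 5 */

	count = parse_port_list("1,1-2", values, RTE_MAX_ETHPORTS);
	/* count == 2; values[0..1] == 1, 2 (the duplicate 1 is discarded) */

	(void)count;
}
#endif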
2895 parse_fwd_portlist(const char *portlist)
2897 unsigned int portcount;
2898 unsigned int portindex[RTE_MAX_ETHPORTS];
2899 unsigned int i, valid_port_count = 0;
2901 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
2903 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
2906 * Here we verify the validity of the ports
2907 * and thereby calculate the total number of
2910 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
2911 if (rte_eth_dev_is_valid_port(portindex[i])) {
2912 portindex[valid_port_count] = portindex[i];
2917 set_fwd_ports_list(portindex, valid_port_count);
2921 set_fwd_ports_mask(uint64_t portmask)
2923 unsigned int portlist[64];
2927 if (portmask == 0) {
2928 printf("Invalid NULL mask of ports\n");
2932 RTE_ETH_FOREACH_DEV(i) {
2933 if (! ((uint64_t)(1ULL << i) & portmask))
2935 portlist[nb_pt++] = i;
2937 set_fwd_ports_list(portlist, nb_pt);
2941 set_fwd_ports_number(uint16_t nb_pt)
2943 if (nb_pt > nb_cfg_ports) {
2944 printf("nb fwd ports %u > %u (number of configured "
2945 "ports) - ignored\n",
2946 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2949 nb_fwd_ports = (portid_t) nb_pt;
2950 printf("Number of forwarding ports set to %u\n",
2951 (unsigned int) nb_fwd_ports);
2955 port_is_forwarding(portid_t port_id)
2959 if (port_id_is_invalid(port_id, ENABLED_WARN))
2962 for (i = 0; i < nb_fwd_ports; i++) {
2963 if (fwd_ports_ids[i] == port_id)
2971 set_nb_pkt_per_burst(uint16_t nb)
2973 if (nb > MAX_PKT_BURST) {
2974 printf("nb pkt per burst: %u > %u (maximum packet per burst) "
2976 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2979 nb_pkt_per_burst = nb;
2980 printf("Number of packets per burst set to %u\n",
2981 (unsigned int) nb_pkt_per_burst);
2985 tx_split_get_name(enum tx_pkt_split split)
2989 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2990 if (tx_split_name[i].split == split)
2991 return tx_split_name[i].name;
2997 set_tx_pkt_split(const char *name)
3001 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3002 if (strcmp(tx_split_name[i].name, name) == 0) {
3003 tx_pkt_split = tx_split_name[i].split;
3007 printf("unknown value: \"%s\"\n", name);
3011 show_tx_pkt_segments(void)
3017 split = tx_split_get_name(tx_pkt_split);
3019 printf("Number of segments: %u\n", n);
3020 printf("Segment sizes: ");
3021 for (i = 0; i != n - 1; i++)
3022 printf("%hu,", tx_pkt_seg_lengths[i]);
3023 printf("%hu\n", tx_pkt_seg_lengths[i]);
3024 printf("Split packet: %s\n", split);
3028 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
3030 uint16_t tx_pkt_len;
3033 if (nb_segs >= (unsigned) nb_txd) {
3034 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
3035 nb_segs, (unsigned int) nb_txd);
3040 * Check that each segment length is lower than or equal to
3041 * the mbuf data size.
3042 * Check also that the total packet length is greater than or equal to the
3043 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
3047 for (i = 0; i < nb_segs; i++) {
3048 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
3049 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
3050 i, seg_lengths[i], (unsigned) mbuf_data_size);
3053 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
3055 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
3056 printf("total packet length=%u < %d - give up\n",
3057 (unsigned) tx_pkt_len,
3058 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
3062 for (i = 0; i < nb_segs; i++)
3063 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
3065 tx_pkt_length = tx_pkt_len;
3066 tx_pkt_nb_segs = (uint8_t) nb_segs;
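/*
 * Illustrative usage sketch, not part of the original testpmd sources:
 * split transmitted packets into a 64-byte and a 20-byte segment. The
 * total of 84 bytes is above the minimal Ether+IPv4+UDP size checked
 * above (14 + 20 + 8 = 42 bytes) and each segment is assumed to fit in
 * the configured mbuf data size, so the call sets tx_pkt_nb_segs = 2 and
 * tx_pkt_length = 84.
 */
#if 0
static void
set_tx_pkt_segments_usage_example(void)
{
	unsigned int seg_lengths[] = { 64, 20 };

	set_tx_pkt_segments(seg_lengths, 2);
}
#endif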
3070 setup_gro(const char *onoff, portid_t port_id)
3072 if (!rte_eth_dev_is_valid_port(port_id)) {
3073 printf("invalid port id %u\n", port_id);
3076 if (test_done == 0) {
3077 printf("Before enable/disable GRO,"
3078 " please stop forwarding first\n");
3081 if (strcmp(onoff, "on") == 0) {
3082 if (gro_ports[port_id].enable != 0) {
3083 printf("Port %u has enabled GRO. Please"
3084 " disable GRO first\n", port_id);
3087 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3088 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
3089 gro_ports[port_id].param.max_flow_num =
3090 GRO_DEFAULT_FLOW_NUM;
3091 gro_ports[port_id].param.max_item_per_flow =
3092 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
3094 gro_ports[port_id].enable = 1;
3096 if (gro_ports[port_id].enable == 0) {
3097 printf("Port %u has disabled GRO\n", port_id);
3100 gro_ports[port_id].enable = 0;
3105 setup_gro_flush_cycles(uint8_t cycles)
3107 if (test_done == 0) {
3108 printf("Before change flush interval for GRO,"
3109 " please stop forwarding first.\n");
3113 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
3114 GRO_DEFAULT_FLUSH_CYCLES) {
3115 printf("The flushing cycle be in the range"
3116 " of 1 to %u. Revert to the default"
3118 GRO_MAX_FLUSH_CYCLES,
3119 GRO_DEFAULT_FLUSH_CYCLES);
3120 cycles = GRO_DEFAULT_FLUSH_CYCLES;
3123 gro_flush_cycles = cycles;
3127 show_gro(portid_t port_id)
3129 struct rte_gro_param *param;
3130 uint32_t max_pkts_num;
3132 param = &gro_ports[port_id].param;
3134 if (!rte_eth_dev_is_valid_port(port_id)) {
3135 printf("Invalid port id %u.\n", port_id);
3138 if (gro_ports[port_id].enable) {
3139 printf("GRO type: TCP/IPv4\n");
3140 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3141 max_pkts_num = param->max_flow_num *
3142 param->max_item_per_flow;
3144 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
3145 printf("Max number of packets to perform GRO: %u\n",
3147 printf("Flushing cycles: %u\n", gro_flush_cycles);
3149 printf("Port %u doesn't enable GRO.\n", port_id);
3153 setup_gso(const char *mode, portid_t port_id)
3155 if (!rte_eth_dev_is_valid_port(port_id)) {
3156 printf("invalid port id %u\n", port_id);
3159 if (strcmp(mode, "on") == 0) {
3160 if (test_done == 0) {
3161 printf("before enabling GSO,"
3162 " please stop forwarding first\n");
3165 gso_ports[port_id].enable = 1;
3166 } else if (strcmp(mode, "off") == 0) {
3167 if (test_done == 0) {
3168 printf("before disabling GSO,"
3169 " please stop forwarding first\n");
3172 gso_ports[port_id].enable = 0;
3177 list_pkt_forwarding_modes(void)
3179 static char fwd_modes[128] = "";
3180 const char *separator = "|";
3181 struct fwd_engine *fwd_eng;
3184 if (strlen(fwd_modes) == 0) {
3185 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3186 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3187 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3188 strncat(fwd_modes, separator,
3189 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3191 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3198 list_pkt_forwarding_retry_modes(void)
3200 static char fwd_modes[128] = "";
3201 const char *separator = "|";
3202 struct fwd_engine *fwd_eng;
3205 if (strlen(fwd_modes) == 0) {
3206 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3207 if (fwd_eng == &rx_only_engine)
3209 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3211 strlen(fwd_modes) - 1);
3212 strncat(fwd_modes, separator,
3214 strlen(fwd_modes) - 1);
3216 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3223 set_pkt_forwarding_mode(const char *fwd_mode_name)
3225 struct fwd_engine *fwd_eng;
3229 while ((fwd_eng = fwd_engines[i]) != NULL) {
3230 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
3231 printf("Set %s packet forwarding mode%s\n",
3233 retry_enabled == 0 ? "" : " with retry");
3234 cur_fwd_eng = fwd_eng;
3239 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
3243 add_rx_dump_callbacks(portid_t portid)
3245 struct rte_eth_dev_info dev_info;
3249 if (port_id_is_invalid(portid, ENABLED_WARN))
3252 ret = eth_dev_info_get_print_err(portid, &dev_info);
3256 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3257 if (!ports[portid].rx_dump_cb[queue])
3258 ports[portid].rx_dump_cb[queue] =
3259 rte_eth_add_rx_callback(portid, queue,
3260 dump_rx_pkts, NULL);
3264 add_tx_dump_callbacks(portid_t portid)
3266 struct rte_eth_dev_info dev_info;
3270 if (port_id_is_invalid(portid, ENABLED_WARN))
3273 ret = eth_dev_info_get_print_err(portid, &dev_info);
3277 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3278 if (!ports[portid].tx_dump_cb[queue])
3279 ports[portid].tx_dump_cb[queue] =
3280 rte_eth_add_tx_callback(portid, queue,
3281 dump_tx_pkts, NULL);
3285 remove_rx_dump_callbacks(portid_t portid)
3287 struct rte_eth_dev_info dev_info;
3291 if (port_id_is_invalid(portid, ENABLED_WARN))
3294 ret = eth_dev_info_get_print_err(portid, &dev_info);
3298 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3299 if (ports[portid].rx_dump_cb[queue]) {
3300 rte_eth_remove_rx_callback(portid, queue,
3301 ports[portid].rx_dump_cb[queue]);
3302 ports[portid].rx_dump_cb[queue] = NULL;
3307 remove_tx_dump_callbacks(portid_t portid)
3309 struct rte_eth_dev_info dev_info;
3313 if (port_id_is_invalid(portid, ENABLED_WARN))
3316 ret = eth_dev_info_get_print_err(portid, &dev_info);
3320 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3321 if (ports[portid].tx_dump_cb[queue]) {
3322 rte_eth_remove_tx_callback(portid, queue,
3323 ports[portid].tx_dump_cb[queue]);
3324 ports[portid].tx_dump_cb[queue] = NULL;
3329 configure_rxtx_dump_callbacks(uint16_t verbose)
3333 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3334 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
3338 RTE_ETH_FOREACH_DEV(portid)
3340 if (verbose == 1 || verbose > 2)
3341 add_rx_dump_callbacks(portid);
3343 remove_rx_dump_callbacks(portid);
3345 add_tx_dump_callbacks(portid);
3347 remove_tx_dump_callbacks(portid);
3352 set_verbose_level(uint16_t vb_level)
3354 printf("Change verbose level from %u to %u\n",
3355 (unsigned int) verbose_level, (unsigned int) vb_level);
3356 verbose_level = vb_level;
3357 configure_rxtx_dump_callbacks(verbose_level);
3361 vlan_extend_set(portid_t port_id, int on)
3365 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3367 if (port_id_is_invalid(port_id, ENABLED_WARN))
3370 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3373 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
3374 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3376 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
3377 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3380 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3382 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
3383 "diag=%d\n", port_id, on, diag);
3384 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3388 rx_vlan_strip_set(portid_t port_id, int on)
3392 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3394 if (port_id_is_invalid(port_id, ENABLED_WARN))
3397 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3400 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
3401 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3403 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
3404 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3407 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3409 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
3410 "diag=%d\n", port_id, on, diag);
3411 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3415 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
3419 if (port_id_is_invalid(port_id, ENABLED_WARN))
3422 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
3424 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
3425 "diag=%d\n", port_id, queue_id, on, diag);
3429 rx_vlan_filter_set(portid_t port_id, int on)
3433 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3435 if (port_id_is_invalid(port_id, ENABLED_WARN))
3438 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3441 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
3442 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3444 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
3445 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3448 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3450 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
3451 "diag=%d\n", port_id, on, diag);
3452 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3456 rx_vlan_qinq_strip_set(portid_t port_id, int on)
3460 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3462 if (port_id_is_invalid(port_id, ENABLED_WARN))
3465 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3468 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
3469 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3471 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
3472 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3475 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3477 printf("%s(port_pi=%d, on=%d) failed "
3478 "diag=%d\n", __func__, port_id, on, diag);
3479 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3483 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
3487 if (port_id_is_invalid(port_id, ENABLED_WARN))
3489 if (vlan_id_is_invalid(vlan_id))
3491 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
3494 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
3496 port_id, vlan_id, on, diag);
3501 rx_vlan_all_filter_set(portid_t port_id, int on)
3505 if (port_id_is_invalid(port_id, ENABLED_WARN))
3507 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
3508 if (rx_vft_set(port_id, vlan_id, on))
3514 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
3518 if (port_id_is_invalid(port_id, ENABLED_WARN))
3521 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
3525 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed "
3527 port_id, vlan_type, tp_id, diag);
3531 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
3533 struct rte_eth_dev_info dev_info;
3536 if (port_id_is_invalid(port_id, ENABLED_WARN))
3538 if (vlan_id_is_invalid(vlan_id))
3541 if (ports[port_id].dev_conf.txmode.offloads &
3542 DEV_TX_OFFLOAD_QINQ_INSERT) {
3543 printf("Error, as QinQ has been enabled.\n");
3547 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3551 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
3552 printf("Error: vlan insert is not supported by port %d\n",
3557 tx_vlan_reset(port_id);
3558 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
3559 ports[port_id].tx_vlan_id = vlan_id;
3563 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
3565 struct rte_eth_dev_info dev_info;
3568 if (port_id_is_invalid(port_id, ENABLED_WARN))
3570 if (vlan_id_is_invalid(vlan_id))
3572 if (vlan_id_is_invalid(vlan_id_outer))
3575 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3579 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
3580 printf("Error: qinq insert not supported by port %d\n",
3585 tx_vlan_reset(port_id);
3586 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
3587 DEV_TX_OFFLOAD_QINQ_INSERT);
3588 ports[port_id].tx_vlan_id = vlan_id;
3589 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
3593 tx_vlan_reset(portid_t port_id)
3595 if (port_id_is_invalid(port_id, ENABLED_WARN))
3597 ports[port_id].dev_conf.txmode.offloads &=
3598 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
3599 DEV_TX_OFFLOAD_QINQ_INSERT);
3600 ports[port_id].tx_vlan_id = 0;
3601 ports[port_id].tx_vlan_id_outer = 0;
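/*
 * Illustrative usage sketch, not part of the original testpmd sources:
 * insert a single VLAN tag 100 on packets sent from port 0, then switch
 * to QinQ with inner tag 100 and outer tag 200, clearing the offloads in
 * between. The port is assumed to advertise DEV_TX_OFFLOAD_VLAN_INSERT
 * and DEV_TX_OFFLOAD_QINQ_INSERT.
 */
#if 0
static void
tx_vlan_usage_example(void)
{
	tx_vlan_set(0, 100);
	tx_vlan_reset(0);
	tx_qinq_set(0, 100, 200);
	tx_vlan_reset(0);
}
#endif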
3605 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
3607 if (port_id_is_invalid(port_id, ENABLED_WARN))
3610 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
3614 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
3617 uint8_t existing_mapping_found = 0;
3619 if (port_id_is_invalid(port_id, ENABLED_WARN))
3622 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
3625 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
3626 printf("map_value not in required range 0..%d\n",
3627 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
3631 if (!is_rx) { /* then TX */
3632 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3633 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3634 (tx_queue_stats_mappings[i].queue_id == queue_id)) {
3635 tx_queue_stats_mappings[i].stats_counter_id = map_value;
3636 existing_mapping_found = 1;
3640 if (!existing_mapping_found) { /* A new additional mapping... */
3641 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
3642 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
3643 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
3644 nb_tx_queue_stats_mappings++;
3648 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3649 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3650 (rx_queue_stats_mappings[i].queue_id == queue_id)) {
3651 rx_queue_stats_mappings[i].stats_counter_id = map_value;
3652 existing_mapping_found = 1;
3656 if (!existing_mapping_found) { /* A new additional mapping... */
3657 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
3658 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
3659 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
3660 nb_rx_queue_stats_mappings++;
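/*
 * Illustrative usage sketch, not part of the original testpmd sources:
 * map TX queue 3 and RX queue 3 of port 0 onto per-queue statistics
 * counters 1 and 2. map_value must stay below RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */
#if 0
static void
set_qmap_usage_example(void)
{
	set_qmap(0, 0 /* is_rx = 0: TX queue */, 3, 1);
	set_qmap(0, 1 /* is_rx = 1: RX queue */, 3, 2);
}
#endif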
3666 set_xstats_hide_zero(uint8_t on_off)
3668 xstats_hide_zero = on_off;
3672 print_fdir_mask(struct rte_eth_fdir_masks *mask)
3674 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
3676 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3677 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
3678 " tunnel_id: 0x%08x",
3679 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
3680 rte_be_to_cpu_32(mask->tunnel_id_mask));
3681 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3682 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
3683 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
3684 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
3686 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
3687 rte_be_to_cpu_16(mask->src_port_mask),
3688 rte_be_to_cpu_16(mask->dst_port_mask));
3690 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3691 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
3692 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
3693 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
3694 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
3696 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3697 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
3698 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
3699 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
3700 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
3707 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3709 struct rte_eth_flex_payload_cfg *cfg;
3712 for (i = 0; i < flex_conf->nb_payloads; i++) {
3713 cfg = &flex_conf->flex_set[i];
3714 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
3716 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
3717 printf("\n L2_PAYLOAD: ");
3718 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
3719 printf("\n L3_PAYLOAD: ");
3720 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
3721 printf("\n L4_PAYLOAD: ");
3723 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
3724 for (j = 0; j < num; j++)
3725 printf(" %-5u", cfg->src_offset[j]);
3731 flowtype_to_str(uint16_t flow_type)
3733 struct flow_type_info {
3739 static struct flow_type_info flowtype_str_table[] = {
3740 {"raw", RTE_ETH_FLOW_RAW},
3741 {"ipv4", RTE_ETH_FLOW_IPV4},
3742 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
3743 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
3744 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
3745 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
3746 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
3747 {"ipv6", RTE_ETH_FLOW_IPV6},
3748 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
3749 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
3750 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
3751 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
3752 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
3753 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
3754 {"port", RTE_ETH_FLOW_PORT},
3755 {"vxlan", RTE_ETH_FLOW_VXLAN},
3756 {"geneve", RTE_ETH_FLOW_GENEVE},
3757 {"nvgre", RTE_ETH_FLOW_NVGRE},
3758 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
3761 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
3762 if (flowtype_str_table[i].ftype == flow_type)
3763 return flowtype_str_table[i].str;
3770 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3772 struct rte_eth_fdir_flex_mask *mask;
3776 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
3777 mask = &flex_conf->flex_mask[i];
3778 p = flowtype_to_str(mask->flow_type);
3779 printf("\n %s:\t", p ? p : "unknown");
3780 for (j = 0; j < num; j++)
3781 printf(" %02x", mask->mask[j]);
3787 print_fdir_flow_type(uint32_t flow_types_mask)
3792 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3793 if (!(flow_types_mask & (1 << i)))
3795 p = flowtype_to_str(i);
3805 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
3806 struct rte_eth_fdir_stats *fdir_stat)
3810 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
3812 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3813 RTE_ETH_FILTER_INFO, fdir_info);
3814 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3815 RTE_ETH_FILTER_STATS, fdir_stat);
3819 #ifdef RTE_LIBRTE_I40E_PMD
3820 if (ret == -ENOTSUP) {
3821 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
3823 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
3826 #ifdef RTE_LIBRTE_IXGBE_PMD
3827 if (ret == -ENOTSUP) {
3828 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
3830 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
3837 printf("\n FDIR is not supported on port %-2d\n",
3841 printf("programming error: (%s)\n", strerror(-ret));
3848 fdir_get_infos(portid_t port_id)
3850 struct rte_eth_fdir_stats fdir_stat;
3851 struct rte_eth_fdir_info fdir_info;
3853 static const char *fdir_stats_border = "########################";
3855 if (port_id_is_invalid(port_id, ENABLED_WARN))
3858 memset(&fdir_info, 0, sizeof(fdir_info));
3859 memset(&fdir_stat, 0, sizeof(fdir_stat));
3860 if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
3863 printf("\n %s FDIR infos for port %-2d %s\n",
3864 fdir_stats_border, port_id, fdir_stats_border);
3866 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
3867 printf(" PERFECT\n");
3868 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
3869 printf(" PERFECT-MAC-VLAN\n");
3870 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3871 printf(" PERFECT-TUNNEL\n");
3872 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
3873 printf(" SIGNATURE\n");
3875 printf(" DISABLE\n");
3876 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
3877 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
3878 printf(" SUPPORTED FLOW TYPE: ");
3879 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
3881 printf(" FLEX PAYLOAD INFO:\n");
3882 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
3883 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
3884 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
3885 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
3886 fdir_info.flex_payload_unit,
3887 fdir_info.max_flex_payload_segment_num,
3888 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
3890 print_fdir_mask(&fdir_info.mask);
3891 if (fdir_info.flex_conf.nb_payloads > 0) {
3892 printf(" FLEX PAYLOAD SRC OFFSET:");
3893 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3895 if (fdir_info.flex_conf.nb_flexmasks > 0) {
3896 printf(" FLEX MASK CFG:");
3897 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3899 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
3900 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
3901 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
3902 fdir_info.guarant_spc, fdir_info.best_spc);
3903 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
3904 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
3905 " add: %-10"PRIu64" remove: %"PRIu64"\n"
3906 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
3907 fdir_stat.collision, fdir_stat.free,
3908 fdir_stat.maxhash, fdir_stat.maxlen,
3909 fdir_stat.add, fdir_stat.remove,
3910 fdir_stat.f_add, fdir_stat.f_remove);
3911 printf(" %s############################%s\n",
3912 fdir_stats_border, fdir_stats_border);
3916 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3918 struct rte_port *port;
3919 struct rte_eth_fdir_flex_conf *flex_conf;
3922 port = &ports[port_id];
3923 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3924 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3925 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3930 if (i >= RTE_ETH_FLOW_MAX) {
3931 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3932 idx = flex_conf->nb_flexmasks;
3933 flex_conf->nb_flexmasks++;
3935 printf("The flex mask table is full. Can not set flex"
3936 " mask for flow_type(%u).", cfg->flow_type);
3940 rte_memcpy(&flex_conf->flex_mask[idx],
3942 sizeof(struct rte_eth_fdir_flex_mask));
3946 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3948 struct rte_port *port;
3949 struct rte_eth_fdir_flex_conf *flex_conf;
3952 port = &ports[port_id];
3953 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3954 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3955 if (cfg->type == flex_conf->flex_set[i].type) {
3960 if (i >= RTE_ETH_PAYLOAD_MAX) {
3961 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3962 idx = flex_conf->nb_payloads;
3963 flex_conf->nb_payloads++;
3965 printf("The flex payload table is full. Can not set"
3966 " flex payload for type(%u).", cfg->type);
3970 rte_memcpy(&flex_conf->flex_set[idx],
3972 sizeof(struct rte_eth_flex_payload_cfg));
3977 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3979 #ifdef RTE_LIBRTE_IXGBE_PMD
3983 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3985 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3989 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3990 is_rx ? "rx" : "tx", port_id, diag);
3993 printf("VF %s setting not supported for port %d\n",
3994 is_rx ? "Rx" : "Tx", port_id);
4000 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
4003 struct rte_eth_link link;
4006 if (port_id_is_invalid(port_id, ENABLED_WARN))
4008 ret = eth_link_get_nowait_print_err(port_id, &link);
4011 if (rate > link.link_speed) {
4012 printf("Invalid rate value:%u bigger than link speed: %u\n",
4013 rate, link.link_speed);
4016 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
4019 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
4025 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
4027 int diag = -ENOTSUP;
4031 RTE_SET_USED(q_msk);
4033 #ifdef RTE_LIBRTE_IXGBE_PMD
4034 if (diag == -ENOTSUP)
4035 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
4038 #ifdef RTE_LIBRTE_BNXT_PMD
4039 if (diag == -ENOTSUP)
4040 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
4045 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
4051 * Functions to manage the set of filtered Multicast MAC addresses.
4053 * A pool of filtered multicast MAC addresses is associated with each port.
4054 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
4055 * The address of the pool and the number of valid multicast MAC addresses
4056 * recorded in the pool are stored in the fields "mc_addr_pool" and
4057 * "mc_addr_nb" of the "rte_port" data structure.
4059 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
4060 * to be supplied a contiguous array of multicast MAC addresses.
4061 * To comply with this constraint, the set of multicast addresses recorded
4062 * into the pool are systematically compacted at the beginning of the pool.
4063 * Hence, when a multicast address is removed from the pool, all following
4064 * addresses, if any, are copied back to keep the set contiguous.
4066 #define MCAST_POOL_INC 32
4069 mcast_addr_pool_extend(struct rte_port *port)
4071 struct rte_ether_addr *mc_pool;
4072 size_t mc_pool_size;
4075 * If a free entry is available at the end of the pool, just
4076 * increment the number of recorded multicast addresses.
4078 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
4084 * [re]allocate a pool with MCAST_POOL_INC more entries.
4085 * The previous test guarantees that port->mc_addr_nb is a multiple
4086 * of MCAST_POOL_INC.
4088 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
4090 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
4092 if (mc_pool == NULL) {
4093 printf("allocation of pool of %u multicast addresses failed\n",
4094 port->mc_addr_nb + MCAST_POOL_INC);
4098 port->mc_addr_pool = mc_pool;
4105 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
4107 if (mcast_addr_pool_extend(port) != 0)
4109 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
4113 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
4116 if (addr_idx == port->mc_addr_nb) {
4117 /* No need to recompact the set of multicast addresses. */
4118 if (port->mc_addr_nb == 0) {
4119 /* free the pool of multicast addresses. */
4120 free(port->mc_addr_pool);
4121 port->mc_addr_pool = NULL;
4125 memmove(&port->mc_addr_pool[addr_idx],
4126 &port->mc_addr_pool[addr_idx + 1],
4127 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
4131 eth_port_multicast_addr_list_set(portid_t port_id)
4133 struct rte_port *port;
4136 port = &ports[port_id];
4137 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
4140 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
4141 port_id, port->mc_addr_nb, diag);
4147 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
4149 struct rte_port *port;
4152 if (port_id_is_invalid(port_id, ENABLED_WARN))
4155 port = &ports[port_id];
4158 * Check that the added multicast MAC address is not already recorded
4159 * in the pool of multicast addresses.
4161 for (i = 0; i < port->mc_addr_nb; i++) {
4162 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
4163 printf("multicast address already filtered by port\n");
4168 mcast_addr_pool_append(port, mc_addr);
4169 if (eth_port_multicast_addr_list_set(port_id) < 0)
4170 /* Rollback on failure, remove the address from the pool */
4171 mcast_addr_pool_remove(port, i);
4175 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
4177 struct rte_port *port;
4180 if (port_id_is_invalid(port_id, ENABLED_WARN))
4183 port = &ports[port_id];
4186 * Search the pool of multicast MAC addresses for the removed address.
4188 for (i = 0; i < port->mc_addr_nb; i++) {
4189 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
4192 if (i == port->mc_addr_nb) {
4193 printf("multicast address not filtered by port %d\n", port_id);
4197 mcast_addr_pool_remove(port, i);
4198 if (eth_port_multicast_addr_list_set(port_id) < 0)
4199 /* Rollback on failure, add the address back into the pool */
4200 mcast_addr_pool_append(port, mc_addr);
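/*
 * Illustrative usage sketch, not part of the original testpmd sources:
 * add and then remove the IPv4 all-hosts multicast MAC on port 0. The
 * pool stays contiguous, so each change is pushed to the device as one
 * array through rte_eth_dev_set_mc_addr_list().
 */
#if 0
static void
mcast_addr_usage_example(void)
{
	struct rte_ether_addr mc = {
		.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }
	};

	mcast_addr_add(0, &mc);
	mcast_addr_remove(0, &mc);
}
#endif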
4204 port_dcb_info_display(portid_t port_id)
4206 struct rte_eth_dcb_info dcb_info;
4209 static const char *border = "================";
4211 if (port_id_is_invalid(port_id, ENABLED_WARN))
4214 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
4216 printf("\n Failed to get dcb infos on port %-2d\n",
4220 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
4221 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
4223 for (i = 0; i < dcb_info.nb_tcs; i++)
4225 printf("\n Priority : ");
4226 for (i = 0; i < dcb_info.nb_tcs; i++)
4227 printf("\t%4d", dcb_info.prio_tc[i]);
4228 printf("\n BW percent :");
4229 for (i = 0; i < dcb_info.nb_tcs; i++)
4230 printf("\t%4d%%", dcb_info.tc_bws[i]);
4231 printf("\n RXQ base : ");
4232 for (i = 0; i < dcb_info.nb_tcs; i++)
4233 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
4234 printf("\n RXQ number :");
4235 for (i = 0; i < dcb_info.nb_tcs; i++)
4236 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
4237 printf("\n TXQ base : ");
4238 for (i = 0; i < dcb_info.nb_tcs; i++)
4239 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
4240 printf("\n TXQ number :");
4241 for (i = 0; i < dcb_info.nb_tcs; i++)
4242 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
4247 open_file(const char *file_path, uint32_t *size)
4249 int fd = open(file_path, O_RDONLY);
4251 uint8_t *buf = NULL;
4259 printf("%s: Failed to open %s\n", __func__, file_path);
4263 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
4265 printf("%s: File operations failed\n", __func__);
4269 pkg_size = st_buf.st_size;
4272 printf("%s: File operations failed\n", __func__);
4276 buf = (uint8_t *)malloc(pkg_size);
4279 printf("%s: Failed to malloc memory\n", __func__);
4283 ret = read(fd, buf, pkg_size);
4286 printf("%s: File read operation failed\n", __func__);
4300 save_file(const char *file_path, uint8_t *buf, uint32_t size)
4302 FILE *fh = fopen(file_path, "wb");
4305 printf("%s: Failed to open %s\n", __func__, file_path);
4309 if (fwrite(buf, 1, size, fh) != size) {
4311 printf("%s: File write operation failed\n", __func__);
4321 close_file(uint8_t *buf)
4332 port_queue_region_info_display(portid_t port_id, void *buf)
4334 #ifdef RTE_LIBRTE_I40E_PMD
4336 struct rte_pmd_i40e_queue_regions *info =
4337 (struct rte_pmd_i40e_queue_regions *)buf;
4338 static const char *queue_region_info_stats_border = "-------";
4340 if (!info->queue_region_number)
4341 printf("there is no region has been set before");
4343 printf("\n %s All queue region info for port=%2d %s",
4344 queue_region_info_stats_border, port_id,
4345 queue_region_info_stats_border);
4346 printf("\n queue_region_number: %-14u \n",
4347 info->queue_region_number);
4349 for (i = 0; i < info->queue_region_number; i++) {
4350 printf("\n region_id: %-14u queue_number: %-14u "
4351 "queue_start_index: %-14u \n",
4352 info->region[i].region_id,
4353 info->region[i].queue_num,
4354 info->region[i].queue_start_index);
4356 printf(" user_priority_num is %-14u :",
4357 info->region[i].user_priority_num);
4358 for (j = 0; j < info->region[i].user_priority_num; j++)
4359 printf(" %-14u ", info->region[i].user_priority[j]);
4361 printf("\n flowtype_num is %-14u :",
4362 info->region[i].flowtype_num);
4363 for (j = 0; j < info->region[i].flowtype_num; j++)
4364 printf(" %-14u ", info->region[i].hw_flowtype[j]);
4367 RTE_SET_USED(port_id);
4375 show_macs(portid_t port_id)
4377 char buf[RTE_ETHER_ADDR_FMT_SIZE];
4378 struct rte_eth_dev_info dev_info;
4379 struct rte_ether_addr *addr;
4380 uint32_t i, num_macs = 0;
4381 struct rte_eth_dev *dev;
4383 dev = &rte_eth_devices[port_id];
4385 rte_eth_dev_info_get(port_id, &dev_info);
4387 for (i = 0; i < dev_info.max_mac_addrs; i++) {
4388 addr = &dev->data->mac_addrs[i];
4390 /* skip zero address */
4391 if (rte_is_zero_ether_addr(addr))
4397 printf("Number of MAC address added: %d\n", num_macs);
4399 for (i = 0; i < dev_info.max_mac_addrs; i++) {
4400 addr = &dev->data->mac_addrs[i];
4402 /* skip zero address */
4403 if (rte_is_zero_ether_addr(addr))
4406 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
4407 printf(" %s\n", buf);
4412 show_mcast_macs(portid_t port_id)
4414 char buf[RTE_ETHER_ADDR_FMT_SIZE];
4415 struct rte_ether_addr *addr;
4416 struct rte_port *port;
4419 port = &ports[port_id];
4421 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb);
4423 for (i = 0; i < port->mc_addr_nb; i++) {
4424 addr = &port->mc_addr_pool[i];
4426 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
4427 printf(" %s\n", buf);