1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2013-2014 6WIND S.A.
13 #include <sys/queue.h>
14 #include <sys/types.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
34 #include <rte_interrupts.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
41 #include <rte_errno.h>
42 #ifdef RTE_LIBRTE_IXGBE_PMD
43 #include <rte_pmd_ixgbe.h>
45 #ifdef RTE_LIBRTE_I40E_PMD
46 #include <rte_pmd_i40e.h>
48 #ifdef RTE_LIBRTE_BNXT_PMD
49 #include <rte_pmd_bnxt.h>
55 #define ETHDEV_FWVERS_LEN 32
57 static char *flowtype_to_str(uint16_t flow_type);
60 enum tx_pkt_split split;
64 .split = TX_PKT_SPLIT_OFF,
68 .split = TX_PKT_SPLIT_ON,
72 .split = TX_PKT_SPLIT_RND,
77 const struct rss_type_info rss_type_table[] = {
78 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
79 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
80 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
83 { "eth", ETH_RSS_ETH },
84 { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
85 { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
86 { "vlan", ETH_RSS_VLAN },
87 { "s-vlan", ETH_RSS_S_VLAN },
88 { "c-vlan", ETH_RSS_C_VLAN },
89 { "ipv4", ETH_RSS_IPV4 },
90 { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
91 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
92 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
93 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
94 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
95 { "ipv6", ETH_RSS_IPV6 },
96 { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
97 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
98 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
99 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
100 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
101 { "l2-payload", ETH_RSS_L2_PAYLOAD },
102 { "ipv6-ex", ETH_RSS_IPV6_EX },
103 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
104 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
105 { "port", ETH_RSS_PORT },
106 { "vxlan", ETH_RSS_VXLAN },
107 { "geneve", ETH_RSS_GENEVE },
108 { "nvgre", ETH_RSS_NVGRE },
109 { "ip", ETH_RSS_IP },
110 { "udp", ETH_RSS_UDP },
111 { "tcp", ETH_RSS_TCP },
112 { "sctp", ETH_RSS_SCTP },
113 { "tunnel", ETH_RSS_TUNNEL },
114 { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
115 { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
116 { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
117 { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
118 { "esp", ETH_RSS_ESP },
119 { "ah", ETH_RSS_AH },
120 { "l2tpv3", ETH_RSS_L2TPV3 },
121 { "pfcp", ETH_RSS_PFCP },
122 { "pppoe", ETH_RSS_PPPOE },
123 { "gtpu", ETH_RSS_GTPU },
128 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
130 char buf[RTE_ETHER_ADDR_FMT_SIZE];
131 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
132 printf("%s%s", name, buf);
136 nic_stats_display(portid_t port_id)
138 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
139 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
140 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
141 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
142 static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
143 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
145 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
146 struct rte_eth_stats stats;
147 struct rte_port *port = &ports[port_id];
150 static const char *nic_stats_border = "########################";
152 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
156 rte_eth_stats_get(port_id, &stats);
157 printf("\n %s NIC statistics for port %-2d %s\n",
158 nic_stats_border, port_id, nic_stats_border);
160 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
161 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
163 stats.ipackets, stats.imissed, stats.ibytes);
164 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
165 printf(" RX-nombuf: %-10"PRIu64"\n",
167 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
169 stats.opackets, stats.oerrors, stats.obytes);
172 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
173 " RX-bytes: %10"PRIu64"\n",
174 stats.ipackets, stats.ierrors, stats.ibytes);
175 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
176 printf(" RX-nombuf: %10"PRIu64"\n",
178 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
179 " TX-bytes: %10"PRIu64"\n",
180 stats.opackets, stats.oerrors, stats.obytes);
183 if (port->rx_queue_stats_mapping_enabled) {
185 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
186 printf(" Stats reg %2d RX-packets: %10"PRIu64
187 " RX-errors: %10"PRIu64
188 " RX-bytes: %10"PRIu64"\n",
189 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
192 if (port->tx_queue_stats_mapping_enabled) {
194 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
195 printf(" Stats reg %2d TX-packets: %10"PRIu64
196 " TX-bytes: %10"PRIu64"\n",
197 i, stats.q_opackets[i], stats.q_obytes[i]);
201 diff_cycles = prev_cycles[port_id];
202 prev_cycles[port_id] = rte_rdtsc();
204 diff_cycles = prev_cycles[port_id] - diff_cycles;
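/*
 * Clamp the packet/byte deltas below to zero if a counter moved backwards
 * (e.g. after a stats reset) so the throughput figures cannot underflow.
 */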
206 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
207 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
208 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
209 (stats.opackets - prev_pkts_tx[port_id]) : 0;
210 prev_pkts_rx[port_id] = stats.ipackets;
211 prev_pkts_tx[port_id] = stats.opackets;
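/*
 * Rates are derived from the TSC: pkts/s = diff_pkts * rte_get_tsc_hz() /
 * diff_cycles, and likewise for bytes/s; bits/s is printed as bytes/s * 8.
 */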
212 mpps_rx = diff_cycles > 0 ?
213 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
214 mpps_tx = diff_cycles > 0 ?
215 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
217 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
218 (stats.ibytes - prev_bytes_rx[port_id]) : 0;
219 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
220 (stats.obytes - prev_bytes_tx[port_id]) : 0;
221 prev_bytes_rx[port_id] = stats.ibytes;
222 prev_bytes_tx[port_id] = stats.obytes;
223 mbps_rx = diff_cycles > 0 ?
224 diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0;
225 mbps_tx = diff_cycles > 0 ?
226 diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0;
228 printf("\n Throughput (since last show)\n");
229 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
230 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
231 mpps_tx, mbps_tx * 8);
233 printf(" %s############################%s\n",
234 nic_stats_border, nic_stats_border);
238 nic_stats_clear(portid_t port_id)
242 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
247 ret = rte_eth_stats_reset(port_id);
249 printf("%s: Error: failed to reset stats (port %u): %s\n",
250 __func__, port_id, strerror(-ret));
254 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
258 printf("%s: Error: failed to get stats (port %u): %s\n",
259 __func__, port_id, strerror(ret));
262 printf("\n NIC statistics for port %d cleared\n", port_id);
266 nic_xstats_display(portid_t port_id)
268 struct rte_eth_xstat *xstats;
269 int cnt_xstats, idx_xstat;
270 struct rte_eth_xstat_name *xstats_names;
272 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
276 printf("###### NIC extended statistics for port %-2d\n", port_id);
277 if (!rte_eth_dev_is_valid_port(port_id)) {
278 printf("Error: Invalid port number %i\n", port_id);
283 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
284 if (cnt_xstats < 0) {
285 printf("Error: Cannot get count of xstats\n");
289 /* Get id-name lookup table */
290 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
291 if (xstats_names == NULL) {
292 printf("Cannot allocate memory for xstats lookup\n");
295 if (cnt_xstats != rte_eth_xstats_get_names(
296 port_id, xstats_names, cnt_xstats)) {
297 printf("Error: Cannot get xstats lookup\n");
302 /* Get stats themselves */
303 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
304 if (xstats == NULL) {
305 printf("Cannot allocate memory for xstats\n");
309 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
310 printf("Error: Unable to get xstats\n");
317 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
318 if (xstats_hide_zero && !xstats[idx_xstat].value)
320 printf("%s: %"PRIu64"\n",
321 xstats_names[idx_xstat].name,
322 xstats[idx_xstat].value);
329 nic_xstats_clear(portid_t port_id)
333 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
338 ret = rte_eth_xstats_reset(port_id);
340 printf("%s: Error: failed to reset xstats (port %u): %s\n",
341 __func__, port_id, strerror(-ret));
345 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
349 printf("%s: Error: failed to get stats (port %u): %s\n",
350 __func__, port_id, strerror(ret));
356 nic_stats_mapping_display(portid_t port_id)
358 struct rte_port *port = &ports[port_id];
361 static const char *nic_stats_mapping_border = "########################";
363 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
368 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
369 printf("Port id %d - either does not support queue statistic mapping or"
370 " no queue statistic mapping is set\n", port_id);
374 printf("\n %s NIC statistics mapping for port %-2d %s\n",
375 nic_stats_mapping_border, port_id, nic_stats_mapping_border);
377 if (port->rx_queue_stats_mapping_enabled) {
378 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
379 if (rx_queue_stats_mappings[i].port_id == port_id) {
380 printf(" RX-queue %2d mapped to Stats Reg %2d\n",
381 rx_queue_stats_mappings[i].queue_id,
382 rx_queue_stats_mappings[i].stats_counter_id);
389 if (port->tx_queue_stats_mapping_enabled) {
390 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
391 if (tx_queue_stats_mappings[i].port_id == port_id) {
392 printf(" TX-queue %2d mapped to Stats Reg %2d\n",
393 tx_queue_stats_mappings[i].queue_id,
394 tx_queue_stats_mappings[i].stats_counter_id);
399 printf(" %s####################################%s\n",
400 nic_stats_mapping_border, nic_stats_mapping_border);
404 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
406 struct rte_eth_burst_mode mode;
407 struct rte_eth_rxq_info qinfo;
409 static const char *info_border = "*********************";
411 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
413 printf("Failed to retrieve information for port: %u, "
414 "RX queue: %hu\nerror desc: %s(%d)\n",
415 port_id, queue_id, strerror(-rc), rc);
419 printf("\n%s Infos for port %-2u, RX queue %-2u %s",
420 info_border, port_id, queue_id, info_border);
422 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
423 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
424 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
425 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
426 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
427 printf("\nRX drop packets: %s",
428 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
429 printf("\nRX deferred start: %s",
430 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
431 printf("\nRX scattered packets: %s",
432 (qinfo.scattered_rx != 0) ? "on" : "off");
433 printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
435 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
436 printf("\nBurst mode: %s%s",
438 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
439 " (per queue)" : "");
445 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
447 struct rte_eth_burst_mode mode;
448 struct rte_eth_txq_info qinfo;
450 static const char *info_border = "*********************";
452 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
454 printf("Failed to retrieve information for port: %u, "
455 "TX queue: %hu\nerror desc: %s(%d)\n",
456 port_id, queue_id, strerror(-rc), rc);
460 printf("\n%s Infos for port %-2u, TX queue %-2u %s",
461 info_border, port_id, queue_id, info_border);
463 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
464 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
465 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
466 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
467 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
468 printf("\nTX deferred start: %s",
469 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
470 printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
472 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
473 printf("\nBurst mode: %s%s",
475 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
476 " (per queue)" : "");
481 static int bus_match_all(const struct rte_bus *bus, const void *data)
489 device_infos_display(const char *identifier)
491 static const char *info_border = "*********************";
492 struct rte_bus *start = NULL, *next;
493 struct rte_dev_iterator dev_iter;
494 char name[RTE_ETH_NAME_MAX_LEN];
495 struct rte_ether_addr mac_addr;
496 struct rte_device *dev;
497 struct rte_devargs da;
501 memset(&da, 0, sizeof(da));
505 if (rte_devargs_parsef(&da, "%s", identifier)) {
506 printf("cannot parse identifier\n");
513 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
516 if (identifier && da.bus != next)
519 /* Skip buses that don't have iterate method */
520 if (!next->dev_iterate)
523 snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
524 RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
528 /* Check for matching device if identifier is present */
530 strncmp(da.name, dev->name, strlen(dev->name)))
532 printf("\n%s Infos for device %s %s\n",
533 info_border, dev->name, info_border);
534 printf("Bus name: %s", dev->bus->name);
535 printf("\nDriver name: %s", dev->driver->name);
536 printf("\nDevargs: %s",
537 dev->devargs ? dev->devargs->args : "");
538 printf("\nConnect to socket: %d", dev->numa_node);
541 /* List ports with matching device name */
542 RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
543 printf("\n\tPort id: %-2d", port_id);
544 if (eth_macaddr_get_print_err(port_id,
546 print_ethaddr("\n\tMAC address: ",
548 rte_eth_dev_get_name_by_port(port_id, name);
549 printf("\n\tDevice name: %s", name);
557 port_infos_display(portid_t port_id)
559 struct rte_port *port;
560 struct rte_ether_addr mac_addr;
561 struct rte_eth_link link;
562 struct rte_eth_dev_info dev_info;
564 struct rte_mempool * mp;
565 static const char *info_border = "*********************";
567 char name[RTE_ETH_NAME_MAX_LEN];
569 char fw_version[ETHDEV_FWVERS_LEN];
571 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
575 port = &ports[port_id];
576 ret = eth_link_get_nowait_print_err(port_id, &link);
580 ret = eth_dev_info_get_print_err(port_id, &dev_info);
584 printf("\n%s Infos for port %-2d %s\n",
585 info_border, port_id, info_border);
586 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
587 print_ethaddr("MAC address: ", &mac_addr);
588 rte_eth_dev_get_name_by_port(port_id, name);
589 printf("\nDevice name: %s", name);
590 printf("\nDriver name: %s", dev_info.driver_name);
592 if (rte_eth_dev_fw_version_get(port_id, fw_version,
593 ETHDEV_FWVERS_LEN) == 0)
594 printf("\nFirmware-version: %s", fw_version);
596 printf("\nFirmware-version: %s", "not available");
598 if (dev_info.device->devargs && dev_info.device->devargs->args)
599 printf("\nDevargs: %s", dev_info.device->devargs->args);
600 printf("\nConnect to socket: %u", port->socket_id);
602 if (port_numa[port_id] != NUMA_NO_CONFIG) {
603 mp = mbuf_pool_find(port_numa[port_id]);
605 printf("\nmemory allocation on the socket: %d",
608 printf("\nmemory allocation on the socket: %u", port->socket_id);
610 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
611 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
612 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
613 ("full-duplex") : ("half-duplex"));
615 if (!rte_eth_dev_get_mtu(port_id, &mtu))
616 printf("MTU: %u\n", mtu);
618 printf("Promiscuous mode: %s\n",
619 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
620 printf("Allmulticast mode: %s\n",
621 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
622 printf("Maximum number of MAC addresses: %u\n",
623 (unsigned int)(port->dev_info.max_mac_addrs));
624 printf("Maximum number of MAC addresses of hash filtering: %u\n",
625 (unsigned int)(port->dev_info.max_hash_mac_addrs));
627 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
628 if (vlan_offload >= 0) {
629 printf("VLAN offload:\n");
630 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
631 printf(" strip on, ");
633 printf(" strip off, ");
635 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
636 printf("filter on, ");
638 printf("filter off, ");
640 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
641 printf("extend on, ");
643 printf("extend off, ");
645 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
646 printf("qinq strip on\n");
648 printf("qinq strip off\n");
651 if (dev_info.hash_key_size > 0)
652 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
653 if (dev_info.reta_size > 0)
654 printf("Redirection table size: %u\n", dev_info.reta_size);
655 if (!dev_info.flow_type_rss_offloads)
656 printf("No RSS offload flow type is supported.\n");
661 printf("Supported RSS offload flow types:\n");
662 for (i = RTE_ETH_FLOW_UNKNOWN + 1;
663 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
664 if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
666 p = flowtype_to_str(i);
670 printf(" user defined %d\n", i);
674 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
675 printf("Maximum configurable length of RX packet: %u\n",
676 dev_info.max_rx_pktlen);
677 printf("Maximum configurable size of LRO aggregated packet: %u\n",
678 dev_info.max_lro_pkt_size);
679 if (dev_info.max_vfs)
680 printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
681 if (dev_info.max_vmdq_pools)
682 printf("Maximum number of VMDq pools: %u\n",
683 dev_info.max_vmdq_pools);
685 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
686 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
687 printf("Max possible number of RXDs per queue: %hu\n",
688 dev_info.rx_desc_lim.nb_max);
689 printf("Min possible number of RXDs per queue: %hu\n",
690 dev_info.rx_desc_lim.nb_min);
691 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
693 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
694 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
695 printf("Max possible number of TXDs per queue: %hu\n",
696 dev_info.tx_desc_lim.nb_max);
697 printf("Min possible number of TXDs per queue: %hu\n",
698 dev_info.tx_desc_lim.nb_min);
699 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
700 printf("Max segment number per packet: %hu\n",
701 dev_info.tx_desc_lim.nb_seg_max);
702 printf("Max segment number per MTU/TSO: %hu\n",
703 dev_info.tx_desc_lim.nb_mtu_seg_max);
705 /* Show switch info only if valid switch domain and port id is set */
706 if (dev_info.switch_info.domain_id !=
707 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
708 if (dev_info.switch_info.name)
709 printf("Switch name: %s\n", dev_info.switch_info.name);
711 printf("Switch domain Id: %u\n",
712 dev_info.switch_info.domain_id);
713 printf("Switch Port Id: %u\n",
714 dev_info.switch_info.port_id);
719 port_summary_header_display(void)
721 uint16_t port_number;
723 port_number = rte_eth_dev_count_avail();
724 printf("Number of available ports: %i\n", port_number);
725 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
726 "Driver", "Status", "Link");
730 port_summary_display(portid_t port_id)
732 struct rte_ether_addr mac_addr;
733 struct rte_eth_link link;
734 struct rte_eth_dev_info dev_info;
735 char name[RTE_ETH_NAME_MAX_LEN];
738 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
743 ret = eth_link_get_nowait_print_err(port_id, &link);
747 ret = eth_dev_info_get_print_err(port_id, &dev_info);
751 rte_eth_dev_get_name_by_port(port_id, name);
752 ret = eth_macaddr_get_print_err(port_id, &mac_addr);
756 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
757 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
758 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
759 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
760 dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
761 (unsigned int) link.link_speed);
765 port_offload_cap_display(portid_t port_id)
767 struct rte_eth_dev_info dev_info;
768 static const char *info_border = "************";
771 if (port_id_is_invalid(port_id, ENABLED_WARN))
774 ret = eth_dev_info_get_print_err(port_id, &dev_info);
778 printf("\n%s Port %d supported offload features: %s\n",
779 info_border, port_id, info_border);
781 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
782 printf("VLAN stripped: ");
783 if (ports[port_id].dev_conf.rxmode.offloads &
784 DEV_RX_OFFLOAD_VLAN_STRIP)
790 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
791 printf("Double VLANs stripped: ");
792 if (ports[port_id].dev_conf.rxmode.offloads &
793 DEV_RX_OFFLOAD_QINQ_STRIP)
799 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
800 printf("RX IPv4 checksum: ");
801 if (ports[port_id].dev_conf.rxmode.offloads &
802 DEV_RX_OFFLOAD_IPV4_CKSUM)
808 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
809 printf("RX UDP checksum: ");
810 if (ports[port_id].dev_conf.rxmode.offloads &
811 DEV_RX_OFFLOAD_UDP_CKSUM)
817 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
818 printf("RX TCP checksum: ");
819 if (ports[port_id].dev_conf.rxmode.offloads &
820 DEV_RX_OFFLOAD_TCP_CKSUM)
826 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
827 printf("RX SCTP checksum: ");
828 if (ports[port_id].dev_conf.rxmode.offloads &
829 DEV_RX_OFFLOAD_SCTP_CKSUM)
835 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
836 printf("RX Outer IPv4 checksum: ");
837 if (ports[port_id].dev_conf.rxmode.offloads &
838 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
844 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
845 printf("RX Outer UDP checksum: ");
846 if (ports[port_id].dev_conf.rxmode.offloads &
847 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
853 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
854 printf("Large receive offload: ");
855 if (ports[port_id].dev_conf.rxmode.offloads &
856 DEV_RX_OFFLOAD_TCP_LRO)
862 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
863 printf("HW timestamp: ");
864 if (ports[port_id].dev_conf.rxmode.offloads &
865 DEV_RX_OFFLOAD_TIMESTAMP)
871 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
872 printf("Rx Keep CRC: ");
873 if (ports[port_id].dev_conf.rxmode.offloads &
874 DEV_RX_OFFLOAD_KEEP_CRC)
880 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
881 printf("RX offload security: ");
882 if (ports[port_id].dev_conf.rxmode.offloads &
883 DEV_RX_OFFLOAD_SECURITY)
889 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
890 printf("VLAN insert: ");
891 if (ports[port_id].dev_conf.txmode.offloads &
892 DEV_TX_OFFLOAD_VLAN_INSERT)
898 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
899 printf("Double VLANs insert: ");
900 if (ports[port_id].dev_conf.txmode.offloads &
901 DEV_TX_OFFLOAD_QINQ_INSERT)
907 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
908 printf("TX IPv4 checksum: ");
909 if (ports[port_id].dev_conf.txmode.offloads &
910 DEV_TX_OFFLOAD_IPV4_CKSUM)
916 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
917 printf("TX UDP checksum: ");
918 if (ports[port_id].dev_conf.txmode.offloads &
919 DEV_TX_OFFLOAD_UDP_CKSUM)
925 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
926 printf("TX TCP checksum: ");
927 if (ports[port_id].dev_conf.txmode.offloads &
928 DEV_TX_OFFLOAD_TCP_CKSUM)
934 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
935 printf("TX SCTP checksum: ");
936 if (ports[port_id].dev_conf.txmode.offloads &
937 DEV_TX_OFFLOAD_SCTP_CKSUM)
943 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
944 printf("TX Outer IPv4 checksum: ");
945 if (ports[port_id].dev_conf.txmode.offloads &
946 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
952 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
953 printf("TX TCP segmentation: ");
954 if (ports[port_id].dev_conf.txmode.offloads &
955 DEV_TX_OFFLOAD_TCP_TSO)
961 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
962 printf("TX UDP segmentation: ");
963 if (ports[port_id].dev_conf.txmode.offloads &
964 DEV_TX_OFFLOAD_UDP_TSO)
970 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
971 printf("TSO for VXLAN tunnel packet: ");
972 if (ports[port_id].dev_conf.txmode.offloads &
973 DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
979 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
980 printf("TSO for GRE tunnel packet: ");
981 if (ports[port_id].dev_conf.txmode.offloads &
982 DEV_TX_OFFLOAD_GRE_TNL_TSO)
988 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
989 printf("TSO for IPIP tunnel packet: ");
990 if (ports[port_id].dev_conf.txmode.offloads &
991 DEV_TX_OFFLOAD_IPIP_TNL_TSO)
997 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
998 printf("TSO for GENEVE tunnel packet: ");
999 if (ports[port_id].dev_conf.txmode.offloads &
1000 DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
1006 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
1007 printf("IP tunnel TSO: ");
1008 if (ports[port_id].dev_conf.txmode.offloads &
1009 DEV_TX_OFFLOAD_IP_TNL_TSO)
1015 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
1016 printf("UDP tunnel TSO: ");
1017 if (ports[port_id].dev_conf.txmode.offloads &
1018 DEV_TX_OFFLOAD_UDP_TNL_TSO)
1024 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
1025 printf("TX Outer UDP checksum: ");
1026 if (ports[port_id].dev_conf.txmode.offloads &
1027 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
1036 port_id_is_invalid(portid_t port_id, enum print_warning warning)
1040 if (port_id == (portid_t)RTE_PORT_ALL)
1043 RTE_ETH_FOREACH_DEV(pid)
1047 if (warning == ENABLED_WARN)
1048 printf("Invalid port %d\n", port_id);
1053 void print_valid_ports(void)
1057 printf("The valid ports array is [");
1058 RTE_ETH_FOREACH_DEV(pid) {
1065 vlan_id_is_invalid(uint16_t vlan_id)
1069 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
1074 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
1076 const struct rte_pci_device *pci_dev;
1077 const struct rte_bus *bus;
1080 if (reg_off & 0x3) {
1081 printf("Port register offset 0x%X not aligned on a 4-byte "
1087 if (!ports[port_id].dev_info.device) {
1088 printf("Invalid device\n");
1092 bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
1093 if (bus && !strcmp(bus->name, "pci")) {
1094 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
1096 printf("Not a PCI device\n");
1100 pci_len = pci_dev->mem_resource[0].len;
1101 if (reg_off >= pci_len) {
1102 printf("Port %d: register offset %u (0x%X) out of port PCI "
1103 "resource (length=%"PRIu64")\n",
1104 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
1111 reg_bit_pos_is_invalid(uint8_t bit_pos)
1115 printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
1119 #define display_port_and_reg_off(port_id, reg_off) \
1120 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
1123 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1125 display_port_and_reg_off(port_id, (unsigned)reg_off);
1126 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
1130 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
1135 if (port_id_is_invalid(port_id, ENABLED_WARN))
1137 if (port_reg_off_is_invalid(port_id, reg_off))
1139 if (reg_bit_pos_is_invalid(bit_x))
1141 reg_v = port_id_pci_reg_read(port_id, reg_off);
1142 display_port_and_reg_off(port_id, (unsigned)reg_off);
1143 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
1147 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
1148 uint8_t bit1_pos, uint8_t bit2_pos)
1154 if (port_id_is_invalid(port_id, ENABLED_WARN))
1156 if (port_reg_off_is_invalid(port_id, reg_off))
1158 if (reg_bit_pos_is_invalid(bit1_pos))
1160 if (reg_bit_pos_is_invalid(bit2_pos))
1162 if (bit1_pos > bit2_pos)
1163 l_bit = bit2_pos, h_bit = bit1_pos;
1165 l_bit = bit1_pos, h_bit = bit2_pos;
1167 reg_v = port_id_pci_reg_read(port_id, reg_off);
1170 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
1171 display_port_and_reg_off(port_id, (unsigned)reg_off);
1172 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
1173 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
1177 port_reg_display(portid_t port_id, uint32_t reg_off)
1181 if (port_id_is_invalid(port_id, ENABLED_WARN))
1183 if (port_reg_off_is_invalid(port_id, reg_off))
1185 reg_v = port_id_pci_reg_read(port_id, reg_off);
1186 display_port_reg_value(port_id, reg_off, reg_v);
1190 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
1195 if (port_id_is_invalid(port_id, ENABLED_WARN))
1197 if (port_reg_off_is_invalid(port_id, reg_off))
1199 if (reg_bit_pos_is_invalid(bit_pos))
1202 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
1205 reg_v = port_id_pci_reg_read(port_id, reg_off);
1207 reg_v &= ~(1 << bit_pos);
1209 reg_v |= (1 << bit_pos);
1210 port_id_pci_reg_write(port_id, reg_off, reg_v);
1211 display_port_reg_value(port_id, reg_off, reg_v);
1215 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
1216 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
1223 if (port_id_is_invalid(port_id, ENABLED_WARN))
1225 if (port_reg_off_is_invalid(port_id, reg_off))
1227 if (reg_bit_pos_is_invalid(bit1_pos))
1229 if (reg_bit_pos_is_invalid(bit2_pos))
1231 if (bit1_pos > bit2_pos)
1232 l_bit = bit2_pos, h_bit = bit1_pos;
1234 l_bit = bit1_pos, h_bit = bit2_pos;
1236 if ((h_bit - l_bit) < 31)
1237 max_v = (1 << (h_bit - l_bit + 1)) - 1;
1241 if (value > max_v) {
1242 printf("Invalid value %u (0x%x), must be <= %u (0x%x)\n",
1243 (unsigned)value, (unsigned)value,
1244 (unsigned)max_v, (unsigned)max_v);
1247 reg_v = port_id_pci_reg_read(port_id, reg_off);
1248 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1249 reg_v |= (value << l_bit); /* Set changed bits */
1250 port_id_pci_reg_write(port_id, reg_off, reg_v);
1251 display_port_reg_value(port_id, reg_off, reg_v);
1255 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1257 if (port_id_is_invalid(port_id, ENABLED_WARN))
1259 if (port_reg_off_is_invalid(port_id, reg_off))
1261 port_id_pci_reg_write(port_id, reg_off, reg_v);
1262 display_port_reg_value(port_id, reg_off, reg_v);
1266 port_mtu_set(portid_t port_id, uint16_t mtu)
1269 struct rte_port *rte_port = &ports[port_id];
1270 struct rte_eth_dev_info dev_info;
1271 uint16_t eth_overhead;
1274 if (port_id_is_invalid(port_id, ENABLED_WARN))
1277 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1281 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
1282 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
1283 mtu, dev_info.min_mtu, dev_info.max_mtu);
1286 diag = rte_eth_dev_set_mtu(port_id, mtu);
1288 printf("Set MTU failed. diag=%d\n", diag);
1289 else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1291 * Ether overhead in driver is equal to the difference of
1292 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
1293 * device supports jumbo frame.
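* For example, a device reporting max_rx_pktlen = 1518 and max_mtu = 1500
* has an 18-byte overhead (14-byte Ethernet header + 4-byte CRC).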
1295 eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
1296 if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) {
1297 rte_port->dev_conf.rxmode.offloads |=
1298 DEV_RX_OFFLOAD_JUMBO_FRAME;
1299 rte_port->dev_conf.rxmode.max_rx_pkt_len =
1302 rte_port->dev_conf.rxmode.offloads &=
1303 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1307 /* Generic flow management functions. */
1309 /** Generate a port_flow entry from attributes/pattern/actions. */
1310 static struct port_flow *
1311 port_flow_new(const struct rte_flow_attr *attr,
1312 const struct rte_flow_item *pattern,
1313 const struct rte_flow_action *actions,
1314 struct rte_flow_error *error)
1316 const struct rte_flow_conv_rule rule = {
1318 .pattern_ro = pattern,
1319 .actions_ro = actions,
1321 struct port_flow *pf;
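/*
 * rte_flow_conv() is used in two passes: the first call with a NULL
 * destination only computes the buffer size needed for the converted
 * rule, the second call below copies attr/pattern/actions into pf->rule.
 */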
1324 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1327 pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1330 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1334 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1341 /** Print a message out of a flow error. */
1343 port_flow_complain(struct rte_flow_error *error)
1345 static const char *const errstrlist[] = {
1346 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1347 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1348 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1349 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1350 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1351 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1352 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1353 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1354 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1355 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1356 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1357 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1358 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1359 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1360 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1361 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1362 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1366 int err = rte_errno;
1368 if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1369 !errstrlist[error->type])
1370 errstr = "unknown type";
1372 errstr = errstrlist[error->type];
1373 printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
1374 error->type, errstr,
1375 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1376 error->cause), buf) : "",
1377 error->message ? error->message : "(no stated reason)",
1382 /** Validate flow rule. */
1384 port_flow_validate(portid_t port_id,
1385 const struct rte_flow_attr *attr,
1386 const struct rte_flow_item *pattern,
1387 const struct rte_flow_action *actions)
1389 struct rte_flow_error error;
1391 /* Poisoning to make sure PMDs update it in case of error. */
1392 memset(&error, 0x11, sizeof(error));
1393 if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1394 return port_flow_complain(&error);
1395 printf("Flow rule validated\n");
1399 /** Update age action context by port_flow pointer. */
1401 update_age_action_context(const struct rte_flow_action *actions,
1402 struct port_flow *pf)
1404 struct rte_flow_action_age *age = NULL;
1406 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1407 switch (actions->type) {
1408 case RTE_FLOW_ACTION_TYPE_AGE:
1409 age = (struct rte_flow_action_age *)
1410 (uintptr_t)actions->conf;
1419 /** Create flow rule. */
1421 port_flow_create(portid_t port_id,
1422 const struct rte_flow_attr *attr,
1423 const struct rte_flow_item *pattern,
1424 const struct rte_flow_action *actions)
1426 struct rte_flow *flow;
1427 struct rte_port *port;
1428 struct port_flow *pf;
1430 struct rte_flow_error error;
1432 port = &ports[port_id];
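/*
 * New flows are pushed at the head of port->flow_list, so the head always
 * carries the highest rule ID assigned so far; the next ID is head->id + 1.
 */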
1433 if (port->flow_list) {
1434 if (port->flow_list->id == UINT32_MAX) {
1435 printf("Highest rule ID is already assigned, delete"
1439 id = port->flow_list->id + 1;
1441 pf = port_flow_new(attr, pattern, actions, &error);
1443 return port_flow_complain(&error);
1444 update_age_action_context(actions, pf);
1445 /* Poisoning to make sure PMDs update it in case of error. */
1446 memset(&error, 0x22, sizeof(error));
1447 flow = rte_flow_create(port_id, attr, pattern, actions, &error);
1450 return port_flow_complain(&error);
1452 pf->next = port->flow_list;
1455 port->flow_list = pf;
1456 printf("Flow rule #%u created\n", pf->id);
1460 /** Destroy a number of flow rules. */
1462 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
1464 struct rte_port *port;
1465 struct port_flow **tmp;
1469 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1470 port_id == (portid_t)RTE_PORT_ALL)
1472 port = &ports[port_id];
1473 tmp = &port->flow_list;
1477 for (i = 0; i != n; ++i) {
1478 struct rte_flow_error error;
1479 struct port_flow *pf = *tmp;
1481 if (rule[i] != pf->id)
1484 * Poisoning to make sure PMDs update it in case
1487 memset(&error, 0x33, sizeof(error));
1488 if (rte_flow_destroy(port_id, pf->flow, &error)) {
1489 ret = port_flow_complain(&error);
1492 printf("Flow rule #%u destroyed\n", pf->id);
1498 tmp = &(*tmp)->next;
1504 /** Remove all flow rules. */
1506 port_flow_flush(portid_t port_id)
1508 struct rte_flow_error error;
1509 struct rte_port *port;
1512 /* Poisoning to make sure PMDs update it in case of error. */
1513 memset(&error, 0x44, sizeof(error));
1514 if (rte_flow_flush(port_id, &error)) {
1515 ret = port_flow_complain(&error);
1516 if (port_id_is_invalid(port_id, DISABLED_WARN) ||
1517 port_id == (portid_t)RTE_PORT_ALL)
1520 port = &ports[port_id];
1521 while (port->flow_list) {
1522 struct port_flow *pf = port->flow_list->next;
1524 free(port->flow_list);
1525 port->flow_list = pf;
1530 /** Dump all flow rules. */
1532 port_flow_dump(portid_t port_id, const char *file_name)
1535 FILE *file = stdout;
1536 struct rte_flow_error error;
1538 if (file_name && strlen(file_name)) {
1539 file = fopen(file_name, "w");
1541 printf("Failed to create file %s: %s\n", file_name,
1546 ret = rte_flow_dev_dump(port_id, file, &error);
1548 port_flow_complain(&error);
1549 printf("Failed to dump flow: %s\n", strerror(-ret));
1551 printf("Flow dump finished\n");
1552 if (file_name && strlen(file_name))
1557 /** Query a flow rule. */
1559 port_flow_query(portid_t port_id, uint32_t rule,
1560 const struct rte_flow_action *action)
1562 struct rte_flow_error error;
1563 struct rte_port *port;
1564 struct port_flow *pf;
1567 struct rte_flow_query_count count;
1571 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1572 port_id == (portid_t)RTE_PORT_ALL)
1574 port = &ports[port_id];
1575 for (pf = port->flow_list; pf; pf = pf->next)
1579 printf("Flow rule #%u not found\n", rule);
1582 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1583 &name, sizeof(name),
1584 (void *)(uintptr_t)action->type, &error);
1586 return port_flow_complain(&error);
1587 switch (action->type) {
1588 case RTE_FLOW_ACTION_TYPE_COUNT:
1591 printf("Cannot query action type %d (%s)\n",
1592 action->type, name);
1595 /* Poisoning to make sure PMDs update it in case of error. */
1596 memset(&error, 0x55, sizeof(error));
1597 memset(&query, 0, sizeof(query));
1598 if (rte_flow_query(port_id, pf->flow, action, &query, &error))
1599 return port_flow_complain(&error);
1600 switch (action->type) {
1601 case RTE_FLOW_ACTION_TYPE_COUNT:
1605 " hits: %" PRIu64 "\n"
1606 " bytes: %" PRIu64 "\n",
1608 query.count.hits_set,
1609 query.count.bytes_set,
1614 printf("Cannot display result for action type %d (%s)\n",
1615 action->type, name);
1621 /** List and, if requested, destroy all aged flows. */
1623 port_flow_aged(portid_t port_id, uint8_t destroy)
1626 int nb_context, total = 0, idx;
1627 struct rte_flow_error error;
1628 struct port_flow *pf;
1630 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1631 port_id == (portid_t)RTE_PORT_ALL)
1633 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
1634 printf("Port %u total aged flows: %d\n", port_id, total);
1636 port_flow_complain(&error);
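/*
 * The NULL/0 query above only returned the number of aged flows; fetch the
 * per-flow contexts now. Each context is expected to be the struct port_flow
 * pointer stored through the flow's AGE action.
 */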
1641 contexts = malloc(sizeof(void *) * total);
1642 if (contexts == NULL) {
1643 printf("Cannot allocate contexts for aged flow\n");
1646 printf("ID\tGroup\tPrio\tAttr\n");
1647 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
1648 if (nb_context != total) {
1649 printf("Port:%d get aged flows count(%d) != total(%d)\n",
1650 port_id, nb_context, total);
1654 for (idx = 0; idx < nb_context; idx++) {
1655 pf = (struct port_flow *)contexts[idx];
1657 printf("Error: get Null context in port %u\n", port_id);
1660 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t\n",
1662 pf->rule.attr->group,
1663 pf->rule.attr->priority,
1664 pf->rule.attr->ingress ? 'i' : '-',
1665 pf->rule.attr->egress ? 'e' : '-',
1666 pf->rule.attr->transfer ? 't' : '-');
1674 for (idx = 0; idx < nb_context; idx++) {
1675 pf = (struct port_flow *)contexts[idx];
1679 ret = port_flow_destroy(port_id, 1, &flow_id);
1683 printf("%d flows be destroyed\n", total);
1688 /** List flow rules. */
1690 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
1692 struct rte_port *port;
1693 struct port_flow *pf;
1694 struct port_flow *list = NULL;
1697 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1698 port_id == (portid_t)RTE_PORT_ALL)
1700 port = &ports[port_id];
1701 if (!port->flow_list)
1703 /* Sort flows by group, priority and ID. */
1704 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
1705 struct port_flow **tmp;
1706 const struct rte_flow_attr *curr = pf->rule.attr;
1709 /* Filter out unwanted groups. */
1710 for (i = 0; i != n; ++i)
1711 if (curr->group == group[i])
1716 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
1717 const struct rte_flow_attr *comp = (*tmp)->rule.attr;
1719 if (curr->group > comp->group ||
1720 (curr->group == comp->group &&
1721 curr->priority > comp->priority) ||
1722 (curr->group == comp->group &&
1723 curr->priority == comp->priority &&
1724 pf->id > (*tmp)->id))
1731 printf("ID\tGroup\tPrio\tAttr\tRule\n");
1732 for (pf = list; pf != NULL; pf = pf->tmp) {
1733 const struct rte_flow_item *item = pf->rule.pattern;
1734 const struct rte_flow_action *action = pf->rule.actions;
1737 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
1739 pf->rule.attr->group,
1740 pf->rule.attr->priority,
1741 pf->rule.attr->ingress ? 'i' : '-',
1742 pf->rule.attr->egress ? 'e' : '-',
1743 pf->rule.attr->transfer ? 't' : '-');
1744 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1745 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
1746 &name, sizeof(name),
1747 (void *)(uintptr_t)item->type,
1750 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
1751 printf("%s ", name);
1755 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
1756 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1757 &name, sizeof(name),
1758 (void *)(uintptr_t)action->type,
1761 if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
1762 printf(" %s", name);
1769 /** Restrict ingress traffic to the defined flow rules. */
1771 port_flow_isolate(portid_t port_id, int set)
1773 struct rte_flow_error error;
1775 /* Poisoning to make sure PMDs update it in case of error. */
1776 memset(&error, 0x66, sizeof(error));
1777 if (rte_flow_isolate(port_id, set, &error))
1778 return port_flow_complain(&error);
1779 printf("Ingress traffic on port %u is %s to the defined flow rules\n",
1781 set ? "now restricted" : "not restricted anymore");
1786 * RX/TX ring descriptors display functions.
1789 rx_queue_id_is_invalid(queueid_t rxq_id)
1791 if (rxq_id < nb_rxq)
1793 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
1798 tx_queue_id_is_invalid(queueid_t txq_id)
1800 if (txq_id < nb_txq)
1802 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq);
1807 rx_desc_id_is_invalid(uint16_t rxdesc_id)
1809 if (rxdesc_id < nb_rxd)
1811 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
1817 tx_desc_id_is_invalid(uint16_t txdesc_id)
1819 if (txdesc_id < nb_txd)
1821 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
1826 static const struct rte_memzone *
1827 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
1829 char mz_name[RTE_MEMZONE_NAMESIZE];
1830 const struct rte_memzone *mz;
1832 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
1833 port_id, q_id, ring_name);
1834 mz = rte_memzone_lookup(mz_name);
1836 printf("%s ring memory zoneof (port %d, queue %d) not"
1837 "found (zone name = %s\n",
1838 ring_name, port_id, q_id, mz_name);
1842 union igb_ring_dword {
1845 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1855 struct igb_ring_desc_32_bytes {
1856 union igb_ring_dword lo_dword;
1857 union igb_ring_dword hi_dword;
1858 union igb_ring_dword resv1;
1859 union igb_ring_dword resv2;
1862 struct igb_ring_desc_16_bytes {
1863 union igb_ring_dword lo_dword;
1864 union igb_ring_dword hi_dword;
1868 ring_rxd_display_dword(union igb_ring_dword dword)
1870 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
1871 (unsigned)dword.words.hi);
1875 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
1876 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1879 __rte_unused portid_t port_id,
1883 struct igb_ring_desc_16_bytes *ring =
1884 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1885 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1887 struct rte_eth_dev_info dev_info;
1889 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1893 if (strstr(dev_info.driver_name, "i40e") != NULL) {
1894 /* 32 bytes RX descriptor, i40e only */
1895 struct igb_ring_desc_32_bytes *ring =
1896 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
1897 ring[desc_id].lo_dword.dword =
1898 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1899 ring_rxd_display_dword(ring[desc_id].lo_dword);
1900 ring[desc_id].hi_dword.dword =
1901 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1902 ring_rxd_display_dword(ring[desc_id].hi_dword);
1903 ring[desc_id].resv1.dword =
1904 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
1905 ring_rxd_display_dword(ring[desc_id].resv1);
1906 ring[desc_id].resv2.dword =
1907 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
1908 ring_rxd_display_dword(ring[desc_id].resv2);
1913 /* 16 bytes RX descriptor */
1914 ring[desc_id].lo_dword.dword =
1915 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1916 ring_rxd_display_dword(ring[desc_id].lo_dword);
1917 ring[desc_id].hi_dword.dword =
1918 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1919 ring_rxd_display_dword(ring[desc_id].hi_dword);
1923 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
1925 struct igb_ring_desc_16_bytes *ring;
1926 struct igb_ring_desc_16_bytes txd;
1928 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1929 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1930 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1931 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
1932 (unsigned)txd.lo_dword.words.lo,
1933 (unsigned)txd.lo_dword.words.hi,
1934 (unsigned)txd.hi_dword.words.lo,
1935 (unsigned)txd.hi_dword.words.hi);
1939 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
1941 const struct rte_memzone *rx_mz;
1943 if (port_id_is_invalid(port_id, ENABLED_WARN))
1945 if (rx_queue_id_is_invalid(rxq_id))
1947 if (rx_desc_id_is_invalid(rxd_id))
1949 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
1952 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
1956 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
1958 const struct rte_memzone *tx_mz;
1960 if (port_id_is_invalid(port_id, ENABLED_WARN))
1962 if (tx_queue_id_is_invalid(txq_id))
1964 if (tx_desc_id_is_invalid(txd_id))
1966 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
1969 ring_tx_descriptor_display(tx_mz, txd_id);
1973 fwd_lcores_config_display(void)
1977 printf("List of forwarding lcores:");
1978 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
1979 printf(" %2u", fwd_lcores_cpuids[lc_id]);
1983 rxtx_config_display(void)
1988 printf(" %s packet forwarding%s packets/burst=%d\n",
1989 cur_fwd_eng->fwd_mode_name,
1990 retry_enabled == 0 ? "" : " with retry",
1993 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
1994 printf(" packet len=%u - nb packet segments=%d\n",
1995 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
1997 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
1998 nb_fwd_lcores, nb_fwd_ports);
2000 RTE_ETH_FOREACH_DEV(pid) {
2001 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
2002 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
2003 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
2004 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
2005 uint16_t nb_rx_desc_tmp;
2006 uint16_t nb_tx_desc_tmp;
2007 struct rte_eth_rxq_info rx_qinfo;
2008 struct rte_eth_txq_info tx_qinfo;
2011 /* per port config */
2012 printf(" port %d: RX queue number: %d Tx queue number: %d\n",
2013 (unsigned int)pid, nb_rxq, nb_txq);
2015 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
2016 ports[pid].dev_conf.rxmode.offloads,
2017 ports[pid].dev_conf.txmode.offloads);
2019 /* per rx queue config only for first queue to be less verbose */
2020 for (qid = 0; qid < 1; qid++) {
2021 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
2023 nb_rx_desc_tmp = nb_rx_desc[qid];
2025 nb_rx_desc_tmp = rx_qinfo.nb_desc;
2027 printf(" RX queue: %d\n", qid);
2028 printf(" RX desc=%d - RX free threshold=%d\n",
2029 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
2030 printf(" RX threshold registers: pthresh=%d hthresh=%d "
2032 rx_conf[qid].rx_thresh.pthresh,
2033 rx_conf[qid].rx_thresh.hthresh,
2034 rx_conf[qid].rx_thresh.wthresh);
2035 printf(" RX Offloads=0x%"PRIx64"\n",
2036 rx_conf[qid].offloads);
2039 /* per tx queue config only for first queue to be less verbose */
2040 for (qid = 0; qid < 1; qid++) {
2041 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
2043 nb_tx_desc_tmp = nb_tx_desc[qid];
2045 nb_tx_desc_tmp = tx_qinfo.nb_desc;
2047 printf(" TX queue: %d\n", qid);
2048 printf(" TX desc=%d - TX free threshold=%d\n",
2049 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
2050 printf(" TX threshold registers: pthresh=%d hthresh=%d "
2052 tx_conf[qid].tx_thresh.pthresh,
2053 tx_conf[qid].tx_thresh.hthresh,
2054 tx_conf[qid].tx_thresh.wthresh);
2055 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
2056 tx_conf[qid].offloads, tx_conf->tx_rs_thresh);
2062 port_rss_reta_info(portid_t port_id,
2063 struct rte_eth_rss_reta_entry64 *reta_conf,
2064 uint16_t nb_entries)
2066 uint16_t i, idx, shift;
2069 if (port_id_is_invalid(port_id, ENABLED_WARN))
2072 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
2074 printf("Failed to get RSS RETA info, return code = %d\n", ret);
2078 for (i = 0; i < nb_entries; i++) {
2079 idx = i / RTE_RETA_GROUP_SIZE;
2080 shift = i % RTE_RETA_GROUP_SIZE;
2081 if (!(reta_conf[idx].mask & (1ULL << shift)))
2083 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
2084 i, reta_conf[idx].reta[shift]);
2089 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
2093 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
2095 struct rte_eth_rss_conf rss_conf = {0};
2096 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
2100 struct rte_eth_dev_info dev_info;
2101 uint8_t hash_key_size;
2104 if (port_id_is_invalid(port_id, ENABLED_WARN))
2107 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2111 if (dev_info.hash_key_size > 0 &&
2112 dev_info.hash_key_size <= sizeof(rss_key))
2113 hash_key_size = dev_info.hash_key_size;
2115 printf("dev_info did not provide a valid hash key size\n");
2119 /* Get RSS hash key if asked to display it */
2120 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
2121 rss_conf.rss_key_len = hash_key_size;
2122 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2126 printf("port index %d invalid\n", port_id);
2129 printf("operation not supported by device\n");
2132 printf("operation failed - diag=%d\n", diag);
2137 rss_hf = rss_conf.rss_hf;
2139 printf("RSS disabled\n");
2142 printf("RSS functions:\n ");
2143 for (i = 0; rss_type_table[i].str; i++) {
2144 if (rss_hf & rss_type_table[i].rss_type)
2145 printf("%s ", rss_type_table[i].str);
2150 printf("RSS key:\n");
2151 for (i = 0; i < hash_key_size; i++)
2152 printf("%02X", rss_key[i]);
2157 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
2160 struct rte_eth_rss_conf rss_conf;
2164 rss_conf.rss_key = NULL;
2165 rss_conf.rss_key_len = hash_key_len;
2166 rss_conf.rss_hf = 0;
2167 for (i = 0; rss_type_table[i].str; i++) {
2168 if (!strcmp(rss_type_table[i].str, rss_type))
2169 rss_conf.rss_hf = rss_type_table[i].rss_type;
2171 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2173 rss_conf.rss_key = hash_key;
2174 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
2181 printf("port index %d invalid\n", port_id);
2184 printf("operation not supported by device\n");
2187 printf("operation failed - diag=%d\n", diag);
2193 * Setup forwarding configuration for each logical core.
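* For example, 10 streams over 4 forwarding lcores: the first 2 lcores get
* 2 streams each and the last 2 lcores get 3 streams each.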
2196 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
2198 streamid_t nb_fs_per_lcore;
2206 nb_fs = cfg->nb_fwd_streams;
2207 nb_fc = cfg->nb_fwd_lcores;
2208 if (nb_fs <= nb_fc) {
2209 nb_fs_per_lcore = 1;
2212 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
2213 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
2216 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
2218 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
2219 fwd_lcores[lc_id]->stream_idx = sm_id;
2220 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
2221 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2225 * Assign extra remaining streams, if any.
2227 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
2228 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
2229 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
2230 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
2231 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
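/*
 * Map an RX port index to its TX peer according to port_topology: paired
 * ports exchange traffic with their even/odd neighbour, chained ports
 * forward to the next port modulo nb_fwd_ports, and loop mode is expected
 * to send back on the receiving port itself.
 */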
2236 fwd_topology_tx_port_get(portid_t rxp)
2238 static int warning_once = 1;
2240 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
2242 switch (port_topology) {
2244 case PORT_TOPOLOGY_PAIRED:
2245 if ((rxp & 0x1) == 0) {
2246 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2249 printf("\nWarning! port-topology=paired"
2250 " and odd forward ports number,"
2251 " the last port will pair with"
2258 case PORT_TOPOLOGY_CHAINED:
2259 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2260 case PORT_TOPOLOGY_LOOP:
2266 simple_fwd_config_setup(void)
2270 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2271 cur_fwd_config.nb_fwd_streams =
2272 (streamid_t) cur_fwd_config.nb_fwd_ports;
2274 /* reinitialize forwarding streams */
2278 * In the simple forwarding test, the number of forwarding cores
2279 * must be lower or equal to the number of forwarding ports.
2281 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2282 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2283 cur_fwd_config.nb_fwd_lcores =
2284 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
2285 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2287 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2288 fwd_streams[i]->rx_port = fwd_ports_ids[i];
2289 fwd_streams[i]->rx_queue = 0;
2290 fwd_streams[i]->tx_port =
2291 fwd_ports_ids[fwd_topology_tx_port_get(i)];
2292 fwd_streams[i]->tx_queue = 0;
2293 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2294 fwd_streams[i]->retry_enabled = retry_enabled;
2299 * For the RSS forwarding test, all streams are distributed over the lcores. Each
2300 * stream is composed of an RX queue to poll on an RX port for input packets,
2301 * associated with a TX queue of a TX port to which forwarded packets are sent.
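* For example, 2 forwarding ports with 4 queues each give 8 streams; the
* number of forwarding lcores actually used is capped at the stream count.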
2304 rss_fwd_config_setup(void)
2315 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2316 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2317 cur_fwd_config.nb_fwd_streams =
2318 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2320 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2321 cur_fwd_config.nb_fwd_lcores =
2322 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2324 /* reinitialize forwarding streams */
2327 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2329 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2330 struct fwd_stream *fs;
2332 fs = fwd_streams[sm_id];
2333 txp = fwd_topology_tx_port_get(rxp);
2334 fs->rx_port = fwd_ports_ids[rxp];
2336 fs->tx_port = fwd_ports_ids[txp];
2338 fs->peer_addr = fs->tx_port;
2339 fs->retry_enabled = retry_enabled;
2341 if (rxp < nb_fwd_ports)
2349 * For the DCB forwarding test, each core is assigned one traffic class.
2351 * Each core is assigned a set of streams, each stream being composed of
2352 * an RX queue to poll on an RX port for input packets, associated with
2353 * a TX queue of a TX port to which forwarded packets are sent. All RX and
2354 * TX queues are mapped to the same traffic class.
2355 * If VMDQ and DCB co-exist, each traffic class on different POOLs shares
2359 dcb_fwd_config_setup(void)
2361 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2362 portid_t txp, rxp = 0;
2363 queueid_t txq, rxq = 0;
2365 uint16_t nb_rx_queue, nb_tx_queue;
2366 uint16_t i, j, k, sm_id = 0;
2369 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2370 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2371 cur_fwd_config.nb_fwd_streams =
2372 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2374 /* reinitialize forwarding streams */
2378 /* get the dcb info on the first RX and TX ports */
2379 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2380 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2382 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2383 fwd_lcores[lc_id]->stream_nb = 0;
2384 fwd_lcores[lc_id]->stream_idx = sm_id;
2385 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2386 /* if nb_queue is zero, it means this TC is
2387 * not enabled on the pool
2389 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2391 k = fwd_lcores[lc_id]->stream_nb +
2392 fwd_lcores[lc_id]->stream_idx;
2393 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2394 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2395 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2396 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2397 for (j = 0; j < nb_rx_queue; j++) {
2398 struct fwd_stream *fs;
2400 fs = fwd_streams[k + j];
2401 fs->rx_port = fwd_ports_ids[rxp];
2402 fs->rx_queue = rxq + j;
2403 fs->tx_port = fwd_ports_ids[txp];
2404 fs->tx_queue = txq + j % nb_tx_queue;
2405 fs->peer_addr = fs->tx_port;
2406 fs->retry_enabled = retry_enabled;
2408 fwd_lcores[lc_id]->stream_nb +=
2409 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2411 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2414 if (tc < rxp_dcb_info.nb_tcs)
2416 /* Restart from TC 0 on next RX port */
2418 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2420 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2423 if (rxp >= nb_fwd_ports)
2425 /* get the dcb information on next RX and TX ports */
2426 if ((rxp & 0x1) == 0)
2427 txp = (portid_t) (rxp + 1);
2429 txp = (portid_t) (rxp - 1);
2430 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2431 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2436 icmp_echo_config_setup(void)
2443 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2444 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2445 (nb_txq * nb_fwd_ports);
2447 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2448 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2449 cur_fwd_config.nb_fwd_streams =
2450 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2451 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2452 cur_fwd_config.nb_fwd_lcores =
2453 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2454 if (verbose_level > 0) {
2455 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2457 cur_fwd_config.nb_fwd_lcores,
2458 cur_fwd_config.nb_fwd_ports,
2459 cur_fwd_config.nb_fwd_streams);
2462 /* reinitialize forwarding streams */
2464 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2466 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2467 if (verbose_level > 0)
2468 printf(" core=%d: \n", lc_id);
2469 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2470 struct fwd_stream *fs;
2471 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2472 fs->rx_port = fwd_ports_ids[rxp];
2474 fs->tx_port = fs->rx_port;
2476 fs->peer_addr = fs->tx_port;
2477 fs->retry_enabled = retry_enabled;
2478 if (verbose_level > 0)
2479 printf(" stream=%d port=%d rxq=%d txq=%d\n",
2480 sm_id, fs->rx_port, fs->rx_queue,
2482 rxq = (queueid_t) (rxq + 1);
2483 if (rxq == nb_rxq) {
2485 rxp = (portid_t) (rxp + 1);
2491 #if defined RTE_LIBRTE_PMD_SOFTNIC
2493 softnic_fwd_config_setup(void)
2495 struct rte_port *port;
2496 portid_t pid, softnic_portid;
2498 uint8_t softnic_enable = 0;
2500 RTE_ETH_FOREACH_DEV(pid) {
2502 const char *driver = port->dev_info.driver_name;
2504 if (strcmp(driver, "net_softnic") == 0) {
2505 softnic_portid = pid;
2511 if (softnic_enable == 0) {
2512 printf("Softnic mode not configured(%s)!\n", __func__);
2516 cur_fwd_config.nb_fwd_ports = 1;
2517 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;
2519 /* Re-initialize forwarding streams */
2523 * In the softnic forwarding test, the number of forwarding cores
2524 * is set to one and the remaining cores are used for softnic packet processing.
2526 cur_fwd_config.nb_fwd_lcores = 1;
2527 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2529 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
2530 fwd_streams[i]->rx_port = softnic_portid;
2531 fwd_streams[i]->rx_queue = i;
2532 fwd_streams[i]->tx_port = softnic_portid;
2533 fwd_streams[i]->tx_queue = i;
2534 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2535 fwd_streams[i]->retry_enabled = retry_enabled;
2541 fwd_config_setup(void)
2543 cur_fwd_config.fwd_eng = cur_fwd_eng;
2544 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2545 icmp_echo_config_setup();
2549 #if defined RTE_LIBRTE_PMD_SOFTNIC
2550 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
2551 softnic_fwd_config_setup();
2556 if ((nb_rxq > 1) && (nb_txq > 1)) {
2558 dcb_fwd_config_setup();
2560 rss_fwd_config_setup();
2563 simple_fwd_config_setup();
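/*
 * Illustrative note (editor's assumption, not part of the original file):
 * with testpmd's default of a single RX and TX queue per port,
 * simple_fwd_config_setup() is selected. Starting testpmd with, e.g.,
 * "--rxq=4 --txq=4" makes both queue counts greater than one, so the RSS
 * configuration is used instead (or the DCB one when DCB has been
 * configured on the ports).
 */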
2567 mp_alloc_to_str(uint8_t mode)
2570 case MP_ALLOC_NATIVE:
2576 case MP_ALLOC_XMEM_HUGE:
2586 pkt_fwd_config_display(struct fwd_config *cfg)
2588 struct fwd_stream *fs;
2592 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2593 "NUMA support %s, MP allocation mode: %s\n",
2594 cfg->fwd_eng->fwd_mode_name,
2595 retry_enabled == 0 ? "" : " with retry",
2596 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2597 numa_support == 1 ? "enabled" : "disabled",
2598 mp_alloc_to_str(mp_alloc_type));
2601 printf("TX retry num: %u, delay between TX retries: %uus\n",
2602 burst_tx_retry_num, burst_tx_delay_time);
2603 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2604 printf("Logical Core %u (socket %u) forwards packets on "
2606 fwd_lcores_cpuids[lc_id],
2607 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2608 fwd_lcores[lc_id]->stream_nb);
2609 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2610 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2611 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
2612 "P=%d/Q=%d (socket %u) ",
2613 fs->rx_port, fs->rx_queue,
2614 ports[fs->rx_port].socket_id,
2615 fs->tx_port, fs->tx_queue,
2616 ports[fs->tx_port].socket_id);
2617 print_ethaddr("peer=",
2618 &peer_eth_addrs[fs->peer_addr]);
2626 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2628 struct rte_ether_addr new_peer_addr;
2629 if (!rte_eth_dev_is_valid_port(port_id)) {
2630 printf("Error: Invalid port number %i\n", port_id);
2633 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
2634 printf("Error: Invalid ethernet address: %s\n", peer_addr);
2637 peer_eth_addrs[port_id] = new_peer_addr;
2641 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2644 unsigned int lcore_cpuid;
2649 for (i = 0; i < nb_lc; i++) {
2650 lcore_cpuid = lcorelist[i];
2651 if (! rte_lcore_is_enabled(lcore_cpuid)) {
2652 printf("lcore %u not enabled\n", lcore_cpuid);
2655 if (lcore_cpuid == rte_get_master_lcore()) {
2656 printf("lcore %u cannot be masked on for running "
2657 "packet forwarding, which is the master lcore "
2658 "and reserved for command line parsing only\n",
2663 fwd_lcores_cpuids[i] = lcore_cpuid;
2665 if (record_now == 0) {
2669 nb_cfg_lcores = (lcoreid_t) nb_lc;
2670 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2671 printf("previous number of forwarding cores %u - changed to "
2672 "number of configured cores %u\n",
2673 (unsigned int) nb_fwd_lcores, nb_lc);
2674 nb_fwd_lcores = (lcoreid_t) nb_lc;
2681 set_fwd_lcores_mask(uint64_t lcoremask)
2683 unsigned int lcorelist[64];
2687 if (lcoremask == 0) {
2688 printf("Invalid NULL mask of cores\n");
2692 for (i = 0; i < 64; i++) {
2693 if (! ((uint64_t)(1ULL << i) & lcoremask))
2695 lcorelist[nb_lc++] = i;
2697 return set_fwd_lcores_list(lcorelist, nb_lc);
2701 set_fwd_lcores_number(uint16_t nb_lc)
2703 if (nb_lc > nb_cfg_lcores) {
2704 printf("nb fwd cores %u > %u (max. number of configured "
2705 "lcores) - ignored\n",
2706 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2709 nb_fwd_lcores = (lcoreid_t) nb_lc;
2710 printf("Number of forwarding cores set to %u\n",
2711 (unsigned int) nb_fwd_lcores);
2715 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2723 for (i = 0; i < nb_pt; i++) {
2724 port_id = (portid_t) portlist[i];
2725 if (port_id_is_invalid(port_id, ENABLED_WARN))
2728 fwd_ports_ids[i] = port_id;
2730 if (record_now == 0) {
2734 nb_cfg_ports = (portid_t) nb_pt;
2735 if (nb_fwd_ports != (portid_t) nb_pt) {
2736 printf("previous number of forwarding ports %u - changed to "
2737 "number of configured ports %u\n",
2738 (unsigned int) nb_fwd_ports, nb_pt);
2739 nb_fwd_ports = (portid_t) nb_pt;
2744 * Parse the user input and obtain the list of forwarding ports
2747 * String containing the user input. The user can specify
2748 * ports in these formats: 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
2749 * For example, if the user wants to use all the available
2750 * 4 ports in their system, then the input can be 0-3 or 0,1,2,3.
2751 * If the user wants to use only the ports 1,2 then the input
2753 * The valid separator characters are '-' and ','.
2754 * @param[out] values
2755 * This array will be filled with a list of port IDs
2756 * based on the user input
2757 * Note that duplicate entries are discarded; only the first
2758 * 'count' entries in this array are valid port IDs, and all the rest
2759 * keep their default values
2760 * @param[in] maxsize
2761 * This parameter denotes two things:
2762 * 1) the number of elements in the values array
2763 * 2) the maximum allowed value of each element in the values array
2765 * On success, returns total count of parsed port IDs
2766 * On failure, returns 0
2769 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
2771 unsigned int count = 0;
2775 unsigned int marked[maxsize];
2777 if (list == NULL || values == NULL)
2780 for (i = 0; i < (int)maxsize; i++)
2786 /* Remove blank spaces, if any */
2787 while (isblank(*list))
2792 value = strtol(list, &end, 10);
2793 if (errno || end == NULL)
2795 if (value < 0 || value >= (int)maxsize)
2797 while (isblank(*end))
2799 if (*end == '-' && min == INT_MAX) {
2801 } else if ((*end == ',') || (*end == '\0')) {
2805 for (i = min; i <= max; i++) {
2806 if (count < maxsize) {
2818 } while (*end != '\0');
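/*
 * Hypothetical usage sketch (editor's addition, not part of the original
 * file): exercising parse_port_list() with a mixed range/list expression.
 * The helper name below is illustrative only.
 */
static void
example_parse_port_list(void)
{
	unsigned int values[RTE_MAX_ETHPORTS];
	unsigned int count;

	/* "0-2,5" is expected to fill values[] with {0, 1, 2, 5} and
	 * return a count of 4; duplicates such as "1,1-2" collapse. */
	count = parse_port_list("0-2,5", values, RTE_MAX_ETHPORTS);
	printf("parsed %u port ids\n", count);
}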
2824 parse_fwd_portlist(const char *portlist)
2826 unsigned int portcount;
2827 unsigned int portindex[RTE_MAX_ETHPORTS];
2828 unsigned int i, valid_port_count = 0;
2830 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
2832 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
2835 * Here we verify the validity of the ports
2836 * and thereby calculate the total number of
2839 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
2840 if (rte_eth_dev_is_valid_port(portindex[i])) {
2841 portindex[valid_port_count] = portindex[i];
2846 set_fwd_ports_list(portindex, valid_port_count);
2850 set_fwd_ports_mask(uint64_t portmask)
2852 unsigned int portlist[64];
2856 if (portmask == 0) {
2857 printf("Invalid NULL mask of ports\n");
2861 RTE_ETH_FOREACH_DEV(i) {
2862 if (! ((uint64_t)(1ULL << i) & portmask))
2864 portlist[nb_pt++] = i;
2866 set_fwd_ports_list(portlist, nb_pt);
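/*
 * Illustrative example (editor's sketch, not part of the original file):
 * a portmask of 0x5 selects ports 0 and 2, so set_fwd_ports_mask(0x5) is
 * expected to end up calling set_fwd_ports_list() with {0, 2}, provided
 * both ports exist, much like the testpmd "--portmask" option would.
 */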
2870 set_fwd_ports_number(uint16_t nb_pt)
2872 if (nb_pt > nb_cfg_ports) {
2873 printf("nb fwd ports %u > %u (number of configured "
2874 "ports) - ignored\n",
2875 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2878 nb_fwd_ports = (portid_t) nb_pt;
2879 printf("Number of forwarding ports set to %u\n",
2880 (unsigned int) nb_fwd_ports);
2884 port_is_forwarding(portid_t port_id)
2888 if (port_id_is_invalid(port_id, ENABLED_WARN))
2891 for (i = 0; i < nb_fwd_ports; i++) {
2892 if (fwd_ports_ids[i] == port_id)
2900 set_nb_pkt_per_burst(uint16_t nb)
2902 if (nb > MAX_PKT_BURST) {
2903 printf("nb pkt per burst: %u > %u (maximum packet per burst) "
2905 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2908 nb_pkt_per_burst = nb;
2909 printf("Number of packets per burst set to %u\n",
2910 (unsigned int) nb_pkt_per_burst);
2914 tx_split_get_name(enum tx_pkt_split split)
2918 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2919 if (tx_split_name[i].split == split)
2920 return tx_split_name[i].name;
2926 set_tx_pkt_split(const char *name)
2930 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2931 if (strcmp(tx_split_name[i].name, name) == 0) {
2932 tx_pkt_split = tx_split_name[i].split;
2936 printf("unknown value: \"%s\"\n", name);
2940 show_tx_pkt_segments(void)
2946 split = tx_split_get_name(tx_pkt_split);
2948 printf("Number of segments: %u\n", n);
2949 printf("Segment sizes: ");
2950 for (i = 0; i != n - 1; i++)
2951 printf("%hu,", tx_pkt_seg_lengths[i]);
2952 printf("%hu\n", tx_pkt_seg_lengths[i]);
2953 printf("Split packet: %s\n", split);
2957 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2959 uint16_t tx_pkt_len;
2962 if (nb_segs >= (unsigned) nb_txd) {
2963 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
2964 nb_segs, (unsigned int) nb_txd);
2969 * Check that each segment length is greater than or equal to
2970 * the mbuf data size.
2971 * Check also that the total packet length is greater than or equal to the
2972 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
2976 for (i = 0; i < nb_segs; i++) {
2977 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2978 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2979 i, seg_lengths[i], (unsigned) mbuf_data_size);
2982 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2984 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
2985 printf("total packet length=%u < %d - give up\n",
2986 (unsigned) tx_pkt_len,
2987 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
2991 for (i = 0; i < nb_segs; i++)
2992 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2994 tx_pkt_length = tx_pkt_len;
2995 tx_pkt_nb_segs = (uint8_t) nb_segs;
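/*
 * Illustrative example (editor's sketch, not part of the original file):
 * two segments of 64 and 128 bytes give a 192-byte TX packet, which passes
 * the minimum-length check above (sizeof(struct rte_ether_hdr) + 20 + 8 =
 * 42 bytes for an empty UDP/IPv4 packet). In testpmd this is typically
 * driven by the "set txpkts 64,128" CLI command (assumption about the CLI
 * syntax). The helper name below is illustrative only.
 */
static void
example_set_tx_pkt_segments(void)
{
	unsigned int seg_lengths[] = { 64, 128 };

	/* 2 segments, 192 bytes in total -> passes the 42-byte minimum. */
	set_tx_pkt_segments(seg_lengths, 2);
}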
2999 setup_gro(const char *onoff, portid_t port_id)
3001 if (!rte_eth_dev_is_valid_port(port_id)) {
3002 printf("invalid port id %u\n", port_id);
3005 if (test_done == 0) {
3006 printf("Before enable/disable GRO,"
3007 " please stop forwarding first\n");
3010 if (strcmp(onoff, "on") == 0) {
3011 if (gro_ports[port_id].enable != 0) {
3012 printf("Port %u has enabled GRO. Please"
3013 " disable GRO first\n", port_id);
3016 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3017 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
3018 gro_ports[port_id].param.max_flow_num =
3019 GRO_DEFAULT_FLOW_NUM;
3020 gro_ports[port_id].param.max_item_per_flow =
3021 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
3023 gro_ports[port_id].enable = 1;
3025 if (gro_ports[port_id].enable == 0) {
3026 printf("Port %u has disabled GRO\n", port_id);
3029 gro_ports[port_id].enable = 0;
3034 setup_gro_flush_cycles(uint8_t cycles)
3036 if (test_done == 0) {
3037 printf("Before change flush interval for GRO,"
3038 " please stop forwarding first.\n");
3042 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
3043 GRO_DEFAULT_FLUSH_CYCLES) {
3044 printf("The flushing cycle be in the range"
3045 " of 1 to %u. Revert to the default"
3047 GRO_MAX_FLUSH_CYCLES,
3048 GRO_DEFAULT_FLUSH_CYCLES);
3049 cycles = GRO_DEFAULT_FLUSH_CYCLES;
3052 gro_flush_cycles = cycles;
3056 show_gro(portid_t port_id)
3058 struct rte_gro_param *param;
3059 uint32_t max_pkts_num;
3061 param = &gro_ports[port_id].param;
3063 if (!rte_eth_dev_is_valid_port(port_id)) {
3064 printf("Invalid port id %u.\n", port_id);
3067 if (gro_ports[port_id].enable) {
3068 printf("GRO type: TCP/IPv4\n");
3069 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3070 max_pkts_num = param->max_flow_num *
3071 param->max_item_per_flow;
3073 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
3074 printf("Max number of packets to perform GRO: %u\n",
3076 printf("Flushing cycles: %u\n", gro_flush_cycles);
3078 printf("Port %u doesn't enable GRO.\n", port_id);
3082 setup_gso(const char *mode, portid_t port_id)
3084 if (!rte_eth_dev_is_valid_port(port_id)) {
3085 printf("invalid port id %u\n", port_id);
3088 if (strcmp(mode, "on") == 0) {
3089 if (test_done == 0) {
3090 printf("before enabling GSO,"
3091 " please stop forwarding first\n");
3094 gso_ports[port_id].enable = 1;
3095 } else if (strcmp(mode, "off") == 0) {
3096 if (test_done == 0) {
3097 printf("before disabling GSO,"
3098 " please stop forwarding first\n");
3101 gso_ports[port_id].enable = 0;
3106 list_pkt_forwarding_modes(void)
3108 static char fwd_modes[128] = "";
3109 const char *separator = "|";
3110 struct fwd_engine *fwd_eng;
3113 if (strlen(fwd_modes) == 0) {
3114 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3115 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3116 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3117 strncat(fwd_modes, separator,
3118 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3120 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3127 list_pkt_forwarding_retry_modes(void)
3129 static char fwd_modes[128] = "";
3130 const char *separator = "|";
3131 struct fwd_engine *fwd_eng;
3134 if (strlen(fwd_modes) == 0) {
3135 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3136 if (fwd_eng == &rx_only_engine)
3138 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3140 strlen(fwd_modes) - 1);
3141 strncat(fwd_modes, separator,
3143 strlen(fwd_modes) - 1);
3145 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3152 set_pkt_forwarding_mode(const char *fwd_mode_name)
3154 struct fwd_engine *fwd_eng;
3158 while ((fwd_eng = fwd_engines[i]) != NULL) {
3159 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
3160 printf("Set %s packet forwarding mode%s\n",
3162 retry_enabled == 0 ? "" : " with retry");
3163 cur_fwd_eng = fwd_eng;
3168 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
3172 add_rx_dump_callbacks(portid_t portid)
3174 struct rte_eth_dev_info dev_info;
3178 if (port_id_is_invalid(portid, ENABLED_WARN))
3181 ret = eth_dev_info_get_print_err(portid, &dev_info);
3185 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3186 if (!ports[portid].rx_dump_cb[queue])
3187 ports[portid].rx_dump_cb[queue] =
3188 rte_eth_add_rx_callback(portid, queue,
3189 dump_rx_pkts, NULL);
3193 add_tx_dump_callbacks(portid_t portid)
3195 struct rte_eth_dev_info dev_info;
3199 if (port_id_is_invalid(portid, ENABLED_WARN))
3202 ret = eth_dev_info_get_print_err(portid, &dev_info);
3206 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3207 if (!ports[portid].tx_dump_cb[queue])
3208 ports[portid].tx_dump_cb[queue] =
3209 rte_eth_add_tx_callback(portid, queue,
3210 dump_tx_pkts, NULL);
3214 remove_rx_dump_callbacks(portid_t portid)
3216 struct rte_eth_dev_info dev_info;
3220 if (port_id_is_invalid(portid, ENABLED_WARN))
3223 ret = eth_dev_info_get_print_err(portid, &dev_info);
3227 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3228 if (ports[portid].rx_dump_cb[queue]) {
3229 rte_eth_remove_rx_callback(portid, queue,
3230 ports[portid].rx_dump_cb[queue]);
3231 ports[portid].rx_dump_cb[queue] = NULL;
3236 remove_tx_dump_callbacks(portid_t portid)
3238 struct rte_eth_dev_info dev_info;
3242 if (port_id_is_invalid(portid, ENABLED_WARN))
3245 ret = eth_dev_info_get_print_err(portid, &dev_info);
3249 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3250 if (ports[portid].tx_dump_cb[queue]) {
3251 rte_eth_remove_tx_callback(portid, queue,
3252 ports[portid].tx_dump_cb[queue]);
3253 ports[portid].tx_dump_cb[queue] = NULL;
3258 configure_rxtx_dump_callbacks(uint16_t verbose)
3262 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3263 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
3267 RTE_ETH_FOREACH_DEV(portid)
3269 if (verbose == 1 || verbose > 2)
3270 add_rx_dump_callbacks(portid);
3272 remove_rx_dump_callbacks(portid);
3274 add_tx_dump_callbacks(portid);
3276 remove_tx_dump_callbacks(portid);
3281 set_verbose_level(uint16_t vb_level)
3283 printf("Change verbose level from %u to %u\n",
3284 (unsigned int) verbose_level, (unsigned int) vb_level);
3285 verbose_level = vb_level;
3286 configure_rxtx_dump_callbacks(verbose_level);
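/*
 * Illustrative note (editor's assumption, not part of the original file):
 * the verbosity level is usually changed at runtime with the testpmd CLI
 * command "set verbose <level>". Given the callback management above, a
 * level of 1 installs the RX dump callbacks only, while higher levels are
 * expected to also (or instead) install the TX dump callbacks.
 */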
3290 vlan_extend_set(portid_t port_id, int on)
3294 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3296 if (port_id_is_invalid(port_id, ENABLED_WARN))
3299 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3302 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
3303 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3305 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
3306 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3309 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3311 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
3312 "diag=%d\n", port_id, on, diag);
3313 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3317 rx_vlan_strip_set(portid_t port_id, int on)
3321 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3323 if (port_id_is_invalid(port_id, ENABLED_WARN))
3326 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3329 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
3330 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3332 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
3333 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3336 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3338 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
3339 "diag=%d\n", port_id, on, diag);
3340 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
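/*
 * Illustrative usage note (editor's assumption, not part of the original
 * file): rx_vlan_strip_set() is typically reached from the testpmd CLI
 * command "vlan set strip (on|off) (port_id)", e.g. "vlan set strip on 0"
 * to enable VLAN stripping on port 0; the function updates both the
 * ethdev VLAN offload flags and the cached rxmode offloads of the port.
 */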
3344 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
3348 if (port_id_is_invalid(port_id, ENABLED_WARN))
3351 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
3353 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
3354 "diag=%d\n", port_id, queue_id, on, diag);
3358 rx_vlan_filter_set(portid_t port_id, int on)
3362 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3364 if (port_id_is_invalid(port_id, ENABLED_WARN))
3367 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3370 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
3371 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3373 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
3374 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3377 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3379 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
3380 "diag=%d\n", port_id, on, diag);
3381 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3385 rx_vlan_qinq_strip_set(portid_t port_id, int on)
3389 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3391 if (port_id_is_invalid(port_id, ENABLED_WARN))
3394 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3397 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
3398 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3400 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
3401 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3404 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3406 printf("%s(port_pi=%d, on=%d) failed "
3407 "diag=%d\n", __func__, port_id, on, diag);
3408 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3412 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
3416 if (port_id_is_invalid(port_id, ENABLED_WARN))
3418 if (vlan_id_is_invalid(vlan_id))
3420 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
3423 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
3425 port_id, vlan_id, on, diag);
3430 rx_vlan_all_filter_set(portid_t port_id, int on)
3434 if (port_id_is_invalid(port_id, ENABLED_WARN))
3436 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
3437 if (rx_vft_set(port_id, vlan_id, on))
3443 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
3447 if (port_id_is_invalid(port_id, ENABLED_WARN))
3450 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
3454 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed "
3456 port_id, vlan_type, tp_id, diag);
3460 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
3462 struct rte_eth_dev_info dev_info;
3465 if (port_id_is_invalid(port_id, ENABLED_WARN))
3467 if (vlan_id_is_invalid(vlan_id))
3470 if (ports[port_id].dev_conf.txmode.offloads &
3471 DEV_TX_OFFLOAD_QINQ_INSERT) {
3472 printf("Error, as QinQ has been enabled.\n");
3476 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3480 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
3481 printf("Error: vlan insert is not supported by port %d\n",
3486 tx_vlan_reset(port_id);
3487 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
3488 ports[port_id].tx_vlan_id = vlan_id;
3492 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
3494 struct rte_eth_dev_info dev_info;
3497 if (port_id_is_invalid(port_id, ENABLED_WARN))
3499 if (vlan_id_is_invalid(vlan_id))
3501 if (vlan_id_is_invalid(vlan_id_outer))
3504 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3508 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
3509 printf("Error: qinq insert not supported by port %d\n",
3514 tx_vlan_reset(port_id);
3515 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
3516 DEV_TX_OFFLOAD_QINQ_INSERT);
3517 ports[port_id].tx_vlan_id = vlan_id;
3518 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
3522 tx_vlan_reset(portid_t port_id)
3524 if (port_id_is_invalid(port_id, ENABLED_WARN))
3526 ports[port_id].dev_conf.txmode.offloads &=
3527 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
3528 DEV_TX_OFFLOAD_QINQ_INSERT);
3529 ports[port_id].tx_vlan_id = 0;
3530 ports[port_id].tx_vlan_id_outer = 0;
3534 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
3536 if (port_id_is_invalid(port_id, ENABLED_WARN))
3539 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
3543 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
3546 uint8_t existing_mapping_found = 0;
3548 if (port_id_is_invalid(port_id, ENABLED_WARN))
3551 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
3554 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
3555 printf("map_value not in required range 0..%d\n",
3556 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
3560 if (!is_rx) { /*then tx*/
3561 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3562 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3563 (tx_queue_stats_mappings[i].queue_id == queue_id)) {
3564 tx_queue_stats_mappings[i].stats_counter_id = map_value;
3565 existing_mapping_found = 1;
3569 if (!existing_mapping_found) { /* A new additional mapping... */
3570 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
3571 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
3572 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
3573 nb_tx_queue_stats_mappings++;
3577 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3578 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3579 (rx_queue_stats_mappings[i].queue_id == queue_id)) {
3580 rx_queue_stats_mappings[i].stats_counter_id = map_value;
3581 existing_mapping_found = 1;
3585 if (!existing_mapping_found) { /* A new additional mapping... */
3586 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
3587 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
3588 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
3589 nb_rx_queue_stats_mappings++;
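/*
 * Illustrative usage note (editor's assumption, not part of the original
 * file): set_qmap() is typically driven by the testpmd CLI command
 * "set stat_qmap (tx|rx) (port_id) (queue_id) (qmapping)", e.g.
 * "set stat_qmap rx 0 2 5" to have RX queue 2 of port 0 counted in
 * per-queue statistics register 5; map_value must stay below
 * RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */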
3595 set_xstats_hide_zero(uint8_t on_off)
3597 xstats_hide_zero = on_off;
3601 print_fdir_mask(struct rte_eth_fdir_masks *mask)
3603 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
3605 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3606 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
3607 " tunnel_id: 0x%08x",
3608 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
3609 rte_be_to_cpu_32(mask->tunnel_id_mask));
3610 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3611 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
3612 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
3613 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
3615 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
3616 rte_be_to_cpu_16(mask->src_port_mask),
3617 rte_be_to_cpu_16(mask->dst_port_mask));
3619 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3620 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
3621 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
3622 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
3623 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
3625 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3626 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
3627 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
3628 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
3629 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
3636 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3638 struct rte_eth_flex_payload_cfg *cfg;
3641 for (i = 0; i < flex_conf->nb_payloads; i++) {
3642 cfg = &flex_conf->flex_set[i];
3643 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
3645 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
3646 printf("\n L2_PAYLOAD: ");
3647 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
3648 printf("\n L3_PAYLOAD: ");
3649 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
3650 printf("\n L4_PAYLOAD: ");
3652 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
3653 for (j = 0; j < num; j++)
3654 printf(" %-5u", cfg->src_offset[j]);
3660 flowtype_to_str(uint16_t flow_type)
3662 struct flow_type_info {
3668 static struct flow_type_info flowtype_str_table[] = {
3669 {"raw", RTE_ETH_FLOW_RAW},
3670 {"ipv4", RTE_ETH_FLOW_IPV4},
3671 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
3672 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
3673 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
3674 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
3675 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
3676 {"ipv6", RTE_ETH_FLOW_IPV6},
3677 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
3678 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
3679 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
3680 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
3681 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
3682 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
3683 {"port", RTE_ETH_FLOW_PORT},
3684 {"vxlan", RTE_ETH_FLOW_VXLAN},
3685 {"geneve", RTE_ETH_FLOW_GENEVE},
3686 {"nvgre", RTE_ETH_FLOW_NVGRE},
3687 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
3690 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
3691 if (flowtype_str_table[i].ftype == flow_type)
3692 return flowtype_str_table[i].str;
3699 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3701 struct rte_eth_fdir_flex_mask *mask;
3705 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
3706 mask = &flex_conf->flex_mask[i];
3707 p = flowtype_to_str(mask->flow_type);
3708 printf("\n %s:\t", p ? p : "unknown");
3709 for (j = 0; j < num; j++)
3710 printf(" %02x", mask->mask[j]);
3716 print_fdir_flow_type(uint32_t flow_types_mask)
3721 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3722 if (!(flow_types_mask & (1 << i)))
3724 p = flowtype_to_str(i);
3734 fdir_get_infos(portid_t port_id)
3736 struct rte_eth_fdir_stats fdir_stat;
3737 struct rte_eth_fdir_info fdir_info;
3740 static const char *fdir_stats_border = "########################";
3742 if (port_id_is_invalid(port_id, ENABLED_WARN))
3744 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
3746 printf("\n FDIR is not supported on port %-2d\n",
3751 memset(&fdir_info, 0, sizeof(fdir_info));
3752 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3753 RTE_ETH_FILTER_INFO, &fdir_info);
3754 memset(&fdir_stat, 0, sizeof(fdir_stat));
3755 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3756 RTE_ETH_FILTER_STATS, &fdir_stat);
3757 printf("\n %s FDIR infos for port %-2d %s\n",
3758 fdir_stats_border, port_id, fdir_stats_border);
3760 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
3761 printf(" PERFECT\n");
3762 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
3763 printf(" PERFECT-MAC-VLAN\n");
3764 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3765 printf(" PERFECT-TUNNEL\n");
3766 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
3767 printf(" SIGNATURE\n");
3769 printf(" DISABLE\n");
3770 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
3771 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
3772 printf(" SUPPORTED FLOW TYPE: ");
3773 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
3775 printf(" FLEX PAYLOAD INFO:\n");
3776 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
3777 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
3778 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
3779 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
3780 fdir_info.flex_payload_unit,
3781 fdir_info.max_flex_payload_segment_num,
3782 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
3784 print_fdir_mask(&fdir_info.mask);
3785 if (fdir_info.flex_conf.nb_payloads > 0) {
3786 printf(" FLEX PAYLOAD SRC OFFSET:");
3787 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3789 if (fdir_info.flex_conf.nb_flexmasks > 0) {
3790 printf(" FLEX MASK CFG:");
3791 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3793 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
3794 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
3795 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
3796 fdir_info.guarant_spc, fdir_info.best_spc);
3797 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
3798 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
3799 " add: %-10"PRIu64" remove: %"PRIu64"\n"
3800 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
3801 fdir_stat.collision, fdir_stat.free,
3802 fdir_stat.maxhash, fdir_stat.maxlen,
3803 fdir_stat.add, fdir_stat.remove,
3804 fdir_stat.f_add, fdir_stat.f_remove);
3805 printf(" %s############################%s\n",
3806 fdir_stats_border, fdir_stats_border);
3810 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3812 struct rte_port *port;
3813 struct rte_eth_fdir_flex_conf *flex_conf;
3816 port = &ports[port_id];
3817 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3818 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3819 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3824 if (i >= RTE_ETH_FLOW_MAX) {
3825 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3826 idx = flex_conf->nb_flexmasks;
3827 flex_conf->nb_flexmasks++;
3829 printf("The flex mask table is full. Can not set flex"
3830 " mask for flow_type(%u).", cfg->flow_type);
3834 rte_memcpy(&flex_conf->flex_mask[idx],
3836 sizeof(struct rte_eth_fdir_flex_mask));
3840 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3842 struct rte_port *port;
3843 struct rte_eth_fdir_flex_conf *flex_conf;
3846 port = &ports[port_id];
3847 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3848 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3849 if (cfg->type == flex_conf->flex_set[i].type) {
3854 if (i >= RTE_ETH_PAYLOAD_MAX) {
3855 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3856 idx = flex_conf->nb_payloads;
3857 flex_conf->nb_payloads++;
3859 printf("The flex payload table is full. Can not set"
3860 " flex payload for type(%u).", cfg->type);
3864 rte_memcpy(&flex_conf->flex_set[idx],
3866 sizeof(struct rte_eth_flex_payload_cfg));
3871 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3873 #ifdef RTE_LIBRTE_IXGBE_PMD
3877 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3879 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3883 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3884 is_rx ? "rx" : "tx", port_id, diag);
3887 printf("VF %s setting not supported for port %d\n",
3888 is_rx ? "Rx" : "Tx", port_id);
3894 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3897 struct rte_eth_link link;
3900 if (port_id_is_invalid(port_id, ENABLED_WARN))
3902 ret = eth_link_get_nowait_print_err(port_id, &link);
3905 if (rate > link.link_speed) {
3906 printf("Invalid rate value:%u bigger than link speed: %u\n",
3907 rate, link.link_speed);
3910 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3913 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3919 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
3921 int diag = -ENOTSUP;
3925 RTE_SET_USED(q_msk);
3927 #ifdef RTE_LIBRTE_IXGBE_PMD
3928 if (diag == -ENOTSUP)
3929 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
3932 #ifdef RTE_LIBRTE_BNXT_PMD
3933 if (diag == -ENOTSUP)
3934 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
3939 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
3945 * Functions to manage the set of filtered Multicast MAC addresses.
3947 * A pool of filtered multicast MAC addresses is associated with each port.
3948 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3949 * The address of the pool and the number of valid multicast MAC addresses
3950 * recorded in the pool are stored in the fields "mc_addr_pool" and
3951 * "mc_addr_nb" of the "rte_port" data structure.
3953 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
3954 * to be supplied a contiguous array of multicast MAC addresses.
3955 * To comply with this constraint, the set of multicast addresses recorded
3956 * into the pool are systematically compacted at the beginning of the pool.
3957 * Hence, when a multicast address is removed from the pool, all following
3958 * addresses, if any, are copied back to keep the set contiguous.
3960 #define MCAST_POOL_INC 32
3963 mcast_addr_pool_extend(struct rte_port *port)
3965 struct rte_ether_addr *mc_pool;
3966 size_t mc_pool_size;
3969 * If a free entry is available at the end of the pool, just
3970 * increment the number of recorded multicast addresses.
3972 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3978 * [re]allocate a pool with MCAST_POOL_INC more entries.
3979 * The previous test guarantees that port->mc_addr_nb is a multiple
3980 * of MCAST_POOL_INC.
3982 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
3984 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
3986 if (mc_pool == NULL) {
3987 printf("allocation of pool of %u multicast addresses failed\n",
3988 port->mc_addr_nb + MCAST_POOL_INC);
3992 port->mc_addr_pool = mc_pool;
3999 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
4001 if (mcast_addr_pool_extend(port) != 0)
4003 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
4007 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
4010 if (addr_idx == port->mc_addr_nb) {
4011 /* No need to recompact the set of multicast addresses. */
4012 if (port->mc_addr_nb == 0) {
4013 /* free the pool of multicast addresses. */
4014 free(port->mc_addr_pool);
4015 port->mc_addr_pool = NULL;
4019 memmove(&port->mc_addr_pool[addr_idx],
4020 &port->mc_addr_pool[addr_idx + 1],
4021 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
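/*
 * Illustrative example (editor's sketch, not part of the original file):
 * starting from a pool of three addresses {A, B, C} (mc_addr_nb = 3),
 * removing index 1 copies C down one slot so the pool becomes {A, C}
 * with mc_addr_nb = 2, keeping the array contiguous as required by
 * rte_eth_dev_set_mc_addr_list(). Removing the last remaining address
 * frees the pool entirely.
 */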
4025 eth_port_multicast_addr_list_set(portid_t port_id)
4027 struct rte_port *port;
4030 port = &ports[port_id];
4031 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
4034 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
4035 port_id, port->mc_addr_nb, diag);
4041 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
4043 struct rte_port *port;
4046 if (port_id_is_invalid(port_id, ENABLED_WARN))
4049 port = &ports[port_id];
4052 * Check that the added multicast MAC address is not already recorded
4053 * in the pool of multicast addresses.
4055 for (i = 0; i < port->mc_addr_nb; i++) {
4056 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
4057 printf("multicast address already filtered by port\n");
4062 mcast_addr_pool_append(port, mc_addr);
4063 if (eth_port_multicast_addr_list_set(port_id) < 0)
4064 /* Rollback on failure, remove the address from the pool */
4065 mcast_addr_pool_remove(port, i);
4069 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
4071 struct rte_port *port;
4074 if (port_id_is_invalid(port_id, ENABLED_WARN))
4077 port = &ports[port_id];
4080 * Search the pool of multicast MAC addresses for the removed address.
4082 for (i = 0; i < port->mc_addr_nb; i++) {
4083 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
4086 if (i == port->mc_addr_nb) {
4087 printf("multicast address not filtered by port %d\n", port_id);
4091 mcast_addr_pool_remove(port, i);
4092 if (eth_port_multicast_addr_list_set(port_id) < 0)
4093 /* Rollback on failure, add the address back into the pool */
4094 mcast_addr_pool_append(port, mc_addr);
4098 port_dcb_info_display(portid_t port_id)
4100 struct rte_eth_dcb_info dcb_info;
4103 static const char *border = "================";
4105 if (port_id_is_invalid(port_id, ENABLED_WARN))
4108 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
4110 printf("\n Failed to get dcb infos on port %-2d\n",
4114 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
4115 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
4117 for (i = 0; i < dcb_info.nb_tcs; i++)
4119 printf("\n Priority : ");
4120 for (i = 0; i < dcb_info.nb_tcs; i++)
4121 printf("\t%4d", dcb_info.prio_tc[i]);
4122 printf("\n BW percent :");
4123 for (i = 0; i < dcb_info.nb_tcs; i++)
4124 printf("\t%4d%%", dcb_info.tc_bws[i]);
4125 printf("\n RXQ base : ");
4126 for (i = 0; i < dcb_info.nb_tcs; i++)
4127 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
4128 printf("\n RXQ number :");
4129 for (i = 0; i < dcb_info.nb_tcs; i++)
4130 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
4131 printf("\n TXQ base : ");
4132 for (i = 0; i < dcb_info.nb_tcs; i++)
4133 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
4134 printf("\n TXQ number :");
4135 for (i = 0; i < dcb_info.nb_tcs; i++)
4136 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
4141 open_file(const char *file_path, uint32_t *size)
4143 int fd = open(file_path, O_RDONLY);
4145 uint8_t *buf = NULL;
4153 printf("%s: Failed to open %s\n", __func__, file_path);
4157 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
4159 printf("%s: File operations failed\n", __func__);
4163 pkg_size = st_buf.st_size;
4166 printf("%s: File operations failed\n", __func__);
4170 buf = (uint8_t *)malloc(pkg_size);
4173 printf("%s: Failed to malloc memory\n", __func__);
4177 ret = read(fd, buf, pkg_size);
4180 printf("%s: File read operation failed\n", __func__);
4194 save_file(const char *file_path, uint8_t *buf, uint32_t size)
4196 FILE *fh = fopen(file_path, "wb");
4199 printf("%s: Failed to open %s\n", __func__, file_path);
4203 if (fwrite(buf, 1, size, fh) != size) {
4205 printf("%s: File write operation failed\n", __func__);
4215 close_file(uint8_t *buf)
4226 port_queue_region_info_display(portid_t port_id, void *buf)
4228 #ifdef RTE_LIBRTE_I40E_PMD
4230 struct rte_pmd_i40e_queue_regions *info =
4231 (struct rte_pmd_i40e_queue_regions *)buf;
4232 static const char *queue_region_info_stats_border = "-------";
4234 if (!info->queue_region_number)
4235 printf("there is no region has been set before");
4237 printf("\n %s All queue region info for port=%2d %s",
4238 queue_region_info_stats_border, port_id,
4239 queue_region_info_stats_border);
4240 printf("\n queue_region_number: %-14u \n",
4241 info->queue_region_number);
4243 for (i = 0; i < info->queue_region_number; i++) {
4244 printf("\n region_id: %-14u queue_number: %-14u "
4245 "queue_start_index: %-14u \n",
4246 info->region[i].region_id,
4247 info->region[i].queue_num,
4248 info->region[i].queue_start_index);
4250 printf(" user_priority_num is %-14u :",
4251 info->region[i].user_priority_num);
4252 for (j = 0; j < info->region[i].user_priority_num; j++)
4253 printf(" %-14u ", info->region[i].user_priority[j]);
4255 printf("\n flowtype_num is %-14u :",
4256 info->region[i].flowtype_num);
4257 for (j = 0; j < info->region[i].flowtype_num; j++)
4258 printf(" %-14u ", info->region[i].hw_flowtype[j]);
4261 RTE_SET_USED(port_id);
4269 show_macs(portid_t port_id)
4271 char buf[RTE_ETHER_ADDR_FMT_SIZE];
4272 struct rte_eth_dev_info dev_info;
4273 struct rte_ether_addr *addr;
4274 uint32_t i, num_macs = 0;
4275 struct rte_eth_dev *dev;
4277 dev = &rte_eth_devices[port_id];
4279 rte_eth_dev_info_get(port_id, &dev_info);
4281 for (i = 0; i < dev_info.max_mac_addrs; i++) {
4282 addr = &dev->data->mac_addrs[i];
4284 /* skip zero address */
4285 if (rte_is_zero_ether_addr(addr))
4291 printf("Number of MAC address added: %d\n", num_macs);
4293 for (i = 0; i < dev_info.max_mac_addrs; i++) {
4294 addr = &dev->data->mac_addrs[i];
4296 /* skip zero address */
4297 if (rte_is_zero_ether_addr(addr))
4300 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
4301 printf(" %s\n", buf);
4306 show_mcast_macs(portid_t port_id)
4308 char buf[RTE_ETHER_ADDR_FMT_SIZE];
4309 struct rte_ether_addr *addr;
4310 struct rte_port *port;
4313 port = &ports[port_id];
4315 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb);
4317 for (i = 0; i < port->mc_addr_nb; i++) {
4318 addr = &port->mc_addr_pool[i];
4320 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
4321 printf(" %s\n", buf);