1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2013-2014 6WIND S.A.
13 #include <sys/queue.h>
14 #include <sys/types.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
34 #include <rte_interrupts.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
41 #include <rte_errno.h>
43 #include <rte_pmd_ixgbe.h>
46 #include <rte_pmd_i40e.h>
49 #include <rte_pmd_bnxt.h>
52 #include <rte_hexdump.h>
56 #define ETHDEV_FWVERS_LEN 32
58 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
59 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
61 #define CLOCK_TYPE_ID CLOCK_MONOTONIC
64 #define NS_PER_SEC 1E9
66 static char *flowtype_to_str(uint16_t flow_type);
69 enum tx_pkt_split split;
73 .split = TX_PKT_SPLIT_OFF,
77 .split = TX_PKT_SPLIT_ON,
81 .split = TX_PKT_SPLIT_RND,
86 const struct rss_type_info rss_type_table[] = {
87 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
88 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
89 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
90 ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
92 { "eth", ETH_RSS_ETH },
93 { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
94 { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
95 { "vlan", ETH_RSS_VLAN },
96 { "s-vlan", ETH_RSS_S_VLAN },
97 { "c-vlan", ETH_RSS_C_VLAN },
98 { "ipv4", ETH_RSS_IPV4 },
99 { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
100 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
101 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
102 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
103 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
104 { "ipv6", ETH_RSS_IPV6 },
105 { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
106 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
107 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
108 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
109 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
110 { "l2-payload", ETH_RSS_L2_PAYLOAD },
111 { "ipv6-ex", ETH_RSS_IPV6_EX },
112 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
113 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
114 { "port", ETH_RSS_PORT },
115 { "vxlan", ETH_RSS_VXLAN },
116 { "geneve", ETH_RSS_GENEVE },
117 { "nvgre", ETH_RSS_NVGRE },
118 { "ip", ETH_RSS_IP },
119 { "udp", ETH_RSS_UDP },
120 { "tcp", ETH_RSS_TCP },
121 { "sctp", ETH_RSS_SCTP },
122 { "tunnel", ETH_RSS_TUNNEL },
123 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
124 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
125 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
126 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
127 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
128 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
129 { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
130 { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
131 { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
132 { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
133 { "esp", ETH_RSS_ESP },
134 { "ah", ETH_RSS_AH },
135 { "l2tpv3", ETH_RSS_L2TPV3 },
136 { "pfcp", ETH_RSS_PFCP },
137 { "pppoe", ETH_RSS_PPPOE },
138 { "gtpu", ETH_RSS_GTPU },
139 { "ecpri", ETH_RSS_ECPRI },
140 { "mpls", ETH_RSS_MPLS },
144 static const struct {
145 enum rte_eth_fec_mode mode;
147 } fec_mode_name[] = {
149 .mode = RTE_ETH_FEC_NOFEC,
153 .mode = RTE_ETH_FEC_AUTO,
157 .mode = RTE_ETH_FEC_BASER,
161 .mode = RTE_ETH_FEC_RS,
167 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
169 char buf[RTE_ETHER_ADDR_FMT_SIZE];
170 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
171 printf("%s%s", name, buf);
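/* Display the basic statistics of a port and the RX/TX throughput since the last call. */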
175 nic_stats_display(portid_t port_id)
177 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
178 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
179 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
180 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
181 static uint64_t prev_ns[RTE_MAX_ETHPORTS];
182 struct timespec cur_time;
183 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
185 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
186 struct rte_eth_stats stats;
188 static const char *nic_stats_border = "########################";
190 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
194 rte_eth_stats_get(port_id, &stats);
195 printf("\n %s NIC statistics for port %-2d %s\n",
196 nic_stats_border, port_id, nic_stats_border);
198 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
199 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
200 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
201 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf);
202 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
203 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);
206 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
209 ns = cur_time.tv_sec * NS_PER_SEC;
210 ns += cur_time.tv_nsec;
212 if (prev_ns[port_id] != 0)
213 diff_ns = ns - prev_ns[port_id];
214 prev_ns[port_id] = ns;
217 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
218 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
219 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
220 (stats.opackets - prev_pkts_tx[port_id]) : 0;
221 prev_pkts_rx[port_id] = stats.ipackets;
222 prev_pkts_tx[port_id] = stats.opackets;
223 mpps_rx = diff_ns > 0 ?
224 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
225 mpps_tx = diff_ns > 0 ?
226 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;
228 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
229 (stats.ibytes - prev_bytes_rx[port_id]) : 0;
230 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
231 (stats.obytes - prev_bytes_tx[port_id]) : 0;
232 prev_bytes_rx[port_id] = stats.ibytes;
233 prev_bytes_tx[port_id] = stats.obytes;
234 mbps_rx = diff_ns > 0 ?
235 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
236 mbps_tx = diff_ns > 0 ?
237 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;
239 printf("\n Throughput (since last show)\n");
240 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
241 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
242 mpps_tx, mbps_tx * 8);
244 printf(" %s############################%s\n",
245 nic_stats_border, nic_stats_border);
249 nic_stats_clear(portid_t port_id)
253 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
258 ret = rte_eth_stats_reset(port_id);
260 printf("%s: Error: failed to reset stats (port %u): %s",
261 __func__, port_id, strerror(-ret));
265 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
269 printf("%s: Error: failed to get stats (port %u): %s",
270 __func__, port_id, strerror(ret));
273 printf("\n NIC statistics for port %d cleared\n", port_id);
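/* Display the extended statistics (xstats) of a port. */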
277 nic_xstats_display(portid_t port_id)
279 struct rte_eth_xstat *xstats;
280 int cnt_xstats, idx_xstat;
281 struct rte_eth_xstat_name *xstats_names;
283 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
287 printf("###### NIC extended statistics for port %-2d\n", port_id);
288 if (!rte_eth_dev_is_valid_port(port_id)) {
289 printf("Error: Invalid port number %i\n", port_id);
294 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
295 if (cnt_xstats < 0) {
296 printf("Error: Cannot get count of xstats\n");
300 /* Get id-name lookup table */
301 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
302 if (xstats_names == NULL) {
303 printf("Cannot allocate memory for xstats lookup\n");
306 if (cnt_xstats != rte_eth_xstats_get_names(
307 port_id, xstats_names, cnt_xstats)) {
308 printf("Error: Cannot get xstats lookup\n");
313 /* Get stats themselves */
314 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
315 if (xstats == NULL) {
316 printf("Cannot allocate memory for xstats\n");
320 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
321 printf("Error: Unable to get xstats\n");
328 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
329 if (xstats_hide_zero && !xstats[idx_xstat].value)
331 printf("%s: %"PRIu64"\n",
332 xstats_names[idx_xstat].name,
333 xstats[idx_xstat].value);
340 nic_xstats_clear(portid_t port_id)
344 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
349 ret = rte_eth_xstats_reset(port_id);
351 printf("%s: Error: failed to reset xstats (port %u): %s",
352 __func__, port_id, strerror(-ret));
356 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
360 printf("%s: Error: failed to get stats (port %u): %s",
361 __func__, port_id, strerror(ret));
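/* Display the configuration and state of one RX queue. */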
367 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
369 struct rte_eth_burst_mode mode;
370 struct rte_eth_rxq_info qinfo;
372 static const char *info_border = "*********************";
374 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
376 printf("Failed to retrieve information for port: %u, "
377 "RX queue: %hu\nerror desc: %s(%d)\n",
378 port_id, queue_id, strerror(-rc), rc);
382 printf("\n%s Infos for port %-2u, RX queue %-2u %s",
383 info_border, port_id, queue_id, info_border);
385 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
386 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
387 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
388 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
389 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
390 printf("\nRX drop packets: %s",
391 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
392 printf("\nRX deferred start: %s",
393 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
394 printf("\nRX scattered packets: %s",
395 (qinfo.scattered_rx != 0) ? "on" : "off");
396 if (qinfo.rx_buf_size != 0)
397 printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
398 printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
400 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
401 printf("\nBurst mode: %s%s",
403 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
404 " (per queue)" : "");
410 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
412 struct rte_eth_burst_mode mode;
413 struct rte_eth_txq_info qinfo;
415 static const char *info_border = "*********************";
417 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
419 printf("Failed to retrieve information for port: %u, "
420 "TX queue: %hu\nerror desc: %s(%d)\n",
421 port_id, queue_id, strerror(-rc), rc);
425 printf("\n%s Infos for port %-2u, TX queue %-2u %s",
426 info_border, port_id, queue_id, info_border);
428 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
429 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
430 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
431 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
432 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
433 printf("\nTX deferred start: %s",
434 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
435 printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
437 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
438 printf("\nBurst mode: %s%s",
440 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
441 " (per queue)" : "");
446 static int bus_match_all(const struct rte_bus *bus, const void *data)
454 device_infos_display_speeds(uint32_t speed_capa)
456 printf("\n\tDevice speed capability:");
457 if (speed_capa == ETH_LINK_SPEED_AUTONEG)
458 printf(" Autonegotiate (all speeds)");
459 if (speed_capa & ETH_LINK_SPEED_FIXED)
460 printf(" Disable autonegotiate (fixed speed) ");
461 if (speed_capa & ETH_LINK_SPEED_10M_HD)
462 printf(" 10 Mbps half-duplex ");
463 if (speed_capa & ETH_LINK_SPEED_10M)
464 printf(" 10 Mbps full-duplex ");
465 if (speed_capa & ETH_LINK_SPEED_100M_HD)
466 printf(" 100 Mbps half-duplex ");
467 if (speed_capa & ETH_LINK_SPEED_100M)
468 printf(" 100 Mbps full-duplex ");
469 if (speed_capa & ETH_LINK_SPEED_1G)
471 if (speed_capa & ETH_LINK_SPEED_2_5G)
472 printf(" 2.5 Gbps ");
473 if (speed_capa & ETH_LINK_SPEED_5G)
475 if (speed_capa & ETH_LINK_SPEED_10G)
477 if (speed_capa & ETH_LINK_SPEED_20G)
479 if (speed_capa & ETH_LINK_SPEED_25G)
481 if (speed_capa & ETH_LINK_SPEED_40G)
483 if (speed_capa & ETH_LINK_SPEED_50G)
485 if (speed_capa & ETH_LINK_SPEED_56G)
487 if (speed_capa & ETH_LINK_SPEED_100G)
488 printf(" 100 Gbps ");
489 if (speed_capa & ETH_LINK_SPEED_200G)
490 printf(" 200 Gbps ");
494 device_infos_display(const char *identifier)
496 static const char *info_border = "*********************";
497 struct rte_bus *start = NULL, *next;
498 struct rte_dev_iterator dev_iter;
499 char name[RTE_ETH_NAME_MAX_LEN];
500 struct rte_ether_addr mac_addr;
501 struct rte_device *dev;
502 struct rte_devargs da;
504 struct rte_eth_dev_info dev_info;
507 memset(&da, 0, sizeof(da));
511 if (rte_devargs_parsef(&da, "%s", identifier)) {
512 printf("cannot parse identifier\n");
517 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
520 if (identifier && da.bus != next)
523 /* Skip buses that don't have an iterate method */
524 if (!next->dev_iterate)
527 snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
528 RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
532 /* Check for matching device if identifier is present */
534 strncmp(da.name, dev->name, strlen(dev->name)))
536 printf("\n%s Infos for device %s %s\n",
537 info_border, dev->name, info_border);
538 printf("Bus name: %s", dev->bus->name);
539 printf("\nDriver name: %s", dev->driver->name);
540 printf("\nDevargs: %s",
541 dev->devargs ? dev->devargs->args : "");
542 printf("\nConnect to socket: %d", dev->numa_node);
545 /* List ports with matching device name */
546 RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
547 printf("\n\tPort id: %-2d", port_id);
548 if (eth_macaddr_get_print_err(port_id,
550 print_ethaddr("\n\tMAC address: ",
552 rte_eth_dev_get_name_by_port(port_id, name);
553 printf("\n\tDevice name: %s", name);
554 if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
555 device_infos_display_speeds(dev_info.speed_capa);
560 rte_devargs_reset(&da);
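/* Display detailed information about one ethdev port. */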
564 port_infos_display(portid_t port_id)
566 struct rte_port *port;
567 struct rte_ether_addr mac_addr;
568 struct rte_eth_link link;
569 struct rte_eth_dev_info dev_info;
571 struct rte_mempool * mp;
572 static const char *info_border = "*********************";
574 char name[RTE_ETH_NAME_MAX_LEN];
576 char fw_version[ETHDEV_FWVERS_LEN];
578 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
582 port = &ports[port_id];
583 ret = eth_link_get_nowait_print_err(port_id, &link);
587 ret = eth_dev_info_get_print_err(port_id, &dev_info);
591 printf("\n%s Infos for port %-2d %s\n",
592 info_border, port_id, info_border);
593 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
594 print_ethaddr("MAC address: ", &mac_addr);
595 rte_eth_dev_get_name_by_port(port_id, name);
596 printf("\nDevice name: %s", name);
597 printf("\nDriver name: %s", dev_info.driver_name);
599 if (rte_eth_dev_fw_version_get(port_id, fw_version,
600 ETHDEV_FWVERS_LEN) == 0)
601 printf("\nFirmware-version: %s", fw_version);
603 printf("\nFirmware-version: %s", "not available");
605 if (dev_info.device->devargs && dev_info.device->devargs->args)
606 printf("\nDevargs: %s", dev_info.device->devargs->args);
607 printf("\nConnect to socket: %u", port->socket_id);
609 if (port_numa[port_id] != NUMA_NO_CONFIG) {
610 mp = mbuf_pool_find(port_numa[port_id], 0);
612 printf("\nmemory allocation on the socket: %d",
615 printf("\nmemory allocation on the socket: %u", port->socket_id);
617 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
618 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
619 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
620 ("full-duplex") : ("half-duplex"));
622 if (!rte_eth_dev_get_mtu(port_id, &mtu))
623 printf("MTU: %u\n", mtu);
625 printf("Promiscuous mode: %s\n",
626 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
627 printf("Allmulticast mode: %s\n",
628 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
629 printf("Maximum number of MAC addresses: %u\n",
630 (unsigned int)(port->dev_info.max_mac_addrs));
631 printf("Maximum number of MAC addresses of hash filtering: %u\n",
632 (unsigned int)(port->dev_info.max_hash_mac_addrs));
634 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
635 if (vlan_offload >= 0) {
636 printf("VLAN offload:\n");
637 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
638 printf(" strip on, ");
640 printf(" strip off, ");
642 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
643 printf("filter on, ");
645 printf("filter off, ");
647 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
648 printf("extend on, ");
650 printf("extend off, ");
652 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
653 printf("qinq strip on\n");
655 printf("qinq strip off\n");
658 if (dev_info.hash_key_size > 0)
659 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
660 if (dev_info.reta_size > 0)
661 printf("Redirection table size: %u\n", dev_info.reta_size);
662 if (!dev_info.flow_type_rss_offloads)
663 printf("No RSS offload flow type is supported.\n");
668 printf("Supported RSS offload flow types:\n");
669 for (i = RTE_ETH_FLOW_UNKNOWN + 1;
670 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
671 if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
673 p = flowtype_to_str(i);
677 printf(" user defined %d\n", i);
681 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
682 printf("Maximum configurable length of RX packet: %u\n",
683 dev_info.max_rx_pktlen);
684 printf("Maximum configurable size of LRO aggregated packet: %u\n",
685 dev_info.max_lro_pkt_size);
686 if (dev_info.max_vfs)
687 printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
688 if (dev_info.max_vmdq_pools)
689 printf("Maximum number of VMDq pools: %u\n",
690 dev_info.max_vmdq_pools);
692 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
693 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
694 printf("Max possible number of RXDs per queue: %hu\n",
695 dev_info.rx_desc_lim.nb_max);
696 printf("Min possible number of RXDs per queue: %hu\n",
697 dev_info.rx_desc_lim.nb_min);
698 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
700 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
701 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
702 printf("Max possible number of TXDs per queue: %hu\n",
703 dev_info.tx_desc_lim.nb_max);
704 printf("Min possible number of TXDs per queue: %hu\n",
705 dev_info.tx_desc_lim.nb_min);
706 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
707 printf("Max segment number per packet: %hu\n",
708 dev_info.tx_desc_lim.nb_seg_max);
709 printf("Max segment number per MTU/TSO: %hu\n",
710 dev_info.tx_desc_lim.nb_mtu_seg_max);
712 /* Show switch info only if a valid switch domain and port id are set */
713 if (dev_info.switch_info.domain_id !=
714 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
715 if (dev_info.switch_info.name)
716 printf("Switch name: %s\n", dev_info.switch_info.name);
718 printf("Switch domain Id: %u\n",
719 dev_info.switch_info.domain_id);
720 printf("Switch Port Id: %u\n",
721 dev_info.switch_info.port_id);
726 port_summary_header_display(void)
728 uint16_t port_number;
730 port_number = rte_eth_dev_count_avail();
731 printf("Number of available ports: %i\n", port_number);
732 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
733 "Driver", "Status", "Link");
737 port_summary_display(portid_t port_id)
739 struct rte_ether_addr mac_addr;
740 struct rte_eth_link link;
741 struct rte_eth_dev_info dev_info;
742 char name[RTE_ETH_NAME_MAX_LEN];
745 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
750 ret = eth_link_get_nowait_print_err(port_id, &link);
754 ret = eth_dev_info_get_print_err(port_id, &dev_info);
758 rte_eth_dev_get_name_by_port(port_id, name);
759 ret = eth_macaddr_get_print_err(port_id, &mac_addr);
763 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n",
764 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
765 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
766 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
767 dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
768 rte_eth_link_speed_to_str(link.link_speed));
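/* Dump the EEPROM contents of a port in hexadecimal. */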
772 port_eeprom_display(portid_t port_id)
774 struct rte_dev_eeprom_info einfo;
776 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
781 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
782 if (len_eeprom < 0) {
783 switch (len_eeprom) {
785 printf("port index %d invalid\n", port_id);
788 printf("operation not supported by device\n");
791 printf("device is removed\n");
794 printf("Unable to get EEPROM: %d\n", len_eeprom);
800 char buf[len_eeprom];
802 einfo.length = len_eeprom;
805 ret = rte_eth_dev_get_eeprom(port_id, &einfo);
809 printf("port index %d invalid\n", port_id);
812 printf("operation not supported by device\n");
815 printf("device is removed\n");
818 printf("Unable to get EEPROM: %d\n", ret);
823 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
824 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
828 port_module_eeprom_display(portid_t port_id)
830 struct rte_eth_dev_module_info minfo;
831 struct rte_dev_eeprom_info einfo;
834 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
840 ret = rte_eth_dev_get_module_info(port_id, &minfo);
844 printf("port index %d invalid\n", port_id);
847 printf("operation not supported by device\n");
850 printf("device is removed\n");
853 printf("Unable to get module EEPROM: %d\n", ret);
859 char buf[minfo.eeprom_len];
861 einfo.length = minfo.eeprom_len;
864 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
868 printf("port index %d invalid\n", port_id);
871 printf("operation not supported by device\n");
874 printf("device is removed\n");
877 printf("Unable to get module EEPROM: %d\n", ret);
883 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
884 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
888 port_id_is_invalid(portid_t port_id, enum print_warning warning)
892 if (port_id == (portid_t)RTE_PORT_ALL)
895 RTE_ETH_FOREACH_DEV(pid)
899 if (warning == ENABLED_WARN)
900 printf("Invalid port %d\n", port_id);
905 void print_valid_ports(void)
909 printf("The valid ports array is [");
910 RTE_ETH_FOREACH_DEV(pid) {
917 vlan_id_is_invalid(uint16_t vlan_id)
921 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
926 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
928 const struct rte_pci_device *pci_dev;
929 const struct rte_bus *bus;
933 printf("Port register offset 0x%X not aligned on a 4-byte "
939 if (!ports[port_id].dev_info.device) {
940 printf("Invalid device\n");
944 bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
945 if (bus && !strcmp(bus->name, "pci")) {
946 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
948 printf("Not a PCI device\n");
952 pci_len = pci_dev->mem_resource[0].len;
953 if (reg_off >= pci_len) {
954 printf("Port %d: register offset %u (0x%X) out of port PCI "
955 "resource (length=%"PRIu64")\n",
956 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
963 reg_bit_pos_is_invalid(uint8_t bit_pos)
967 printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
971 #define display_port_and_reg_off(port_id, reg_off) \
972 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
975 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
977 display_port_and_reg_off(port_id, (unsigned)reg_off);
978 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
982 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
987 if (port_id_is_invalid(port_id, ENABLED_WARN))
989 if (port_reg_off_is_invalid(port_id, reg_off))
991 if (reg_bit_pos_is_invalid(bit_x))
993 reg_v = port_id_pci_reg_read(port_id, reg_off);
994 display_port_and_reg_off(port_id, (unsigned)reg_off);
995 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
999 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
1000 uint8_t bit1_pos, uint8_t bit2_pos)
1006 if (port_id_is_invalid(port_id, ENABLED_WARN))
1008 if (port_reg_off_is_invalid(port_id, reg_off))
1010 if (reg_bit_pos_is_invalid(bit1_pos))
1012 if (reg_bit_pos_is_invalid(bit2_pos))
1014 if (bit1_pos > bit2_pos)
1015 l_bit = bit2_pos, h_bit = bit1_pos;
1017 l_bit = bit1_pos, h_bit = bit2_pos;
1019 reg_v = port_id_pci_reg_read(port_id, reg_off);
1022 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
1023 display_port_and_reg_off(port_id, (unsigned)reg_off);
1024 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
1025 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
1029 port_reg_display(portid_t port_id, uint32_t reg_off)
1033 if (port_id_is_invalid(port_id, ENABLED_WARN))
1035 if (port_reg_off_is_invalid(port_id, reg_off))
1037 reg_v = port_id_pci_reg_read(port_id, reg_off);
1038 display_port_reg_value(port_id, reg_off, reg_v);
1042 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
1047 if (port_id_is_invalid(port_id, ENABLED_WARN))
1049 if (port_reg_off_is_invalid(port_id, reg_off))
1051 if (reg_bit_pos_is_invalid(bit_pos))
1054 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
1057 reg_v = port_id_pci_reg_read(port_id, reg_off);
1059 reg_v &= ~(1 << bit_pos);
1061 reg_v |= (1 << bit_pos);
1062 port_id_pci_reg_write(port_id, reg_off, reg_v);
1063 display_port_reg_value(port_id, reg_off, reg_v);
1067 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
1068 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
1075 if (port_id_is_invalid(port_id, ENABLED_WARN))
1077 if (port_reg_off_is_invalid(port_id, reg_off))
1079 if (reg_bit_pos_is_invalid(bit1_pos))
1081 if (reg_bit_pos_is_invalid(bit2_pos))
1083 if (bit1_pos > bit2_pos)
1084 l_bit = bit2_pos, h_bit = bit1_pos;
1086 l_bit = bit1_pos, h_bit = bit2_pos;
1088 if ((h_bit - l_bit) < 31)
1089 max_v = (1 << (h_bit - l_bit + 1)) - 1;
1093 if (value > max_v) {
1094 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
1095 (unsigned)value, (unsigned)value,
1096 (unsigned)max_v, (unsigned)max_v);
1099 reg_v = port_id_pci_reg_read(port_id, reg_off);
1100 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1101 reg_v |= (value << l_bit); /* Set changed bits */
1102 port_id_pci_reg_write(port_id, reg_off, reg_v);
1103 display_port_reg_value(port_id, reg_off, reg_v);
1107 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1109 if (port_id_is_invalid(port_id, ENABLED_WARN))
1111 if (port_reg_off_is_invalid(port_id, reg_off))
1113 port_id_pci_reg_write(port_id, reg_off, reg_v);
1114 display_port_reg_value(port_id, reg_off, reg_v);
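/* Set the MTU of a port and update the jumbo-frame RX offload and max_rx_pkt_len accordingly. */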
1118 port_mtu_set(portid_t port_id, uint16_t mtu)
1121 struct rte_port *rte_port = &ports[port_id];
1122 struct rte_eth_dev_info dev_info;
1123 uint16_t eth_overhead;
1126 if (port_id_is_invalid(port_id, ENABLED_WARN))
1129 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1133 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
1134 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
1135 mtu, dev_info.min_mtu, dev_info.max_mtu);
1138 diag = rte_eth_dev_set_mtu(port_id, mtu);
1140 printf("Set MTU failed. diag=%d\n", diag);
1141 else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1143 * The Ethernet overhead used by the driver equals the difference
1144 * between max_rx_pktlen and max_mtu in rte_eth_dev_info when the
1145 * device supports jumbo frames.
1147 eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
1148 if (mtu > RTE_ETHER_MTU) {
1149 rte_port->dev_conf.rxmode.offloads |=
1150 DEV_RX_OFFLOAD_JUMBO_FRAME;
1151 rte_port->dev_conf.rxmode.max_rx_pkt_len =
1154 rte_port->dev_conf.rxmode.offloads &=
1155 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1159 /* Generic flow management functions. */
1161 static struct port_flow_tunnel *
1162 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
1164 struct port_flow_tunnel *flow_tunnel;
1166 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1167 if (flow_tunnel->id == port_tunnel_id)
1177 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
1180 switch (tunnel->type) {
1184 case RTE_FLOW_ITEM_TYPE_VXLAN:
1192 struct port_flow_tunnel *
1193 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
1195 struct rte_port *port = &ports[port_id];
1196 struct port_flow_tunnel *flow_tunnel;
1198 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1199 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
1208 void port_flow_tunnel_list(portid_t port_id)
1210 struct rte_port *port = &ports[port_id];
1211 struct port_flow_tunnel *flt;
1213 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1214 printf("port %u tunnel #%u type=%s",
1215 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
1216 if (flt->tunnel.tun_id)
1217 printf(" id=%" PRIu64, flt->tunnel.tun_id);
1222 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
1224 struct rte_port *port = &ports[port_id];
1225 struct port_flow_tunnel *flt;
1227 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1228 if (flt->id == tunnel_id)
1232 LIST_REMOVE(flt, chain);
1234 printf("port %u: flow tunnel #%u destroyed\n",
1235 port_id, tunnel_id);
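/** Create a flow tunnel object on a port; only the "vxlan" tunnel type is currently handled. */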
1239 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
1241 struct rte_port *port = &ports[port_id];
1242 enum rte_flow_item_type type;
1243 struct port_flow_tunnel *flt;
1245 if (!strcmp(ops->type, "vxlan"))
1246 type = RTE_FLOW_ITEM_TYPE_VXLAN;
1248 printf("cannot offload \"%s\" tunnel type\n", ops->type);
1251 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1252 if (flt->tunnel.type == type)
1256 flt = calloc(1, sizeof(*flt));
1258 printf("failed to allocate port flt object\n");
1261 flt->tunnel.type = type;
1262 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
1263 LIST_FIRST(&port->flow_tunnel_list)->id + 1;
1264 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
1266 printf("port %d: flow tunnel #%u type %s\n",
1267 port_id, flt->id, ops->type);
1270 /** Generate a port_flow entry from attributes/pattern/actions. */
1271 static struct port_flow *
1272 port_flow_new(const struct rte_flow_attr *attr,
1273 const struct rte_flow_item *pattern,
1274 const struct rte_flow_action *actions,
1275 struct rte_flow_error *error)
1277 const struct rte_flow_conv_rule rule = {
1279 .pattern_ro = pattern,
1280 .actions_ro = actions,
1282 struct port_flow *pf;
1285 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1288 pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1291 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1295 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1302 /** Print a message out of a flow error. */
1304 port_flow_complain(struct rte_flow_error *error)
1306 static const char *const errstrlist[] = {
1307 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1308 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1309 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1310 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1311 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1312 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1313 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1314 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1315 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1316 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1317 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1318 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1319 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1320 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1321 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1322 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1323 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1327 int err = rte_errno;
1329 if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1330 !errstrlist[error->type])
1331 errstr = "unknown type";
1333 errstr = errstrlist[error->type];
1334 printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
1335 error->type, errstr,
1336 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1337 error->cause), buf) : "",
1338 error->message ? error->message : "(no stated reason)",
1344 rss_config_display(struct rte_flow_action_rss *rss_conf)
1348 if (rss_conf == NULL) {
1349 printf("Invalid rule\n");
1355 if (rss_conf->queue_num == 0)
1357 for (i = 0; i < rss_conf->queue_num; i++)
1358 printf(" %d", rss_conf->queue[i]);
1361 printf(" function: ");
1362 switch (rss_conf->func) {
1363 case RTE_ETH_HASH_FUNCTION_DEFAULT:
1364 printf("default\n");
1366 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1367 printf("toeplitz\n");
1369 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1370 printf("simple_xor\n");
1372 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1373 printf("symmetric_toeplitz\n");
1376 printf("Unknown function\n");
1380 printf(" types:\n");
1381 if (rss_conf->types == 0) {
1385 for (i = 0; rss_type_table[i].str; i++) {
1386 if ((rss_conf->types &
1387 rss_type_table[i].rss_type) ==
1388 rss_type_table[i].rss_type &&
1389 rss_type_table[i].rss_type != 0)
1390 printf(" %s\n", rss_type_table[i].str);
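/** Look up a shared action on a port by its ID; warn and return NULL when it is not found. */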
1394 static struct port_shared_action *
1395 action_get_by_id(portid_t port_id, uint32_t id)
1397 struct rte_port *port;
1398 struct port_shared_action **ppsa;
1399 struct port_shared_action *psa = NULL;
1401 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1402 port_id == (portid_t)RTE_PORT_ALL)
1404 port = &ports[port_id];
1405 ppsa = &port->actions_list;
1407 if ((*ppsa)->id == id) {
1411 ppsa = &(*ppsa)->next;
1414 printf("Failed to find shared action #%u on port %u\n",
1420 action_alloc(portid_t port_id, uint32_t id,
1421 struct port_shared_action **action)
1423 struct rte_port *port;
1424 struct port_shared_action **ppsa;
1425 struct port_shared_action *psa = NULL;
1428 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1429 port_id == (portid_t)RTE_PORT_ALL)
1431 port = &ports[port_id];
1432 if (id == UINT32_MAX) {
1433 /* taking first available ID */
1434 if (port->actions_list) {
1435 if (port->actions_list->id == UINT32_MAX - 1) {
1436 printf("Highest shared action ID is already"
1437 " assigned, delete it first\n");
1440 id = port->actions_list->id + 1;
1445 psa = calloc(1, sizeof(*psa));
1447 printf("Allocation of port %u shared action failed\n",
1451 ppsa = &port->actions_list;
1452 while (*ppsa && (*ppsa)->id > id)
1453 ppsa = &(*ppsa)->next;
1454 if (*ppsa && (*ppsa)->id == id) {
1455 printf("Shared action #%u is already assigned,"
1456 " delete it first\n", id);
1467 /** Create shared action */
1469 port_shared_action_create(portid_t port_id, uint32_t id,
1470 const struct rte_flow_shared_action_conf *conf,
1471 const struct rte_flow_action *action)
1473 struct port_shared_action *psa;
1475 struct rte_flow_error error;
1477 ret = action_alloc(port_id, id, &psa);
1480 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
1481 struct rte_flow_action_age *age =
1482 (struct rte_flow_action_age *)(uintptr_t)(action->conf);
1484 psa->age_type = ACTION_AGE_CONTEXT_TYPE_SHARED_ACTION;
1485 age->context = &psa->age_type;
1487 /* Poisoning to make sure PMDs update it in case of error. */
1488 memset(&error, 0x22, sizeof(error));
1489 psa->action = rte_flow_shared_action_create(port_id, conf, action,
1492 uint32_t destroy_id = psa->id;
1493 port_shared_action_destroy(port_id, 1, &destroy_id);
1494 return port_flow_complain(&error);
1496 psa->type = action->type;
1497 printf("Shared action #%u created\n", psa->id);
1501 /** Destroy shared action */
1503 port_shared_action_destroy(portid_t port_id,
1505 const uint32_t *actions)
1507 struct rte_port *port;
1508 struct port_shared_action **tmp;
1512 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1513 port_id == (portid_t)RTE_PORT_ALL)
1515 port = &ports[port_id];
1516 tmp = &port->actions_list;
1520 for (i = 0; i != n; ++i) {
1521 struct rte_flow_error error;
1522 struct port_shared_action *psa = *tmp;
1524 if (actions[i] != psa->id)
1527 * Poisoning to make sure PMDs update it in case
1530 memset(&error, 0x33, sizeof(error));
1532 if (psa->action && rte_flow_shared_action_destroy(
1533 port_id, psa->action, &error)) {
1534 ret = port_flow_complain(&error);
1538 printf("Shared action #%u destroyed\n", psa->id);
1543 tmp = &(*tmp)->next;
1550 /** Get shared action by port + id */
1551 struct rte_flow_shared_action *
1552 port_shared_action_get_by_id(portid_t port_id, uint32_t id)
1555 struct port_shared_action *psa = action_get_by_id(port_id, id);
1557 return (psa) ? psa->action : NULL;
1560 /** Update shared action */
1562 port_shared_action_update(portid_t port_id, uint32_t id,
1563 const struct rte_flow_action *action)
1565 struct rte_flow_error error;
1566 struct rte_flow_shared_action *shared_action;
1568 shared_action = port_shared_action_get_by_id(port_id, id);
1571 if (rte_flow_shared_action_update(port_id, shared_action, action,
1573 return port_flow_complain(&error);
1575 printf("Shared action #%u updated\n", id);
1580 port_shared_action_query(portid_t port_id, uint32_t id)
1582 struct rte_flow_error error;
1583 struct port_shared_action *psa;
1584 uint64_t default_data;
1588 psa = action_get_by_id(port_id, id);
1591 switch (psa->type) {
1592 case RTE_FLOW_ACTION_TYPE_RSS:
1593 case RTE_FLOW_ACTION_TYPE_AGE:
1594 data = &default_data;
1597 printf("Shared action %u (type: %d) on port %u doesn't support"
1598 " query\n", id, psa->type, port_id);
1601 if (rte_flow_shared_action_query(port_id, psa->action, data, &error))
1602 ret = port_flow_complain(&error);
1603 switch (psa->type) {
1604 case RTE_FLOW_ACTION_TYPE_RSS:
1606 printf("Shared RSS action:\n\trefs:%u\n",
1607 *((uint32_t *)data));
1610 case RTE_FLOW_ACTION_TYPE_AGE:
1612 struct rte_flow_query_age *resp = data;
1616 " sec_since_last_hit_valid: %u\n"
1617 " sec_since_last_hit: %" PRIu32 "\n",
1619 resp->sec_since_last_hit_valid,
1620 resp->sec_since_last_hit);
1625 printf("Shared action %u (type: %d) on port %u doesn't support"
1626 " query\n", id, psa->type, port_id);
1632 static struct port_flow_tunnel *
1633 port_flow_tunnel_offload_cmd_prep(portid_t port_id,
1634 const struct rte_flow_item *pattern,
1635 const struct rte_flow_action *actions,
1636 const struct tunnel_ops *tunnel_ops)
1639 struct rte_port *port;
1640 struct port_flow_tunnel *pft;
1641 struct rte_flow_error error;
1643 port = &ports[port_id];
1644 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id);
1646 printf("failed to locate port flow tunnel #%u\n",
1650 if (tunnel_ops->actions) {
1651 uint32_t num_actions;
1652 const struct rte_flow_action *aptr;
1654 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel,
1656 &pft->num_pmd_actions,
1659 port_flow_complain(&error);
1662 for (aptr = actions, num_actions = 1;
1663 aptr->type != RTE_FLOW_ACTION_TYPE_END;
1664 aptr++, num_actions++);
1665 pft->actions = malloc(
1666 (num_actions + pft->num_pmd_actions) *
1667 sizeof(actions[0]));
1668 if (!pft->actions) {
1669 rte_flow_tunnel_action_decap_release(
1670 port_id, pft->actions,
1671 pft->num_pmd_actions, &error);
1674 rte_memcpy(pft->actions, pft->pmd_actions,
1675 pft->num_pmd_actions * sizeof(actions[0]));
1676 rte_memcpy(pft->actions + pft->num_pmd_actions, actions,
1677 num_actions * sizeof(actions[0]));
1679 if (tunnel_ops->items) {
1681 const struct rte_flow_item *iptr;
1683 ret = rte_flow_tunnel_match(port_id, &pft->tunnel,
1685 &pft->num_pmd_items,
1688 port_flow_complain(&error);
1691 for (iptr = pattern, num_items = 1;
1692 iptr->type != RTE_FLOW_ITEM_TYPE_END;
1693 iptr++, num_items++);
1694 pft->items = malloc((num_items + pft->num_pmd_items) *
1695 sizeof(pattern[0]));
1697 rte_flow_tunnel_item_release(
1698 port_id, pft->pmd_items,
1699 pft->num_pmd_items, &error);
1702 rte_memcpy(pft->items, pft->pmd_items,
1703 pft->num_pmd_items * sizeof(pattern[0]));
1704 rte_memcpy(pft->items + pft->num_pmd_items, pattern,
1705 num_items * sizeof(pattern[0]));
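/** Release the PMD-provided tunnel items/actions obtained during command preparation. */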
1712 port_flow_tunnel_offload_cmd_release(portid_t port_id,
1713 const struct tunnel_ops *tunnel_ops,
1714 struct port_flow_tunnel *pft)
1716 struct rte_flow_error error;
1718 if (tunnel_ops->actions) {
1720 rte_flow_tunnel_action_decap_release(
1721 port_id, pft->pmd_actions,
1722 pft->num_pmd_actions, &error);
1723 pft->actions = NULL;
1724 pft->pmd_actions = NULL;
1726 if (tunnel_ops->items) {
1728 rte_flow_tunnel_item_release(port_id, pft->pmd_items,
1732 pft->pmd_items = NULL;
1736 /** Validate flow rule. */
1738 port_flow_validate(portid_t port_id,
1739 const struct rte_flow_attr *attr,
1740 const struct rte_flow_item *pattern,
1741 const struct rte_flow_action *actions,
1742 const struct tunnel_ops *tunnel_ops)
1744 struct rte_flow_error error;
1745 struct port_flow_tunnel *pft = NULL;
1747 /* Poisoning to make sure PMDs update it in case of error. */
1748 memset(&error, 0x11, sizeof(error));
1749 if (tunnel_ops->enabled) {
1750 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
1751 actions, tunnel_ops);
1755 pattern = pft->items;
1757 actions = pft->actions;
1759 if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1760 return port_flow_complain(&error);
1761 if (tunnel_ops->enabled)
1762 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
1763 printf("Flow rule validated\n");
1767 /** Return the age action structure if it exists, otherwise NULL. */
1768 static struct rte_flow_action_age *
1769 age_action_get(const struct rte_flow_action *actions)
1771 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1772 switch (actions->type) {
1773 case RTE_FLOW_ACTION_TYPE_AGE:
1774 return (struct rte_flow_action_age *)
1775 (uintptr_t)actions->conf;
1783 /** Create flow rule. */
1785 port_flow_create(portid_t port_id,
1786 const struct rte_flow_attr *attr,
1787 const struct rte_flow_item *pattern,
1788 const struct rte_flow_action *actions,
1789 const struct tunnel_ops *tunnel_ops)
1791 struct rte_flow *flow;
1792 struct rte_port *port;
1793 struct port_flow *pf;
1795 struct rte_flow_error error;
1796 struct port_flow_tunnel *pft = NULL;
1797 struct rte_flow_action_age *age = age_action_get(actions);
1799 port = &ports[port_id];
1800 if (port->flow_list) {
1801 if (port->flow_list->id == UINT32_MAX) {
1802 printf("Highest rule ID is already assigned, delete"
1806 id = port->flow_list->id + 1;
1808 if (tunnel_ops->enabled) {
1809 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
1810 actions, tunnel_ops);
1814 pattern = pft->items;
1816 actions = pft->actions;
1818 pf = port_flow_new(attr, pattern, actions, &error);
1820 return port_flow_complain(&error);
1822 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
1823 age->context = &pf->age_type;
1825 /* Poisoning to make sure PMDs update it in case of error. */
1826 memset(&error, 0x22, sizeof(error));
1827 flow = rte_flow_create(port_id, attr, pattern, actions, &error);
1830 return port_flow_complain(&error);
1832 pf->next = port->flow_list;
1835 port->flow_list = pf;
1836 if (tunnel_ops->enabled)
1837 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
1838 printf("Flow rule #%u created\n", pf->id);
1842 /** Destroy a number of flow rules. */
1844 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
1846 struct rte_port *port;
1847 struct port_flow **tmp;
1851 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1852 port_id == (portid_t)RTE_PORT_ALL)
1854 port = &ports[port_id];
1855 tmp = &port->flow_list;
1859 for (i = 0; i != n; ++i) {
1860 struct rte_flow_error error;
1861 struct port_flow *pf = *tmp;
1863 if (rule[i] != pf->id)
1866 * Poisoning to make sure PMDs update it in case
1869 memset(&error, 0x33, sizeof(error));
1870 if (rte_flow_destroy(port_id, pf->flow, &error)) {
1871 ret = port_flow_complain(&error);
1874 printf("Flow rule #%u destroyed\n", pf->id);
1880 tmp = &(*tmp)->next;
1886 /** Remove all flow rules. */
1888 port_flow_flush(portid_t port_id)
1890 struct rte_flow_error error;
1891 struct rte_port *port;
1894 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1895 port_id == (portid_t)RTE_PORT_ALL)
1898 port = &ports[port_id];
1900 if (port->flow_list == NULL)
1903 /* Poisoning to make sure PMDs update it in case of error. */
1904 memset(&error, 0x44, sizeof(error));
1905 if (rte_flow_flush(port_id, &error)) {
1906 port_flow_complain(&error);
1909 while (port->flow_list) {
1910 struct port_flow *pf = port->flow_list->next;
1912 free(port->flow_list);
1913 port->flow_list = pf;
1918 /** Dump flow rules. */
1920 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id,
1921 const char *file_name)
1924 FILE *file = stdout;
1925 struct rte_flow_error error;
1926 struct rte_port *port;
1927 struct port_flow *pflow;
1928 struct rte_flow *tmpFlow = NULL;
1931 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1932 port_id == (portid_t)RTE_PORT_ALL)
1936 port = &ports[port_id];
1937 pflow = port->flow_list;
1939 if (rule_id != pflow->id) {
1940 pflow = pflow->next;
1942 tmpFlow = pflow->flow;
1948 if (found == false) {
1949 printf("Failed to dump to flow %d\n", rule_id);
1954 if (file_name && strlen(file_name)) {
1955 file = fopen(file_name, "w");
1957 printf("Failed to create file %s: %s\n", file_name,
1964 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error);
1966 ret = rte_flow_dev_dump(port_id, NULL, file, &error);
1968 port_flow_complain(&error);
1969 printf("Failed to dump flow: %s\n", strerror(-ret));
1971 printf("Flow dump finished\n");
1972 if (file_name && strlen(file_name))
1977 /** Query a flow rule. */
1979 port_flow_query(portid_t port_id, uint32_t rule,
1980 const struct rte_flow_action *action)
1982 struct rte_flow_error error;
1983 struct rte_port *port;
1984 struct port_flow *pf;
1987 struct rte_flow_query_count count;
1988 struct rte_flow_action_rss rss_conf;
1989 struct rte_flow_query_age age;
1993 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1994 port_id == (portid_t)RTE_PORT_ALL)
1996 port = &ports[port_id];
1997 for (pf = port->flow_list; pf; pf = pf->next)
2001 printf("Flow rule #%u not found\n", rule);
2004 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
2005 &name, sizeof(name),
2006 (void *)(uintptr_t)action->type, &error);
2008 return port_flow_complain(&error);
2009 switch (action->type) {
2010 case RTE_FLOW_ACTION_TYPE_COUNT:
2011 case RTE_FLOW_ACTION_TYPE_RSS:
2012 case RTE_FLOW_ACTION_TYPE_AGE:
2015 printf("Cannot query action type %d (%s)\n",
2016 action->type, name);
2019 /* Poisoning to make sure PMDs update it in case of error. */
2020 memset(&error, 0x55, sizeof(error));
2021 memset(&query, 0, sizeof(query));
2022 if (rte_flow_query(port_id, pf->flow, action, &query, &error))
2023 return port_flow_complain(&error);
2024 switch (action->type) {
2025 case RTE_FLOW_ACTION_TYPE_COUNT:
2029 " hits: %" PRIu64 "\n"
2030 " bytes: %" PRIu64 "\n",
2032 query.count.hits_set,
2033 query.count.bytes_set,
2037 case RTE_FLOW_ACTION_TYPE_RSS:
2038 rss_config_display(&query.rss_conf);
2040 case RTE_FLOW_ACTION_TYPE_AGE:
2043 " sec_since_last_hit_valid: %u\n"
2044 " sec_since_last_hit: %" PRIu32 "\n",
2047 query.age.sec_since_last_hit_valid,
2048 query.age.sec_since_last_hit);
2051 printf("Cannot display result for action type %d (%s)\n",
2052 action->type, name);
2058 /** List and optionally destroy all aged flows. */
2060 port_flow_aged(portid_t port_id, uint8_t destroy)
2063 int nb_context, total = 0, idx;
2064 struct rte_flow_error error;
2065 enum age_action_context_type *type;
2067 struct port_flow *pf;
2068 struct port_shared_action *psa;
2071 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2072 port_id == (portid_t)RTE_PORT_ALL)
2074 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
2075 printf("Port %u total aged flows: %d\n", port_id, total);
2077 port_flow_complain(&error);
2082 contexts = malloc(sizeof(void *) * total);
2083 if (contexts == NULL) {
2084 printf("Cannot allocate contexts for aged flow\n");
2087 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
2088 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
2089 if (nb_context != total) {
2090 printf("Port:%d get aged flows count(%d) != total(%d)\n",
2091 port_id, nb_context, total);
2096 for (idx = 0; idx < nb_context; idx++) {
2097 if (!contexts[idx]) {
2098 printf("Error: get Null context in port %u\n", port_id);
2101 type = (enum age_action_context_type *)contexts[idx];
2103 case ACTION_AGE_CONTEXT_TYPE_FLOW:
2104 ctx.pf = container_of(type, struct port_flow, age_type);
2105 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32
2109 ctx.pf->rule.attr->group,
2110 ctx.pf->rule.attr->priority,
2111 ctx.pf->rule.attr->ingress ? 'i' : '-',
2112 ctx.pf->rule.attr->egress ? 'e' : '-',
2113 ctx.pf->rule.attr->transfer ? 't' : '-');
2114 if (destroy && !port_flow_destroy(port_id, 1,
2118 case ACTION_AGE_CONTEXT_TYPE_SHARED_ACTION:
2119 ctx.psa = container_of(type, struct port_shared_action,
2121 printf("%-20s\t%" PRIu32 "\n", "Shared action",
2125 printf("Error: invalid context type %u\n", port_id);
2129 printf("\n%d flows destroyed\n", total);
2133 /** List flow rules. */
2135 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
2137 struct rte_port *port;
2138 struct port_flow *pf;
2139 struct port_flow *list = NULL;
2142 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2143 port_id == (portid_t)RTE_PORT_ALL)
2145 port = &ports[port_id];
2146 if (!port->flow_list)
2148 /* Sort flows by group, priority and ID. */
2149 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2150 struct port_flow **tmp;
2151 const struct rte_flow_attr *curr = pf->rule.attr;
2154 /* Filter out unwanted groups. */
2155 for (i = 0; i != n; ++i)
2156 if (curr->group == group[i])
2161 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
2162 const struct rte_flow_attr *comp = (*tmp)->rule.attr;
2164 if (curr->group > comp->group ||
2165 (curr->group == comp->group &&
2166 curr->priority > comp->priority) ||
2167 (curr->group == comp->group &&
2168 curr->priority == comp->priority &&
2169 pf->id > (*tmp)->id))
2176 printf("ID\tGroup\tPrio\tAttr\tRule\n");
2177 for (pf = list; pf != NULL; pf = pf->tmp) {
2178 const struct rte_flow_item *item = pf->rule.pattern;
2179 const struct rte_flow_action *action = pf->rule.actions;
2182 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
2184 pf->rule.attr->group,
2185 pf->rule.attr->priority,
2186 pf->rule.attr->ingress ? 'i' : '-',
2187 pf->rule.attr->egress ? 'e' : '-',
2188 pf->rule.attr->transfer ? 't' : '-');
2189 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
2190 if ((uint32_t)item->type > INT_MAX)
2191 name = "PMD_INTERNAL";
2192 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
2193 &name, sizeof(name),
2194 (void *)(uintptr_t)item->type,
2197 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
2198 printf("%s ", name);
2202 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
2203 if ((uint32_t)action->type > INT_MAX)
2204 name = "PMD_INTERNAL";
2205 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
2206 &name, sizeof(name),
2207 (void *)(uintptr_t)action->type,
2210 if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
2211 printf(" %s", name);
2218 /** Restrict ingress traffic to the defined flow rules. */
2220 port_flow_isolate(portid_t port_id, int set)
2222 struct rte_flow_error error;
2224 /* Poisoning to make sure PMDs update it in case of error. */
2225 memset(&error, 0x66, sizeof(error));
2226 if (rte_flow_isolate(port_id, set, &error))
2227 return port_flow_complain(&error);
2228 printf("Ingress traffic on port %u is %s to the defined flow rules\n",
2230 set ? "now restricted" : "not restricted anymore");
2235 * RX/TX ring descriptors display functions.
2238 rx_queue_id_is_invalid(queueid_t rxq_id)
2240 if (rxq_id < nb_rxq)
2242 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
2247 tx_queue_id_is_invalid(queueid_t txq_id)
2249 if (txq_id < nb_txq)
2251 printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
2256 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
2258 struct rte_port *port = &ports[port_id];
2259 struct rte_eth_rxq_info rx_qinfo;
2262 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
2264 *ring_size = rx_qinfo.nb_desc;
2268 if (ret != -ENOTSUP)
2271 * If rte_eth_rx_queue_info_get() is not supported by this PMD, the
2272 * ring_size stored in testpmd is used for validity verification.
2273 * When the rxq is configured by rte_eth_rx_queue_setup() with nb_rx_desc
2274 * set to 0, a default value provided by the PMD is used to set up this
2275 * rxq. If that default value is also 0, RTE_ETH_DEV_FALLBACK_RX_RINGSIZE
2276 * is used to set up this rxq.
2278 if (port->nb_rx_desc[rxq_id])
2279 *ring_size = port->nb_rx_desc[rxq_id];
2280 else if (port->dev_info.default_rxportconf.ring_size)
2281 *ring_size = port->dev_info.default_rxportconf.ring_size;
2283 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2288 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
2290 struct rte_port *port = &ports[port_id];
2291 struct rte_eth_txq_info tx_qinfo;
2294 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
2296 *ring_size = tx_qinfo.nb_desc;
2300 if (ret != -ENOTSUP)
2303 * If rte_eth_tx_queue_info_get() is not supported by this PMD, the
2304 * ring_size stored in testpmd is used for validity verification.
2305 * When the txq is configured by rte_eth_tx_queue_setup() with nb_tx_desc
2306 * set to 0, a default value provided by the PMD is used to set up this
2307 * txq. If that default value is also 0, RTE_ETH_DEV_FALLBACK_TX_RINGSIZE
2308 * is used to set up this txq.
2310 if (port->nb_tx_desc[txq_id])
2311 *ring_size = port->nb_tx_desc[txq_id];
2312 else if (port->dev_info.default_txportconf.ring_size)
2313 *ring_size = port->dev_info.default_txportconf.ring_size;
2315 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2320 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
2325 ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
2329 if (rxdesc_id < ring_size)
2332 printf("Invalid RX descriptor %u (must be < ring_size=%u)\n",
2333 rxdesc_id, ring_size);
2338 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
2343 ret = get_tx_ring_size(port_id, txq_id, &ring_size);
2347 if (txdesc_id < ring_size)
2350 printf("Invalid TX descriptor %u (must be < ring_size=%u)\n",
2351 txdesc_id, ring_size);
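/* Look up the memzone holding the descriptor ring of a given port/queue. */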
2355 static const struct rte_memzone *
2356 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
2358 char mz_name[RTE_MEMZONE_NAMESIZE];
2359 const struct rte_memzone *mz;
2361 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
2362 port_id, q_id, ring_name);
2363 mz = rte_memzone_lookup(mz_name);
2365 printf("%s ring memory zoneof (port %d, queue %d) not"
2366 "found (zone name = %s\n",
2367 ring_name, port_id, q_id, mz_name);
2371 union igb_ring_dword {
2374 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2384 struct igb_ring_desc_32_bytes {
2385 union igb_ring_dword lo_dword;
2386 union igb_ring_dword hi_dword;
2387 union igb_ring_dword resv1;
2388 union igb_ring_dword resv2;
2391 struct igb_ring_desc_16_bytes {
2392 union igb_ring_dword lo_dword;
2393 union igb_ring_dword hi_dword;
2397 ring_rxd_display_dword(union igb_ring_dword dword)
2399 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
2400 (unsigned)dword.words.hi);
2404 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
2405 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2408 __rte_unused portid_t port_id,
2412 struct igb_ring_desc_16_bytes *ring =
2413 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2414 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2416 struct rte_eth_dev_info dev_info;
2418 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2422 if (strstr(dev_info.driver_name, "i40e") != NULL) {
2423 /* 32-byte RX descriptor, i40e only */
2424 struct igb_ring_desc_32_bytes *ring =
2425 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
2426 ring[desc_id].lo_dword.dword =
2427 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2428 ring_rxd_display_dword(ring[desc_id].lo_dword);
2429 ring[desc_id].hi_dword.dword =
2430 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2431 ring_rxd_display_dword(ring[desc_id].hi_dword);
2432 ring[desc_id].resv1.dword =
2433 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
2434 ring_rxd_display_dword(ring[desc_id].resv1);
2435 ring[desc_id].resv2.dword =
2436 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
2437 ring_rxd_display_dword(ring[desc_id].resv2);
2442 /* 16-byte RX descriptor */
2443 ring[desc_id].lo_dword.dword =
2444 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2445 ring_rxd_display_dword(ring[desc_id].lo_dword);
2446 ring[desc_id].hi_dword.dword =
2447 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2448 ring_rxd_display_dword(ring[desc_id].hi_dword);
2452 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
2454 struct igb_ring_desc_16_bytes *ring;
2455 struct igb_ring_desc_16_bytes txd;
2457 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2458 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2459 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2460 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
2461 (unsigned)txd.lo_dword.words.lo,
2462 (unsigned)txd.lo_dword.words.hi,
2463 (unsigned)txd.hi_dword.words.lo,
2464 (unsigned)txd.hi_dword.words.hi);
2468 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
2470 const struct rte_memzone *rx_mz;
2472 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
2474 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
2477 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
2481 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
2483 const struct rte_memzone *tx_mz;
2485 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
2487 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
2490 ring_tx_descriptor_display(tx_mz, txd_id);
2494 fwd_lcores_config_display(void)
2498 printf("List of forwarding lcores:");
2499 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
2500 printf(" %2u", fwd_lcores_cpuids[lc_id]);
2504 rxtx_config_display(void)
2509 printf(" %s packet forwarding%s packets/burst=%d\n",
2510 cur_fwd_eng->fwd_mode_name,
2511 retry_enabled == 0 ? "" : " with retry",
2514 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
2515 printf(" packet len=%u - nb packet segments=%d\n",
2516 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
2518 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
2519 nb_fwd_lcores, nb_fwd_ports);
2521 RTE_ETH_FOREACH_DEV(pid) {
2522 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
2523 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
2524 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
2525 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
2526 struct rte_eth_rxq_info rx_qinfo;
2527 struct rte_eth_txq_info tx_qinfo;
2528 uint16_t rx_free_thresh_tmp;
2529 uint16_t tx_free_thresh_tmp;
2530 uint16_t tx_rs_thresh_tmp;
2531 uint16_t nb_rx_desc_tmp;
2532 uint16_t nb_tx_desc_tmp;
2533 uint64_t offloads_tmp;
2534 uint8_t pthresh_tmp;
2535 uint8_t hthresh_tmp;
2536 uint8_t wthresh_tmp;
2539 /* per port config */
2540 printf(" port %d: RX queue number: %d Tx queue number: %d\n",
2541 (unsigned int)pid, nb_rxq, nb_txq);
2543 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
2544 ports[pid].dev_conf.rxmode.offloads,
2545 ports[pid].dev_conf.txmode.offloads);
2547 /* per-RX-queue config, shown only for the first queue to be less verbose */
2548 for (qid = 0; qid < 1; qid++) {
2549 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
2551 nb_rx_desc_tmp = nb_rx_desc[qid];
2552 rx_free_thresh_tmp =
2553 rx_conf[qid].rx_free_thresh;
2554 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
2555 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
2556 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
2557 offloads_tmp = rx_conf[qid].offloads;
2559 nb_rx_desc_tmp = rx_qinfo.nb_desc;
2560 rx_free_thresh_tmp =
2561 rx_qinfo.conf.rx_free_thresh;
2562 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
2563 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
2564 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
2565 offloads_tmp = rx_qinfo.conf.offloads;
2568 printf(" RX queue: %d\n", qid);
2569 printf(" RX desc=%d - RX free threshold=%d\n",
2570 nb_rx_desc_tmp, rx_free_thresh_tmp);
2571 printf(" RX threshold registers: pthresh=%d hthresh=%d "
2573 pthresh_tmp, hthresh_tmp, wthresh_tmp);
2574 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp);
2577 /* per-TX-queue config, shown only for the first queue to be less verbose */
2578 for (qid = 0; qid < 1; qid++) {
2579 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
2581 nb_tx_desc_tmp = nb_tx_desc[qid];
2582 tx_free_thresh_tmp =
2583 tx_conf[qid].tx_free_thresh;
2584 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
2585 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
2586 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
2587 offloads_tmp = tx_conf[qid].offloads;
2588 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
2590 nb_tx_desc_tmp = tx_qinfo.nb_desc;
2591 tx_free_thresh_tmp =
2592 tx_qinfo.conf.tx_free_thresh;
2593 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
2594 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
2595 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
2596 offloads_tmp = tx_qinfo.conf.offloads;
2597 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
2600 printf(" TX queue: %d\n", qid);
2601 printf(" TX desc=%d - TX free threshold=%d\n",
2602 nb_tx_desc_tmp, tx_free_thresh_tmp);
2603 printf(" TX threshold registers: pthresh=%d hthresh=%d "
2605 pthresh_tmp, hthresh_tmp, wthresh_tmp);
2606 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
2607 offloads_tmp, tx_rs_thresh_tmp);
2613 port_rss_reta_info(portid_t port_id,
2614 struct rte_eth_rss_reta_entry64 *reta_conf,
2615 uint16_t nb_entries)
2617 uint16_t i, idx, shift;
2620 if (port_id_is_invalid(port_id, ENABLED_WARN))
2623 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
2625 printf("Failed to get RSS RETA info, return code = %d\n", ret);
2629 for (i = 0; i < nb_entries; i++) {
2630 idx = i / RTE_RETA_GROUP_SIZE;
2631 shift = i % RTE_RETA_GROUP_SIZE;
2632 if (!(reta_conf[idx].mask & (1ULL << shift)))
2634 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
2635 i, reta_conf[idx].reta[shift]);
2640 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
2644 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
2646 struct rte_eth_rss_conf rss_conf = {0};
2647 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
2651 struct rte_eth_dev_info dev_info;
2652 uint8_t hash_key_size;
2655 if (port_id_is_invalid(port_id, ENABLED_WARN))
2658 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2662 if (dev_info.hash_key_size > 0 &&
2663 dev_info.hash_key_size <= sizeof(rss_key))
2664 hash_key_size = dev_info.hash_key_size;
2666 printf("dev_info did not provide a valid hash key size\n");
2670 /* Get RSS hash key if asked to display it */
2671 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
2672 rss_conf.rss_key_len = hash_key_size;
2673 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2677 printf("port index %d invalid\n", port_id);
2680 printf("operation not supported by device\n");
2683 printf("operation failed - diag=%d\n", diag);
2688 rss_hf = rss_conf.rss_hf;
2690 printf("RSS disabled\n");
2693 printf("RSS functions:\n ");
2694 for (i = 0; rss_type_table[i].str; i++) {
2695 if (rss_hf & rss_type_table[i].rss_type)
2696 printf("%s ", rss_type_table[i].str);
2701 printf("RSS key:\n");
2702 for (i = 0; i < hash_key_size; i++)
2703 printf("%02X", rss_key[i]);
2708 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
2709 uint8_t hash_key_len)
2711 struct rte_eth_rss_conf rss_conf;
2715 rss_conf.rss_key = NULL;
2716 rss_conf.rss_key_len = hash_key_len;
2717 rss_conf.rss_hf = 0;
2718 for (i = 0; rss_type_table[i].str; i++) {
2719 if (!strcmp(rss_type_table[i].str, rss_type))
2720 rss_conf.rss_hf = rss_type_table[i].rss_type;
2722 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2724 rss_conf.rss_key = hash_key;
2725 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
2732 printf("port index %d invalid\n", port_id);
2735 printf("operation not supported by device\n");
2738 printf("operation failed - diag=%d\n", diag);
2744 * Setup forwarding configuration for each logical core.
2747 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
2749 streamid_t nb_fs_per_lcore;
2757 nb_fs = cfg->nb_fwd_streams;
2758 nb_fc = cfg->nb_fwd_lcores;
2759 if (nb_fs <= nb_fc) {
2760 nb_fs_per_lcore = 1;
2763 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
2764 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
2767 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
2769 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
2770 fwd_lcores[lc_id]->stream_idx = sm_id;
2771 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
2772 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2776 * Assign extra remaining streams, if any.
2778 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
2779 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
2780 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
2781 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
2782 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
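/*
 * Worked example of the distribution above (hypothetical numbers): with
 * nb_fs = 10 streams and nb_fc = 4 lcores, nb_fs_per_lcore = 2 and
 * nb_extra = 2, so the first two lcores get 2 streams each and the last
 * two lcores get 3 streams each (2 + 2 + 3 + 3 = 10).
 */

/*
 * Map an RX forwarding port index to its TX peer according to the configured
 * port topology (paired, chained or loop).
 */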
2787 fwd_topology_tx_port_get(portid_t rxp)
2789 static int warning_once = 1;
2791 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
2793 switch (port_topology) {
2795 case PORT_TOPOLOGY_PAIRED:
2796 if ((rxp & 0x1) == 0) {
2797 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2800 printf("\nWarning! port-topology=paired"
2801 " and an odd number of forwarding ports;"
2802 " the last port will pair with"
2809 case PORT_TOPOLOGY_CHAINED:
2810 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2811 case PORT_TOPOLOGY_LOOP:
2817 simple_fwd_config_setup(void)
2821 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2822 cur_fwd_config.nb_fwd_streams =
2823 (streamid_t) cur_fwd_config.nb_fwd_ports;
2825 /* reinitialize forwarding streams */
2829 * In the simple forwarding test, the number of forwarding cores
2830 * must be lower or equal to the number of forwarding ports.
2832 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2833 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2834 cur_fwd_config.nb_fwd_lcores =
2835 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
2836 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2838 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2839 fwd_streams[i]->rx_port = fwd_ports_ids[i];
2840 fwd_streams[i]->rx_queue = 0;
2841 fwd_streams[i]->tx_port =
2842 fwd_ports_ids[fwd_topology_tx_port_get(i)];
2843 fwd_streams[i]->tx_queue = 0;
2844 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2845 fwd_streams[i]->retry_enabled = retry_enabled;
2850 * For the RSS forwarding test, all streams are distributed over the lcores.
2851 * Each stream is composed of an RX queue to poll on an RX port for input
2852 * packets, associated with a TX queue of a TX port to which forwarded packets are sent.
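 *
 * Example (hypothetical numbers): with 2 forwarding ports and nb_q = 4 queues,
 * 8 streams are created; each stream polls one RX queue of its RX port and
 * forwards to the TX port selected by fwd_topology_tx_port_get().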
2855 rss_fwd_config_setup(void)
2866 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2867 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2868 cur_fwd_config.nb_fwd_streams =
2869 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2871 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2872 cur_fwd_config.nb_fwd_lcores =
2873 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2875 /* reinitialize forwarding streams */
2878 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2880 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2881 struct fwd_stream *fs;
2883 fs = fwd_streams[sm_id];
2884 txp = fwd_topology_tx_port_get(rxp);
2885 fs->rx_port = fwd_ports_ids[rxp];
2887 fs->tx_port = fwd_ports_ids[txp];
2889 fs->peer_addr = fs->tx_port;
2890 fs->retry_enabled = retry_enabled;
2892 if (rxp < nb_fwd_ports)
2900 * For the DCB forwarding test, each core is assigned to one traffic class.
2902 * Each core is assigned multiple streams, each stream being composed of
2903 * an RX queue to poll on an RX port for input packets, associated with
2904 * a TX queue of a TX port to which forwarded packets are sent. All RX and
2905 * TX queues of a stream map to the same traffic class.
2906 * If VMDQ and DCB co-exist, each traffic class on different POOLs shares
2910 dcb_fwd_config_setup(void)
2912 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2913 portid_t txp, rxp = 0;
2914 queueid_t txq, rxq = 0;
2916 uint16_t nb_rx_queue, nb_tx_queue;
2917 uint16_t i, j, k, sm_id = 0;
2920 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2921 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2922 cur_fwd_config.nb_fwd_streams =
2923 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2925 /* reinitialize forwarding streams */
2929 /* get the dcb info on the first RX and TX ports */
2930 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2931 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2933 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2934 fwd_lcores[lc_id]->stream_nb = 0;
2935 fwd_lcores[lc_id]->stream_idx = sm_id;
2936 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2937 /* if nb_queue is zero, this TC is
2938 * not enabled on the POOL
2940 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2942 k = fwd_lcores[lc_id]->stream_nb +
2943 fwd_lcores[lc_id]->stream_idx;
2944 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2945 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2946 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2947 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2948 for (j = 0; j < nb_rx_queue; j++) {
2949 struct fwd_stream *fs;
2951 fs = fwd_streams[k + j];
2952 fs->rx_port = fwd_ports_ids[rxp];
2953 fs->rx_queue = rxq + j;
2954 fs->tx_port = fwd_ports_ids[txp];
2955 fs->tx_queue = txq + j % nb_tx_queue;
2956 fs->peer_addr = fs->tx_port;
2957 fs->retry_enabled = retry_enabled;
2959 fwd_lcores[lc_id]->stream_nb +=
2960 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2962 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2965 if (tc < rxp_dcb_info.nb_tcs)
2967 /* Restart from TC 0 on next RX port */
2969 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2971 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2974 if (rxp >= nb_fwd_ports)
2976 /* get the dcb information on next RX and TX ports */
2977 if ((rxp & 0x1) == 0)
2978 txp = (portid_t) (rxp + 1);
2980 txp = (portid_t) (rxp - 1);
2981 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2982 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2987 icmp_echo_config_setup(void)
2994 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2995 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2996 (nb_txq * nb_fwd_ports);
2998 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2999 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3000 cur_fwd_config.nb_fwd_streams =
3001 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
3002 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
3003 cur_fwd_config.nb_fwd_lcores =
3004 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
3005 if (verbose_level > 0) {
3006 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
3008 cur_fwd_config.nb_fwd_lcores,
3009 cur_fwd_config.nb_fwd_ports,
3010 cur_fwd_config.nb_fwd_streams);
3013 /* reinitialize forwarding streams */
3015 setup_fwd_config_of_each_lcore(&cur_fwd_config);
3017 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
3018 if (verbose_level > 0)
3019 printf(" core=%d: \n", lc_id);
3020 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
3021 struct fwd_stream *fs;
3022 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
3023 fs->rx_port = fwd_ports_ids[rxp];
3025 fs->tx_port = fs->rx_port;
3027 fs->peer_addr = fs->tx_port;
3028 fs->retry_enabled = retry_enabled;
3029 if (verbose_level > 0)
3030 printf(" stream=%d port=%d rxq=%d txq=%d\n",
3031 sm_id, fs->rx_port, fs->rx_queue,
3033 rxq = (queueid_t) (rxq + 1);
3034 if (rxq == nb_rxq) {
3036 rxp = (portid_t) (rxp + 1);
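/*
 * Select and build the current forwarding configuration: the icmpecho engine
 * has a dedicated setup; with multiple RX and TX queues either the DCB or the
 * RSS setup is chosen; otherwise the simple one-stream-per-port setup is used.
 */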
3043 fwd_config_setup(void)
3045 cur_fwd_config.fwd_eng = cur_fwd_eng;
3046 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
3047 icmp_echo_config_setup();
3051 if ((nb_rxq > 1) && (nb_txq > 1)){
3053 dcb_fwd_config_setup();
3055 rss_fwd_config_setup();
3058 simple_fwd_config_setup();
3062 mp_alloc_to_str(uint8_t mode)
3065 case MP_ALLOC_NATIVE:
3071 case MP_ALLOC_XMEM_HUGE:
3081 pkt_fwd_config_display(struct fwd_config *cfg)
3083 struct fwd_stream *fs;
3087 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
3088 "NUMA support %s, MP allocation mode: %s\n",
3089 cfg->fwd_eng->fwd_mode_name,
3090 retry_enabled == 0 ? "" : " with retry",
3091 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
3092 numa_support == 1 ? "enabled" : "disabled",
3093 mp_alloc_to_str(mp_alloc_type));
3096 printf("TX retry num: %u, delay between TX retries: %uus\n",
3097 burst_tx_retry_num, burst_tx_delay_time);
3098 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
3099 printf("Logical Core %u (socket %u) forwards packets on "
3101 fwd_lcores_cpuids[lc_id],
3102 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
3103 fwd_lcores[lc_id]->stream_nb);
3104 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
3105 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
3106 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
3107 "P=%d/Q=%d (socket %u) ",
3108 fs->rx_port, fs->rx_queue,
3109 ports[fs->rx_port].socket_id,
3110 fs->tx_port, fs->tx_queue,
3111 ports[fs->tx_port].socket_id);
3112 print_ethaddr("peer=",
3113 &peer_eth_addrs[fs->peer_addr]);
3121 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
3123 struct rte_ether_addr new_peer_addr;
3124 if (!rte_eth_dev_is_valid_port(port_id)) {
3125 printf("Error: Invalid port number %i\n", port_id);
3128 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
3129 printf("Error: Invalid ethernet address: %s\n", peer_addr);
3132 peer_eth_addrs[port_id] = new_peer_addr;
3136 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
3139 unsigned int lcore_cpuid;
3144 for (i = 0; i < nb_lc; i++) {
3145 lcore_cpuid = lcorelist[i];
3146 if (! rte_lcore_is_enabled(lcore_cpuid)) {
3147 printf("lcore %u not enabled\n", lcore_cpuid);
3150 if (lcore_cpuid == rte_get_main_lcore()) {
3151 printf("lcore %u cannot be used for packet "
3152 "forwarding: it is the main lcore, "
3153 "reserved for command line parsing only\n",
3158 fwd_lcores_cpuids[i] = lcore_cpuid;
3160 if (record_now == 0) {
3164 nb_cfg_lcores = (lcoreid_t) nb_lc;
3165 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
3166 printf("previous number of forwarding cores %u - changed to "
3167 "number of configured cores %u\n",
3168 (unsigned int) nb_fwd_lcores, nb_lc);
3169 nb_fwd_lcores = (lcoreid_t) nb_lc;
3176 set_fwd_lcores_mask(uint64_t lcoremask)
3178 unsigned int lcorelist[64];
3182 if (lcoremask == 0) {
3183 printf("Invalid NULL mask of cores\n");
3187 for (i = 0; i < 64; i++) {
3188 if (! ((uint64_t)(1ULL << i) & lcoremask))
3190 lcorelist[nb_lc++] = i;
3192 return set_fwd_lcores_list(lcorelist, nb_lc);
3196 set_fwd_lcores_number(uint16_t nb_lc)
3198 if (test_done == 0) {
3199 printf("Please stop forwarding first\n");
3202 if (nb_lc > nb_cfg_lcores) {
3203 printf("nb fwd cores %u > %u (max. number of configured "
3204 "lcores) - ignored\n",
3205 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
3208 nb_fwd_lcores = (lcoreid_t) nb_lc;
3209 printf("Number of forwarding cores set to %u\n",
3210 (unsigned int) nb_fwd_lcores);
3214 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
3222 for (i = 0; i < nb_pt; i++) {
3223 port_id = (portid_t) portlist[i];
3224 if (port_id_is_invalid(port_id, ENABLED_WARN))
3227 fwd_ports_ids[i] = port_id;
3229 if (record_now == 0) {
3233 nb_cfg_ports = (portid_t) nb_pt;
3234 if (nb_fwd_ports != (portid_t) nb_pt) {
3235 printf("previous number of forwarding ports %u - changed to "
3236 "number of configured ports %u\n",
3237 (unsigned int) nb_fwd_ports, nb_pt);
3238 nb_fwd_ports = (portid_t) nb_pt;
3243 * Parse the user input and obtain the list of forwarding ports
3246 * String containing the user input. User can specify
3247 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
3248 * For example, if the user wants to use all the available
3249 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3.
3250 * If the user wants to use only the ports 1,2 then the input
3252 * valid characters are '-' and ','
3253 * @param[out] values
3254 * This array will be filled with a list of port IDs
3255 * based on the user input
3256 * Note that duplicate entries are discarded; only the first
3257 * count entries in this array are valid port IDs, and the rest
3258 * keep their default values
3259 * @param[in] maxsize
3260 * This parameter denotes 2 things
3261 * 1) Number of elements in the values array
3262 * 2) Maximum value of each element in the values array
3264 * On success, returns total count of parsed port IDs
3265 * On failure, returns 0
3268 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
3270 unsigned int count = 0;
3274 unsigned int marked[maxsize];
3276 if (list == NULL || values == NULL)
3279 for (i = 0; i < (int)maxsize; i++)
3285 /* Remove the blank spaces, if any */
3286 while (isblank(*list))
3291 value = strtol(list, &end, 10);
3292 if (errno || end == NULL)
3294 if (value < 0 || value >= (int)maxsize)
3296 while (isblank(*end))
3298 if (*end == '-' && min == INT_MAX) {
3300 } else if ((*end == ',') || (*end == '\0')) {
3304 for (i = min; i <= max; i++) {
3305 if (count < maxsize) {
3317 } while (*end != '\0');
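/*
 * Usage sketch (hypothetical values): parse_port_list("1-2,5", values, 8)
 * fills values[] with {1, 2, 5} and returns 3; a malformed or out-of-range
 * entry makes the function return 0.
 */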
3323 parse_fwd_portlist(const char *portlist)
3325 unsigned int portcount;
3326 unsigned int portindex[RTE_MAX_ETHPORTS];
3327 unsigned int i, valid_port_count = 0;
3329 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
3331 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
3334 * Here we verify the validity of the ports
3335 * and thereby calculate the total number of
3338 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
3339 if (rte_eth_dev_is_valid_port(portindex[i])) {
3340 portindex[valid_port_count] = portindex[i];
3345 set_fwd_ports_list(portindex, valid_port_count);
3349 set_fwd_ports_mask(uint64_t portmask)
3351 unsigned int portlist[64];
3355 if (portmask == 0) {
3356 printf("Invalid NULL mask of ports\n");
3360 RTE_ETH_FOREACH_DEV(i) {
3361 if (! ((uint64_t)(1ULL << i) & portmask))
3363 portlist[nb_pt++] = i;
3365 set_fwd_ports_list(portlist, nb_pt);
3369 set_fwd_ports_number(uint16_t nb_pt)
3371 if (nb_pt > nb_cfg_ports) {
3372 printf("nb fwd ports %u > %u (number of configured "
3373 "ports) - ignored\n",
3374 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
3377 nb_fwd_ports = (portid_t) nb_pt;
3378 printf("Number of forwarding ports set to %u\n",
3379 (unsigned int) nb_fwd_ports);
3383 port_is_forwarding(portid_t port_id)
3387 if (port_id_is_invalid(port_id, ENABLED_WARN))
3390 for (i = 0; i < nb_fwd_ports; i++) {
3391 if (fwd_ports_ids[i] == port_id)
3399 set_nb_pkt_per_burst(uint16_t nb)
3401 if (nb > MAX_PKT_BURST) {
3402 printf("nb pkt per burst: %u > %u (maximum packets per burst) "
3404 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
3407 nb_pkt_per_burst = nb;
3408 printf("Number of packets per burst set to %u\n",
3409 (unsigned int) nb_pkt_per_burst);
3413 tx_split_get_name(enum tx_pkt_split split)
3417 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3418 if (tx_split_name[i].split == split)
3419 return tx_split_name[i].name;
3425 set_tx_pkt_split(const char *name)
3429 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3430 if (strcmp(tx_split_name[i].name, name) == 0) {
3431 tx_pkt_split = tx_split_name[i].split;
3435 printf("unknown value: \"%s\"\n", name);
3439 parse_fec_mode(const char *name, uint32_t *mode)
3443 for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
3444 if (strcmp(fec_mode_name[i].name, name) == 0) {
3445 *mode = RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
3453 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa)
3457 printf("FEC capabilities:\n");
3459 for (i = 0; i < num; i++) {
3461 rte_eth_link_speed_to_str(speed_fec_capa[i].speed));
3463 for (j = 0; j < RTE_DIM(fec_mode_name); j++) {
3464 if (RTE_ETH_FEC_MODE_TO_CAPA(j) &
3465 speed_fec_capa[i].capa)
3466 printf("%s ", fec_mode_name[j].name);
3473 show_rx_pkt_offsets(void)
3478 printf("Number of offsets: %u\n", n);
3480 printf("Segment offsets: ");
3481 for (i = 0; i != n - 1; i++)
3482 printf("%hu,", rx_pkt_seg_offsets[i]);
3483 printf("%hu\n", rx_pkt_seg_offsets[i]);
3488 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
3492 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
3493 printf("nb offsets per RX packet=%u >= "
3494 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
3499 * No extra check here, the segment offsets will be checked by the PMD
3500 * in the extended queue setup.
3502 for (i = 0; i < nb_offs; i++) {
3503 if (seg_offsets[i] >= UINT16_MAX) {
3504 printf("offset[%u]=%u > UINT16_MAX - give up\n",
3510 for (i = 0; i < nb_offs; i++)
3511 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];
3513 rx_pkt_nb_offs = (uint8_t) nb_offs;
3517 show_rx_pkt_segments(void)
3522 printf("Number of segments: %u\n", n);
3524 printf("Segment sizes: ");
3525 for (i = 0; i != n - 1; i++)
3526 printf("%hu,", rx_pkt_seg_lengths[i]);
3527 printf("%hu\n", rx_pkt_seg_lengths[i]);
3532 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
3536 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
3537 printf("nb segments per RX packet=%u >= "
3538 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
3543 * No extra check here, the segment length will be checked by PMD
3544 * in the extended queue setup.
3546 for (i = 0; i < nb_segs; i++) {
3547 if (seg_lengths[i] >= UINT16_MAX) {
3548 printf("length[%u]=%u > UINT16_MAX - give up\n",
3554 for (i = 0; i < nb_segs; i++)
3555 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
3557 rx_pkt_nb_segs = (uint8_t) nb_segs;
3561 show_tx_pkt_segments(void)
3567 split = tx_split_get_name(tx_pkt_split);
3569 printf("Number of segments: %u\n", n);
3570 printf("Segment sizes: ");
3571 for (i = 0; i != n - 1; i++)
3572 printf("%hu,", tx_pkt_seg_lengths[i]);
3573 printf("%hu\n", tx_pkt_seg_lengths[i]);
3574 printf("Split packet: %s\n", split);
3578 nb_segs_is_invalid(unsigned int nb_segs)
3585 RTE_ETH_FOREACH_DEV(port_id) {
3586 for (queue_id = 0; queue_id < nb_txq; queue_id++) {
3587 ret = get_tx_ring_size(port_id, queue_id, &ring_size);
3592 if (ring_size < nb_segs) {
3593 printf("nb segments per TX packet=%u > "
3594 "TX queue(%u) ring_size=%u - ignored\n",
3595 nb_segs, queue_id, ring_size);
3605 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
3607 uint16_t tx_pkt_len;
3610 if (nb_segs_is_invalid(nb_segs))
3614 * Check that each segment length does not exceed
3615 * the mbuf data size.
3616 * Check also that the total packet length is greater than or equal to the
3617 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
3621 for (i = 0; i < nb_segs; i++) {
3622 if (seg_lengths[i] > mbuf_data_size[0]) {
3623 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
3624 i, seg_lengths[i], mbuf_data_size[0]);
3627 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
3629 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
3630 printf("total packet length=%u < %d - give up\n",
3631 (unsigned) tx_pkt_len,
3632 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
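/*
 * With a 14-byte Ethernet header this minimum works out to
 * 14 + 20 + 8 = 42 bytes, i.e. the size of an empty UDP/IPv4 packet.
 */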
3636 for (i = 0; i < nb_segs; i++)
3637 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
3639 tx_pkt_length = tx_pkt_len;
3640 tx_pkt_nb_segs = (uint8_t) nb_segs;
3644 show_tx_pkt_times(void)
3646 printf("Interburst gap: %u\n", tx_pkt_times_inter);
3647 printf("Intraburst gap: %u\n", tx_pkt_times_intra);
3651 set_tx_pkt_times(unsigned int *tx_times)
3653 tx_pkt_times_inter = tx_times[0];
3654 tx_pkt_times_intra = tx_times[1];
3658 setup_gro(const char *onoff, portid_t port_id)
3660 if (!rte_eth_dev_is_valid_port(port_id)) {
3661 printf("invalid port id %u\n", port_id);
3664 if (test_done == 0) {
3665 printf("Before enabling/disabling GRO,"
3666 " please stop forwarding first\n");
3669 if (strcmp(onoff, "on") == 0) {
3670 if (gro_ports[port_id].enable != 0) {
3671 printf("Port %u already has GRO enabled. Please"
3672 " disable GRO first\n", port_id);
3675 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3676 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
3677 gro_ports[port_id].param.max_flow_num =
3678 GRO_DEFAULT_FLOW_NUM;
3679 gro_ports[port_id].param.max_item_per_flow =
3680 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
3682 gro_ports[port_id].enable = 1;
3684 if (gro_ports[port_id].enable == 0) {
3685 printf("GRO is already disabled on port %u\n", port_id);
3688 gro_ports[port_id].enable = 0;
3693 setup_gro_flush_cycles(uint8_t cycles)
3695 if (test_done == 0) {
3696 printf("Before changing the GRO flush interval,"
3697 " please stop forwarding first.\n");
3701 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
3702 GRO_DEFAULT_FLUSH_CYCLES) {
3703 printf("The flushing cycle must be in the range"
3704 " of 1 to %u. Reverting to the default"
3706 GRO_MAX_FLUSH_CYCLES,
3707 GRO_DEFAULT_FLUSH_CYCLES);
3708 cycles = GRO_DEFAULT_FLUSH_CYCLES;
3711 gro_flush_cycles = cycles;
3715 show_gro(portid_t port_id)
3717 struct rte_gro_param *param;
3718 uint32_t max_pkts_num;
3720 param = &gro_ports[port_id].param;
3722 if (!rte_eth_dev_is_valid_port(port_id)) {
3723 printf("Invalid port id %u.\n", port_id);
3726 if (gro_ports[port_id].enable) {
3727 printf("GRO type: TCP/IPv4\n");
3728 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3729 max_pkts_num = param->max_flow_num *
3730 param->max_item_per_flow;
3732 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
3733 printf("Max number of packets to perform GRO: %u\n",
3735 printf("Flushing cycles: %u\n", gro_flush_cycles);
3737 printf("GRO is not enabled on port %u.\n", port_id);
3741 setup_gso(const char *mode, portid_t port_id)
3743 if (!rte_eth_dev_is_valid_port(port_id)) {
3744 printf("invalid port id %u\n", port_id);
3747 if (strcmp(mode, "on") == 0) {
3748 if (test_done == 0) {
3749 printf("before enabling GSO,"
3750 " please stop forwarding first\n");
3753 gso_ports[port_id].enable = 1;
3754 } else if (strcmp(mode, "off") == 0) {
3755 if (test_done == 0) {
3756 printf("before disabling GSO,"
3757 " please stop forwarding first\n");
3760 gso_ports[port_id].enable = 0;
3765 list_pkt_forwarding_modes(void)
3767 static char fwd_modes[128] = "";
3768 const char *separator = "|";
3769 struct fwd_engine *fwd_eng;
3772 if (strlen (fwd_modes) == 0) {
3773 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3774 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3775 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3776 strncat(fwd_modes, separator,
3777 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3779 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3786 list_pkt_forwarding_retry_modes(void)
3788 static char fwd_modes[128] = "";
3789 const char *separator = "|";
3790 struct fwd_engine *fwd_eng;
3793 if (strlen(fwd_modes) == 0) {
3794 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3795 if (fwd_eng == &rx_only_engine)
3797 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3799 strlen(fwd_modes) - 1);
3800 strncat(fwd_modes, separator,
3802 strlen(fwd_modes) - 1);
3804 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3811 set_pkt_forwarding_mode(const char *fwd_mode_name)
3813 struct fwd_engine *fwd_eng;
3817 while ((fwd_eng = fwd_engines[i]) != NULL) {
3818 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
3819 printf("Set %s packet forwarding mode%s\n",
3821 retry_enabled == 0 ? "" : " with retry");
3822 cur_fwd_eng = fwd_eng;
3827 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
3831 add_rx_dump_callbacks(portid_t portid)
3833 struct rte_eth_dev_info dev_info;
3837 if (port_id_is_invalid(portid, ENABLED_WARN))
3840 ret = eth_dev_info_get_print_err(portid, &dev_info);
3844 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3845 if (!ports[portid].rx_dump_cb[queue])
3846 ports[portid].rx_dump_cb[queue] =
3847 rte_eth_add_rx_callback(portid, queue,
3848 dump_rx_pkts, NULL);
3852 add_tx_dump_callbacks(portid_t portid)
3854 struct rte_eth_dev_info dev_info;
3858 if (port_id_is_invalid(portid, ENABLED_WARN))
3861 ret = eth_dev_info_get_print_err(portid, &dev_info);
3865 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3866 if (!ports[portid].tx_dump_cb[queue])
3867 ports[portid].tx_dump_cb[queue] =
3868 rte_eth_add_tx_callback(portid, queue,
3869 dump_tx_pkts, NULL);
3873 remove_rx_dump_callbacks(portid_t portid)
3875 struct rte_eth_dev_info dev_info;
3879 if (port_id_is_invalid(portid, ENABLED_WARN))
3882 ret = eth_dev_info_get_print_err(portid, &dev_info);
3886 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3887 if (ports[portid].rx_dump_cb[queue]) {
3888 rte_eth_remove_rx_callback(portid, queue,
3889 ports[portid].rx_dump_cb[queue]);
3890 ports[portid].rx_dump_cb[queue] = NULL;
3895 remove_tx_dump_callbacks(portid_t portid)
3897 struct rte_eth_dev_info dev_info;
3901 if (port_id_is_invalid(portid, ENABLED_WARN))
3904 ret = eth_dev_info_get_print_err(portid, &dev_info);
3908 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3909 if (ports[portid].tx_dump_cb[queue]) {
3910 rte_eth_remove_tx_callback(portid, queue,
3911 ports[portid].tx_dump_cb[queue]);
3912 ports[portid].tx_dump_cb[queue] = NULL;
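/*
 * Attach or detach the RX/TX packet-dump callbacks on every port according to
 * the verbose level (the exact level-to-callback mapping depends on the
 * conditions elided below); requires RTE_ETHDEV_RXTX_CALLBACKS.
 */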
3917 configure_rxtx_dump_callbacks(uint16_t verbose)
3921 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3922 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
3926 RTE_ETH_FOREACH_DEV(portid)
3928 if (verbose == 1 || verbose > 2)
3929 add_rx_dump_callbacks(portid);
3931 remove_rx_dump_callbacks(portid);
3933 add_tx_dump_callbacks(portid);
3935 remove_tx_dump_callbacks(portid);
3940 set_verbose_level(uint16_t vb_level)
3942 printf("Change verbose level from %u to %u\n",
3943 (unsigned int) verbose_level, (unsigned int) vb_level);
3944 verbose_level = vb_level;
3945 configure_rxtx_dump_callbacks(verbose_level);
3949 vlan_extend_set(portid_t port_id, int on)
3953 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3955 if (port_id_is_invalid(port_id, ENABLED_WARN))
3958 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3961 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
3962 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3964 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
3965 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3968 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3970 printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
3971 "diag=%d\n", port_id, on, diag);
3974 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3978 rx_vlan_strip_set(portid_t port_id, int on)
3982 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3984 if (port_id_is_invalid(port_id, ENABLED_WARN))
3987 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3990 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
3991 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3993 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
3994 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3997 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3999 printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
4000 "diag=%d\n", port_id, on, diag);
4003 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4007 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
4011 if (port_id_is_invalid(port_id, ENABLED_WARN))
4014 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
4016 printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
4017 "diag=%d\n", port_id, queue_id, on, diag);
4021 rx_vlan_filter_set(portid_t port_id, int on)
4025 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4027 if (port_id_is_invalid(port_id, ENABLED_WARN))
4030 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4033 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
4034 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
4036 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
4037 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
4040 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4042 printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
4043 "diag=%d\n", port_id, on, diag);
4046 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4050 rx_vlan_qinq_strip_set(portid_t port_id, int on)
4054 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4056 if (port_id_is_invalid(port_id, ENABLED_WARN))
4059 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4062 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
4063 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
4065 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
4066 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
4069 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4071 printf("%s(port_id=%d, on=%d) failed "
4072 "diag=%d\n", __func__, port_id, on, diag);
4075 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4079 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
4083 if (port_id_is_invalid(port_id, ENABLED_WARN))
4085 if (vlan_id_is_invalid(vlan_id))
4087 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
4090 printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
4092 port_id, vlan_id, on, diag);
4097 rx_vlan_all_filter_set(portid_t port_id, int on)
4101 if (port_id_is_invalid(port_id, ENABLED_WARN))
4103 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
4104 if (rx_vft_set(port_id, vlan_id, on))
4110 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
4114 if (port_id_is_invalid(port_id, ENABLED_WARN))
4117 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
4121 printf("tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
4123 port_id, vlan_type, tp_id, diag);
4127 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
4129 struct rte_eth_dev_info dev_info;
4132 if (vlan_id_is_invalid(vlan_id))
4135 if (ports[port_id].dev_conf.txmode.offloads &
4136 DEV_TX_OFFLOAD_QINQ_INSERT) {
4137 printf("Error: cannot set VLAN insert while QinQ insert is enabled\n");
4141 ret = eth_dev_info_get_print_err(port_id, &dev_info);
4145 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
4146 printf("Error: vlan insert is not supported by port %d\n",
4151 tx_vlan_reset(port_id);
4152 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
4153 ports[port_id].tx_vlan_id = vlan_id;
4157 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
4159 struct rte_eth_dev_info dev_info;
4162 if (vlan_id_is_invalid(vlan_id))
4164 if (vlan_id_is_invalid(vlan_id_outer))
4167 ret = eth_dev_info_get_print_err(port_id, &dev_info);
4171 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
4172 printf("Error: qinq insert not supported by port %d\n",
4177 tx_vlan_reset(port_id);
4178 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
4179 DEV_TX_OFFLOAD_QINQ_INSERT);
4180 ports[port_id].tx_vlan_id = vlan_id;
4181 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
4185 tx_vlan_reset(portid_t port_id)
4187 ports[port_id].dev_conf.txmode.offloads &=
4188 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
4189 DEV_TX_OFFLOAD_QINQ_INSERT);
4190 ports[port_id].tx_vlan_id = 0;
4191 ports[port_id].tx_vlan_id_outer = 0;
4195 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
4197 if (port_id_is_invalid(port_id, ENABLED_WARN))
4200 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
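/*
 * Map an RX or TX queue of the given port onto one of the per-queue statistics
 * counters; map_value must be below RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */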
4204 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
4208 if (port_id_is_invalid(port_id, ENABLED_WARN))
4211 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
4214 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
4215 printf("map_value not in required range 0..%d\n",
4216 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
4220 if (!is_rx) { /* tx */
4221 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
4224 printf("failed to set tx queue stats mapping.\n");
4228 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
4231 printf("failed to set rx queue stats mapping.\n");
4238 set_xstats_hide_zero(uint8_t on_off)
4240 xstats_hide_zero = on_off;
4244 set_record_core_cycles(uint8_t on_off)
4246 record_core_cycles = on_off;
4250 set_record_burst_stats(uint8_t on_off)
4252 record_burst_stats = on_off;
4256 print_fdir_mask(struct rte_eth_fdir_masks *mask)
4258 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
4260 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4261 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
4262 " tunnel_id: 0x%08x",
4263 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
4264 rte_be_to_cpu_32(mask->tunnel_id_mask));
4265 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
4266 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
4267 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
4268 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
4270 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
4271 rte_be_to_cpu_16(mask->src_port_mask),
4272 rte_be_to_cpu_16(mask->dst_port_mask));
4274 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4275 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
4276 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
4277 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
4278 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
4280 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4281 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
4282 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
4283 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
4284 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
4291 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4293 struct rte_eth_flex_payload_cfg *cfg;
4296 for (i = 0; i < flex_conf->nb_payloads; i++) {
4297 cfg = &flex_conf->flex_set[i];
4298 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
4300 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
4301 printf("\n L2_PAYLOAD: ");
4302 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
4303 printf("\n L3_PAYLOAD: ");
4304 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
4305 printf("\n L4_PAYLOAD: ");
4307 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
4308 for (j = 0; j < num; j++)
4309 printf(" %-5u", cfg->src_offset[j]);
4315 flowtype_to_str(uint16_t flow_type)
4317 struct flow_type_info {
4323 static struct flow_type_info flowtype_str_table[] = {
4324 {"raw", RTE_ETH_FLOW_RAW},
4325 {"ipv4", RTE_ETH_FLOW_IPV4},
4326 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
4327 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
4328 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
4329 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
4330 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
4331 {"ipv6", RTE_ETH_FLOW_IPV6},
4332 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
4333 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
4334 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
4335 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
4336 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
4337 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
4338 {"port", RTE_ETH_FLOW_PORT},
4339 {"vxlan", RTE_ETH_FLOW_VXLAN},
4340 {"geneve", RTE_ETH_FLOW_GENEVE},
4341 {"nvgre", RTE_ETH_FLOW_NVGRE},
4342 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
4345 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
4346 if (flowtype_str_table[i].ftype == flow_type)
4347 return flowtype_str_table[i].str;
4353 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
4356 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4358 struct rte_eth_fdir_flex_mask *mask;
4362 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
4363 mask = &flex_conf->flex_mask[i];
4364 p = flowtype_to_str(mask->flow_type);
4365 printf("\n %s:\t", p ? p : "unknown");
4366 for (j = 0; j < num; j++)
4367 printf(" %02x", mask->mask[j]);
4373 print_fdir_flow_type(uint32_t flow_types_mask)
4378 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
4379 if (!(flow_types_mask & (1 << i)))
4381 p = flowtype_to_str(i);
4391 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
4392 struct rte_eth_fdir_stats *fdir_stat)
4397 if (ret == -ENOTSUP) {
4398 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
4400 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
4403 #ifdef RTE_NET_IXGBE
4404 if (ret == -ENOTSUP) {
4405 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
4407 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
4414 printf("\n FDIR is not supported on port %-2d\n",
4418 printf("programming error: (%s)\n", strerror(-ret));
4425 fdir_get_infos(portid_t port_id)
4427 struct rte_eth_fdir_stats fdir_stat;
4428 struct rte_eth_fdir_info fdir_info;
4430 static const char *fdir_stats_border = "########################";
4432 if (port_id_is_invalid(port_id, ENABLED_WARN))
4435 memset(&fdir_info, 0, sizeof(fdir_info));
4436 memset(&fdir_stat, 0, sizeof(fdir_stat));
4437 if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
4440 printf("\n %s FDIR infos for port %-2d %s\n",
4441 fdir_stats_border, port_id, fdir_stats_border);
4443 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
4444 printf(" PERFECT\n");
4445 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
4446 printf(" PERFECT-MAC-VLAN\n");
4447 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4448 printf(" PERFECT-TUNNEL\n");
4449 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
4450 printf(" SIGNATURE\n");
4452 printf(" DISABLE\n");
4453 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
4454 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
4455 printf(" SUPPORTED FLOW TYPE: ");
4456 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
4458 printf(" FLEX PAYLOAD INFO:\n");
4459 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
4460 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
4461 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
4462 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
4463 fdir_info.flex_payload_unit,
4464 fdir_info.max_flex_payload_segment_num,
4465 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
4467 print_fdir_mask(&fdir_info.mask);
4468 if (fdir_info.flex_conf.nb_payloads > 0) {
4469 printf(" FLEX PAYLOAD SRC OFFSET:");
4470 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4472 if (fdir_info.flex_conf.nb_flexmasks > 0) {
4473 printf(" FLEX MASK CFG:");
4474 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4476 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
4477 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
4478 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
4479 fdir_info.guarant_spc, fdir_info.best_spc);
4480 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
4481 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
4482 " add: %-10"PRIu64" remove: %"PRIu64"\n"
4483 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
4484 fdir_stat.collision, fdir_stat.free,
4485 fdir_stat.maxhash, fdir_stat.maxlen,
4486 fdir_stat.add, fdir_stat.remove,
4487 fdir_stat.f_add, fdir_stat.f_remove);
4488 printf(" %s############################%s\n",
4489 fdir_stats_border, fdir_stats_border);
4492 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */
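/*
 * Record a flow director flex mask in the port configuration: reuse the entry
 * matching cfg->flow_type if one already exists, otherwise append a new entry
 * while the flex mask table has room.
 */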
4495 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
4497 struct rte_port *port;
4498 struct rte_eth_fdir_flex_conf *flex_conf;
4501 port = &ports[port_id];
4502 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4503 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
4504 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
4509 if (i >= RTE_ETH_FLOW_MAX) {
4510 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
4511 idx = flex_conf->nb_flexmasks;
4512 flex_conf->nb_flexmasks++;
4514 printf("The flex mask table is full. Cannot set flex"
4515 " mask for flow_type(%u).", cfg->flow_type);
4519 rte_memcpy(&flex_conf->flex_mask[idx],
4521 sizeof(struct rte_eth_fdir_flex_mask));
4525 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
4527 struct rte_port *port;
4528 struct rte_eth_fdir_flex_conf *flex_conf;
4531 port = &ports[port_id];
4532 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4533 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
4534 if (cfg->type == flex_conf->flex_set[i].type) {
4539 if (i >= RTE_ETH_PAYLOAD_MAX) {
4540 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
4541 idx = flex_conf->nb_payloads;
4542 flex_conf->nb_payloads++;
4544 printf("The flex payload table is full. Cannot set"
4545 " flex payload for type(%u).", cfg->type);
4549 rte_memcpy(&flex_conf->flex_set[idx],
4551 sizeof(struct rte_eth_flex_payload_cfg));
4556 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
4558 #ifdef RTE_NET_IXGBE
4562 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
4564 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
4568 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
4569 is_rx ? "rx" : "tx", port_id, diag);
4572 printf("VF %s setting not supported for port %d\n",
4573 is_rx ? "Rx" : "Tx", port_id);
4579 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
4582 struct rte_eth_link link;
4585 if (port_id_is_invalid(port_id, ENABLED_WARN))
4587 ret = eth_link_get_nowait_print_err(port_id, &link);
4590 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
4591 rate > link.link_speed) {
4592 printf("Invalid rate value:%u bigger than link speed: %u\n",
4593 rate, link.link_speed);
4596 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
4599 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
4605 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
4607 int diag = -ENOTSUP;
4611 RTE_SET_USED(q_msk);
4613 #ifdef RTE_NET_IXGBE
4614 if (diag == -ENOTSUP)
4615 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
4619 if (diag == -ENOTSUP)
4620 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
4625 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
4631 * Functions to manage the set of filtered Multicast MAC addresses.
4633 * A pool of filtered multicast MAC addresses is associated with each port.
4634 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
4635 * The address of the pool and the number of valid multicast MAC addresses
4636 * recorded in the pool are stored in the fields "mc_addr_pool" and
4637 * "mc_addr_nb" of the "rte_port" data structure.
4639 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
4640 * that it be supplied a contiguous array of multicast MAC addresses.
4641 * To comply with this constraint, the set of multicast addresses recorded
4642 * in the pool is systematically compacted at the beginning of the pool.
4643 * Hence, when a multicast address is removed from the pool, all following
4644 * addresses, if any, are copied back to keep the set contiguous.
4646 #define MCAST_POOL_INC 32
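/*
 * Example of the chunked growth (hypothetical state): with 32 addresses
 * already recorded, appending a 33rd one triggers a realloc() of the pool to
 * 64 entries; removing an address in the middle shifts the following entries
 * down so that the set stays contiguous.
 */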
4649 mcast_addr_pool_extend(struct rte_port *port)
4651 struct rte_ether_addr *mc_pool;
4652 size_t mc_pool_size;
4655 * If a free entry is available at the end of the pool, just
4656 * increment the number of recorded multicast addresses.
4658 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
4664 * [re]allocate a pool with MCAST_POOL_INC more entries.
4665 * The previous test guarantees that port->mc_addr_nb is a multiple
4666 * of MCAST_POOL_INC.
4668 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
4670 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
4672 if (mc_pool == NULL) {
4673 printf("allocation of pool of %u multicast addresses failed\n",
4674 port->mc_addr_nb + MCAST_POOL_INC);
4678 port->mc_addr_pool = mc_pool;
4685 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
4687 if (mcast_addr_pool_extend(port) != 0)
4689 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
4693 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
4696 if (addr_idx == port->mc_addr_nb) {
4697 /* No need to recompact the set of multicast addresses. */
4698 if (port->mc_addr_nb == 0) {
4699 /* free the pool of multicast addresses. */
4700 free(port->mc_addr_pool);
4701 port->mc_addr_pool = NULL;
4705 memmove(&port->mc_addr_pool[addr_idx],
4706 &port->mc_addr_pool[addr_idx + 1],
4707 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
4711 eth_port_multicast_addr_list_set(portid_t port_id)
4713 struct rte_port *port;
4716 port = &ports[port_id];
4717 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
4720 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
4721 port_id, port->mc_addr_nb, diag);
4727 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
4729 struct rte_port *port;
4732 if (port_id_is_invalid(port_id, ENABLED_WARN))
4735 port = &ports[port_id];
4738 * Check that the added multicast MAC address is not already recorded
4739 * in the pool of multicast addresses.
4741 for (i = 0; i < port->mc_addr_nb; i++) {
4742 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
4743 printf("multicast address already filtered by port\n");
4748 mcast_addr_pool_append(port, mc_addr);
4749 if (eth_port_multicast_addr_list_set(port_id) < 0)
4750 /* Rollback on failure, remove the address from the pool */
4751 mcast_addr_pool_remove(port, i);
4755 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
4757 struct rte_port *port;
4760 if (port_id_is_invalid(port_id, ENABLED_WARN))
4763 port = &ports[port_id];
4766 * Search the pool of multicast MAC addresses for the removed address.
4768 for (i = 0; i < port->mc_addr_nb; i++) {
4769 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
4772 if (i == port->mc_addr_nb) {
4773 printf("multicast address not filtered by port %d\n", port_id);
4777 mcast_addr_pool_remove(port, i);
4778 if (eth_port_multicast_addr_list_set(port_id) < 0)
4779 /* Rollback on failure, add the address back into the pool */
4780 mcast_addr_pool_append(port, mc_addr);
4784 port_dcb_info_display(portid_t port_id)
4786 struct rte_eth_dcb_info dcb_info;
4789 static const char *border = "================";
4791 if (port_id_is_invalid(port_id, ENABLED_WARN))
4794 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
4796 printf("\n Failed to get dcb infos on port %-2d\n",
4800 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
4801 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
4803 for (i = 0; i < dcb_info.nb_tcs; i++)
4805 printf("\n Priority : ");
4806 for (i = 0; i < dcb_info.nb_tcs; i++)
4807 printf("\t%4d", dcb_info.prio_tc[i]);
4808 printf("\n BW percent :");
4809 for (i = 0; i < dcb_info.nb_tcs; i++)
4810 printf("\t%4d%%", dcb_info.tc_bws[i]);
4811 printf("\n RXQ base : ");
4812 for (i = 0; i < dcb_info.nb_tcs; i++)
4813 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
4814 printf("\n RXQ number :");
4815 for (i = 0; i < dcb_info.nb_tcs; i++)
4816 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
4817 printf("\n TXQ base : ");
4818 for (i = 0; i < dcb_info.nb_tcs; i++)
4819 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
4820 printf("\n TXQ number :");
4821 for (i = 0; i < dcb_info.nb_tcs; i++)
4822 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
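/*
 * Read a regular file entirely into a freshly allocated buffer; on success the
 * buffer is returned and, when the size pointer is not NULL, it receives the
 * file length. The buffer is meant to be released later with close_file().
 */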
4827 open_file(const char *file_path, uint32_t *size)
4829 int fd = open(file_path, O_RDONLY);
4831 uint8_t *buf = NULL;
4839 printf("%s: Failed to open %s\n", __func__, file_path);
4843 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
4845 printf("%s: File operations failed\n", __func__);
4849 pkg_size = st_buf.st_size;
4852 printf("%s: File operations failed\n", __func__);
4856 buf = (uint8_t *)malloc(pkg_size);
4859 printf("%s: Failed to malloc memory\n", __func__);
4863 ret = read(fd, buf, pkg_size);
4866 printf("%s: File read operation failed\n", __func__);
4880 save_file(const char *file_path, uint8_t *buf, uint32_t size)
4882 FILE *fh = fopen(file_path, "wb");
4885 printf("%s: Failed to open %s\n", __func__, file_path);
4889 if (fwrite(buf, 1, size, fh) != size) {
4891 printf("%s: File write operation failed\n", __func__);
4901 close_file(uint8_t *buf)
4912 port_queue_region_info_display(portid_t port_id, void *buf)
4916 struct rte_pmd_i40e_queue_regions *info =
4917 (struct rte_pmd_i40e_queue_regions *)buf;
4918 static const char *queue_region_info_stats_border = "-------";
4920 if (!info->queue_region_number)
4921 printf("No queue region has been set yet");
4923 printf("\n %s All queue region info for port=%2d %s",
4924 queue_region_info_stats_border, port_id,
4925 queue_region_info_stats_border);
4926 printf("\n queue_region_number: %-14u \n",
4927 info->queue_region_number);
4929 for (i = 0; i < info->queue_region_number; i++) {
4930 printf("\n region_id: %-14u queue_number: %-14u "
4931 "queue_start_index: %-14u \n",
4932 info->region[i].region_id,
4933 info->region[i].queue_num,
4934 info->region[i].queue_start_index);
4936 printf(" user_priority_num is %-14u :",
4937 info->region[i].user_priority_num);
4938 for (j = 0; j < info->region[i].user_priority_num; j++)
4939 printf(" %-14u ", info->region[i].user_priority[j]);
4941 printf("\n flowtype_num is %-14u :",
4942 info->region[i].flowtype_num);
4943 for (j = 0; j < info->region[i].flowtype_num; j++)
4944 printf(" %-14u ", info->region[i].hw_flowtype[j]);
4947 RTE_SET_USED(port_id);
4955 show_macs(portid_t port_id)
4957 char buf[RTE_ETHER_ADDR_FMT_SIZE];
4958 struct rte_eth_dev_info dev_info;
4959 struct rte_ether_addr *addr;
4960 uint32_t i, num_macs = 0;
4961 struct rte_eth_dev *dev;
4963 dev = &rte_eth_devices[port_id];
4965 if (eth_dev_info_get_print_err(port_id, &dev_info))
4968 for (i = 0; i < dev_info.max_mac_addrs; i++) {
4969 addr = &dev->data->mac_addrs[i];
4971 /* skip zero address */
4972 if (rte_is_zero_ether_addr(addr))
4978 printf("Number of MAC addresses added: %d\n", num_macs);
4980 for (i = 0; i < dev_info.max_mac_addrs; i++) {
4981 addr = &dev->data->mac_addrs[i];
4983 /* skip zero address */
4984 if (rte_is_zero_ether_addr(addr))
4987 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
4988 printf(" %s\n", buf);
4993 show_mcast_macs(portid_t port_id)
4995 char buf[RTE_ETHER_ADDR_FMT_SIZE];
4996 struct rte_ether_addr *addr;
4997 struct rte_port *port;
5000 port = &ports[port_id];
5002 printf("Number of multicast MAC addresses added: %d\n", port->mc_addr_nb);
5004 for (i = 0; i < port->mc_addr_nb; i++) {
5005 addr = &port->mc_addr_pool[i];
5007 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
5008 printf(" %s\n", buf);