1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2013-2014 6WIND S.A.
13 #include <sys/queue.h>
14 #include <sys/types.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
34 #include <rte_interrupts.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
41 #include <rte_errno.h>
42 #ifdef RTE_LIBRTE_IXGBE_PMD
43 #include <rte_pmd_ixgbe.h>
45 #ifdef RTE_LIBRTE_I40E_PMD
46 #include <rte_pmd_i40e.h>
48 #ifdef RTE_LIBRTE_BNXT_PMD
49 #include <rte_pmd_bnxt.h>
52 #include <rte_config.h>
56 static char *flowtype_to_str(uint16_t flow_type);
59 enum tx_pkt_split split;
63 .split = TX_PKT_SPLIT_OFF,
67 .split = TX_PKT_SPLIT_ON,
71 .split = TX_PKT_SPLIT_RND,
76 const struct rss_type_info rss_type_table[] = {
77 { "all", ETH_RSS_IP | ETH_RSS_TCP |
78 ETH_RSS_UDP | ETH_RSS_SCTP |
81 { "ipv4", ETH_RSS_IPV4 },
82 { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
83 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
84 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
85 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
86 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
87 { "ipv6", ETH_RSS_IPV6 },
88 { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
89 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
90 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
91 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
92 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
93 { "l2-payload", ETH_RSS_L2_PAYLOAD },
94 { "ipv6-ex", ETH_RSS_IPV6_EX },
95 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
96 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
97 { "port", ETH_RSS_PORT },
98 { "vxlan", ETH_RSS_VXLAN },
99 { "geneve", ETH_RSS_GENEVE },
100 { "nvgre", ETH_RSS_NVGRE },
101 { "ip", ETH_RSS_IP },
102 { "udp", ETH_RSS_UDP },
103 { "tcp", ETH_RSS_TCP },
104 { "sctp", ETH_RSS_SCTP },
105 { "tunnel", ETH_RSS_TUNNEL },
110 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
112 char buf[RTE_ETHER_ADDR_FMT_SIZE];
113 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
114 printf("%s%s", name, buf);
118 nic_stats_display(portid_t port_id)
120 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
121 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
122 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
123 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
124 static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
125 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
127 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
128 struct rte_eth_stats stats;
129 struct rte_port *port = &ports[port_id];
132 static const char *nic_stats_border = "########################";
134 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
138 rte_eth_stats_get(port_id, &stats);
139 printf("\n %s NIC statistics for port %-2d %s\n",
140 nic_stats_border, port_id, nic_stats_border);
142 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
143 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
145 stats.ipackets, stats.imissed, stats.ibytes);
146 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
147 printf(" RX-nombuf: %-10"PRIu64"\n",
149 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
151 stats.opackets, stats.oerrors, stats.obytes);
154 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
155 " RX-bytes: %10"PRIu64"\n",
156 stats.ipackets, stats.ierrors, stats.ibytes);
157 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
158 printf(" RX-nombuf: %10"PRIu64"\n",
160 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
161 " TX-bytes: %10"PRIu64"\n",
162 stats.opackets, stats.oerrors, stats.obytes);
165 if (port->rx_queue_stats_mapping_enabled) {
167 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
168 printf(" Stats reg %2d RX-packets: %10"PRIu64
169 " RX-errors: %10"PRIu64
170 " RX-bytes: %10"PRIu64"\n",
171 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
174 if (port->tx_queue_stats_mapping_enabled) {
176 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
177 printf(" Stats reg %2d TX-packets: %10"PRIu64
178 " TX-bytes: %10"PRIu64"\n",
179 i, stats.q_opackets[i], stats.q_obytes[i]);
183 diff_cycles = prev_cycles[port_id];
184 prev_cycles[port_id] = rte_rdtsc();
186 diff_cycles = prev_cycles[port_id] - diff_cycles;
188 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
189 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
190 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
191 (stats.opackets - prev_pkts_tx[port_id]) : 0;
192 prev_pkts_rx[port_id] = stats.ipackets;
193 prev_pkts_tx[port_id] = stats.opackets;
194 mpps_rx = diff_cycles > 0 ?
195 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
196 mpps_tx = diff_cycles > 0 ?
197 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
199 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
200 (stats.ibytes - prev_bytes_rx[port_id]) : 0;
201 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
202 (stats.obytes - prev_bytes_tx[port_id]) : 0;
203 prev_bytes_rx[port_id] = stats.ibytes;
204 prev_bytes_tx[port_id] = stats.obytes;
205 mbps_rx = diff_cycles > 0 ?
206 diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0;
207 mbps_tx = diff_cycles > 0 ?
208 diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0;
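/*
 * Throughput since the last call is derived from the TSC delta:
 * rate = diff_count * rte_get_tsc_hz() / diff_cycles.  E.g. (illustrative
 * figures) 10,000,000 packets over 1,000,000,000 cycles of a 2 GHz TSC
 * give 10e6 * 2e9 / 1e9 = 20,000,000 pps.
 */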
210 printf("\n Throughput (since last show)\n");
211 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
212 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
213 mpps_tx, mbps_tx * 8);
215 printf(" %s############################%s\n",
216 nic_stats_border, nic_stats_border);
220 nic_stats_clear(portid_t port_id)
222 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
226 rte_eth_stats_reset(port_id);
227 printf("\n NIC statistics for port %d cleared\n", port_id);
231 nic_xstats_display(portid_t port_id)
233 struct rte_eth_xstat *xstats;
234 int cnt_xstats, idx_xstat;
235 struct rte_eth_xstat_name *xstats_names;
237 printf("###### NIC extended statistics for port %-2d\n", port_id);
238 if (!rte_eth_dev_is_valid_port(port_id)) {
239 printf("Error: Invalid port number %i\n", port_id);
244 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
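/*
 * Called with a NULL array and size 0, rte_eth_xstats_get_names() only
 * returns the number of available xstats; the names themselves are
 * fetched below once the lookup table has been allocated.
 */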
245 if (cnt_xstats < 0) {
246 printf("Error: Cannot get count of xstats\n");
250 /* Get id-name lookup table */
251 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
252 if (xstats_names == NULL) {
253 printf("Cannot allocate memory for xstats lookup\n");
256 if (cnt_xstats != rte_eth_xstats_get_names(
257 port_id, xstats_names, cnt_xstats)) {
258 printf("Error: Cannot get xstats lookup\n");
263 /* Get stats themselves */
264 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
265 if (xstats == NULL) {
266 printf("Cannot allocate memory for xstats\n");
270 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
271 printf("Error: Unable to get xstats\n");
278 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
279 if (xstats_hide_zero && !xstats[idx_xstat].value)
281 printf("%s: %"PRIu64"\n",
282 xstats_names[idx_xstat].name,
283 xstats[idx_xstat].value);
290 nic_xstats_clear(portid_t port_id)
294 ret = rte_eth_xstats_reset(port_id);
296 printf("%s: Error: failed to reset xstats (port %u): %s\n",
297 __func__, port_id, strerror(-ret));
302 nic_stats_mapping_display(portid_t port_id)
304 struct rte_port *port = &ports[port_id];
307 static const char *nic_stats_mapping_border = "########################";
309 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
314 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
315 printf("Port id %d - either does not support queue statistic mapping or"
316 " no queue statistic mapping set\n", port_id);
320 printf("\n %s NIC statistics mapping for port %-2d %s\n",
321 nic_stats_mapping_border, port_id, nic_stats_mapping_border);
323 if (port->rx_queue_stats_mapping_enabled) {
324 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
325 if (rx_queue_stats_mappings[i].port_id == port_id) {
326 printf(" RX-queue %2d mapped to Stats Reg %2d\n",
327 rx_queue_stats_mappings[i].queue_id,
328 rx_queue_stats_mappings[i].stats_counter_id);
335 if (port->tx_queue_stats_mapping_enabled) {
336 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
337 if (tx_queue_stats_mappings[i].port_id == port_id) {
338 printf(" TX-queue %2d mapped to Stats Reg %2d\n",
339 tx_queue_stats_mappings[i].queue_id,
340 tx_queue_stats_mappings[i].stats_counter_id);
345 printf(" %s####################################%s\n",
346 nic_stats_mapping_border, nic_stats_mapping_border);
350 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
352 struct rte_eth_rxq_info qinfo;
354 static const char *info_border = "*********************";
356 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
358 printf("Failed to retrieve information for port: %u, "
359 "RX queue: %hu\nerror desc: %s(%d)\n",
360 port_id, queue_id, strerror(-rc), rc);
364 printf("\n%s Infos for port %-2u, RX queue %-2u %s",
365 info_border, port_id, queue_id, info_border);
367 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
368 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
369 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
370 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
371 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
372 printf("\nRX drop packets: %s",
373 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
374 printf("\nRX deferred start: %s",
375 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
376 printf("\nRX scattered packets: %s",
377 (qinfo.scattered_rx != 0) ? "on" : "off");
378 printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
383 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
385 struct rte_eth_txq_info qinfo;
387 static const char *info_border = "*********************";
389 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
391 printf("Failed to retrieve information for port: %u, "
392 "TX queue: %hu\nerror desc: %s(%d)\n",
393 port_id, queue_id, strerror(-rc), rc);
397 printf("\n%s Infos for port %-2u, TX queue %-2u %s",
398 info_border, port_id, queue_id, info_border);
400 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
401 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
402 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
403 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
404 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
405 printf("\nTX deferred start: %s",
406 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
407 printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
411 static int bus_match_all(const struct rte_bus *bus, const void *data)
419 device_infos_display(const char *identifier)
421 static const char *info_border = "*********************";
422 struct rte_bus *start = NULL, *next;
423 struct rte_dev_iterator dev_iter;
424 char name[RTE_ETH_NAME_MAX_LEN];
425 struct rte_ether_addr mac_addr;
426 struct rte_device *dev;
427 struct rte_devargs da;
431 memset(&da, 0, sizeof(da));
435 if (rte_devargs_parsef(&da, "%s", identifier)) {
436 printf("cannot parse identifier\n");
443 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
446 if (identifier && da.bus != next)
449 /* Skip buses that don't have an iterate method */
450 if (!next->dev_iterate)
453 snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
454 RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
458 /* Check for matching device if identifier is present */
460 strncmp(da.name, dev->name, strlen(dev->name)))
462 printf("\n%s Infos for device %s %s\n",
463 info_border, dev->name, info_border);
464 printf("Bus name: %s", dev->bus->name);
465 printf("\nDriver name: %s", dev->driver->name);
466 printf("\nDevargs: %s",
467 dev->devargs ? dev->devargs->args : "");
468 printf("\nConnect to socket: %d", dev->numa_node);
471 /* List ports with matching device name */
472 RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
473 printf("\n\tPort id: %-2d", port_id);
474 if (eth_macaddr_get_print_err(port_id,
476 print_ethaddr("\n\tMAC address: ",
478 rte_eth_dev_get_name_by_port(port_id, name);
479 printf("\n\tDevice name: %s", name);
487 port_infos_display(portid_t port_id)
489 struct rte_port *port;
490 struct rte_ether_addr mac_addr;
491 struct rte_eth_link link;
492 struct rte_eth_dev_info dev_info;
494 struct rte_mempool * mp;
495 static const char *info_border = "*********************";
497 char name[RTE_ETH_NAME_MAX_LEN];
500 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
504 port = &ports[port_id];
505 ret = eth_link_get_nowait_print_err(port_id, &link);
509 ret = eth_dev_info_get_print_err(port_id, &dev_info);
513 printf("\n%s Infos for port %-2d %s\n",
514 info_border, port_id, info_border);
515 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
516 print_ethaddr("MAC address: ", &mac_addr);
517 rte_eth_dev_get_name_by_port(port_id, name);
518 printf("\nDevice name: %s", name);
519 printf("\nDriver name: %s", dev_info.driver_name);
520 if (dev_info.device->devargs && dev_info.device->devargs->args)
521 printf("\nDevargs: %s", dev_info.device->devargs->args);
522 printf("\nConnect to socket: %u", port->socket_id);
524 if (port_numa[port_id] != NUMA_NO_CONFIG) {
525 mp = mbuf_pool_find(port_numa[port_id]);
527 printf("\nmemory allocation on the socket: %d",
530 printf("\nmemory allocation on the socket: %u", port->socket_id);
532 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
533 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
534 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
535 ("full-duplex") : ("half-duplex"));
537 if (!rte_eth_dev_get_mtu(port_id, &mtu))
538 printf("MTU: %u\n", mtu);
540 printf("Promiscuous mode: %s\n",
541 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
542 printf("Allmulticast mode: %s\n",
543 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
544 printf("Maximum number of MAC addresses: %u\n",
545 (unsigned int)(port->dev_info.max_mac_addrs));
546 printf("Maximum number of MAC addresses of hash filtering: %u\n",
547 (unsigned int)(port->dev_info.max_hash_mac_addrs));
549 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
550 if (vlan_offload >= 0) {
551 printf("VLAN offload:\n");
552 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
553 printf(" strip on, ");
555 printf(" strip off, ");
557 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
558 printf("filter on, ");
560 printf("filter off, ");
562 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
563 printf("extend on, ");
565 printf("extend off, ");
567 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
568 printf("qinq strip on\n");
570 printf("qinq strip off\n");
573 if (dev_info.hash_key_size > 0)
574 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
575 if (dev_info.reta_size > 0)
576 printf("Redirection table size: %u\n", dev_info.reta_size);
577 if (!dev_info.flow_type_rss_offloads)
578 printf("No RSS offload flow type is supported.\n");
583 printf("Supported RSS offload flow types:\n");
584 for (i = RTE_ETH_FLOW_UNKNOWN + 1;
585 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
586 if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
588 p = flowtype_to_str(i);
592 printf(" user defined %d\n", i);
596 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
597 printf("Maximum configurable length of RX packet: %u\n",
598 dev_info.max_rx_pktlen);
599 if (dev_info.max_vfs)
600 printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
601 if (dev_info.max_vmdq_pools)
602 printf("Maximum number of VMDq pools: %u\n",
603 dev_info.max_vmdq_pools);
605 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
606 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
607 printf("Max possible number of RXDs per queue: %hu\n",
608 dev_info.rx_desc_lim.nb_max);
609 printf("Min possible number of RXDs per queue: %hu\n",
610 dev_info.rx_desc_lim.nb_min);
611 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
613 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
614 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
615 printf("Max possible number of TXDs per queue: %hu\n",
616 dev_info.tx_desc_lim.nb_max);
617 printf("Min possible number of TXDs per queue: %hu\n",
618 dev_info.tx_desc_lim.nb_min);
619 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
620 printf("Max segment number per packet: %hu\n",
621 dev_info.tx_desc_lim.nb_seg_max);
622 printf("Max segment number per MTU/TSO: %hu\n",
623 dev_info.tx_desc_lim.nb_mtu_seg_max);
625 /* Show switch info only if a valid switch domain and port id are set */
626 if (dev_info.switch_info.domain_id !=
627 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
628 if (dev_info.switch_info.name)
629 printf("Switch name: %s\n", dev_info.switch_info.name);
631 printf("Switch domain Id: %u\n",
632 dev_info.switch_info.domain_id);
633 printf("Switch Port Id: %u\n",
634 dev_info.switch_info.port_id);
639 port_summary_header_display(void)
641 uint16_t port_number;
643 port_number = rte_eth_dev_count_avail();
644 printf("Number of available ports: %i\n", port_number);
645 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
646 "Driver", "Status", "Link");
650 port_summary_display(portid_t port_id)
652 struct rte_ether_addr mac_addr;
653 struct rte_eth_link link;
654 struct rte_eth_dev_info dev_info;
655 char name[RTE_ETH_NAME_MAX_LEN];
658 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
663 ret = eth_link_get_nowait_print_err(port_id, &link);
667 ret = eth_dev_info_get_print_err(port_id, &dev_info);
671 rte_eth_dev_get_name_by_port(port_id, name);
672 ret = eth_macaddr_get_print_err(port_id, &mac_addr);
676 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
677 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
678 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
679 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
680 dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
681 (unsigned int) link.link_speed);
685 port_offload_cap_display(portid_t port_id)
687 struct rte_eth_dev_info dev_info;
688 static const char *info_border = "************";
691 if (port_id_is_invalid(port_id, ENABLED_WARN))
694 ret = eth_dev_info_get_print_err(port_id, &dev_info);
698 printf("\n%s Port %d supported offload features: %s\n",
699 info_border, port_id, info_border);
701 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
702 printf("VLAN stripped: ");
703 if (ports[port_id].dev_conf.rxmode.offloads &
704 DEV_RX_OFFLOAD_VLAN_STRIP)
710 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
711 printf("Double VLANs stripped: ");
712 if (ports[port_id].dev_conf.rxmode.offloads &
713 DEV_RX_OFFLOAD_QINQ_STRIP)
719 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
720 printf("RX IPv4 checksum: ");
721 if (ports[port_id].dev_conf.rxmode.offloads &
722 DEV_RX_OFFLOAD_IPV4_CKSUM)
728 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
729 printf("RX UDP checksum: ");
730 if (ports[port_id].dev_conf.rxmode.offloads &
731 DEV_RX_OFFLOAD_UDP_CKSUM)
737 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
738 printf("RX TCP checksum: ");
739 if (ports[port_id].dev_conf.rxmode.offloads &
740 DEV_RX_OFFLOAD_TCP_CKSUM)
746 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
747 printf("RX SCTP checksum: ");
748 if (ports[port_id].dev_conf.rxmode.offloads &
749 DEV_RX_OFFLOAD_SCTP_CKSUM)
755 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
756 printf("RX Outer IPv4 checksum: ");
757 if (ports[port_id].dev_conf.rxmode.offloads &
758 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
764 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
765 printf("RX Outer UDP checksum: ");
766 if (ports[port_id].dev_conf.rxmode.offloads &
767 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
773 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
774 printf("Large receive offload: ");
775 if (ports[port_id].dev_conf.rxmode.offloads &
776 DEV_RX_OFFLOAD_TCP_LRO)
782 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
783 printf("HW timestamp: ");
784 if (ports[port_id].dev_conf.rxmode.offloads &
785 DEV_RX_OFFLOAD_TIMESTAMP)
791 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
792 printf("Rx Keep CRC: ");
793 if (ports[port_id].dev_conf.rxmode.offloads &
794 DEV_RX_OFFLOAD_KEEP_CRC)
800 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
801 printf("RX offload security: ");
802 if (ports[port_id].dev_conf.rxmode.offloads &
803 DEV_RX_OFFLOAD_SECURITY)
809 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
810 printf("VLAN insert: ");
811 if (ports[port_id].dev_conf.txmode.offloads &
812 DEV_TX_OFFLOAD_VLAN_INSERT)
818 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
819 printf("Double VLANs insert: ");
820 if (ports[port_id].dev_conf.txmode.offloads &
821 DEV_TX_OFFLOAD_QINQ_INSERT)
827 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
828 printf("TX IPv4 checksum: ");
829 if (ports[port_id].dev_conf.txmode.offloads &
830 DEV_TX_OFFLOAD_IPV4_CKSUM)
836 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
837 printf("TX UDP checksum: ");
838 if (ports[port_id].dev_conf.txmode.offloads &
839 DEV_TX_OFFLOAD_UDP_CKSUM)
845 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
846 printf("TX TCP checksum: ");
847 if (ports[port_id].dev_conf.txmode.offloads &
848 DEV_TX_OFFLOAD_TCP_CKSUM)
854 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
855 printf("TX SCTP checksum: ");
856 if (ports[port_id].dev_conf.txmode.offloads &
857 DEV_TX_OFFLOAD_SCTP_CKSUM)
863 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
864 printf("TX Outer IPv4 checksum: ");
865 if (ports[port_id].dev_conf.txmode.offloads &
866 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
872 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
873 printf("TX TCP segmentation: ");
874 if (ports[port_id].dev_conf.txmode.offloads &
875 DEV_TX_OFFLOAD_TCP_TSO)
881 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
882 printf("TX UDP segmentation: ");
883 if (ports[port_id].dev_conf.txmode.offloads &
884 DEV_TX_OFFLOAD_UDP_TSO)
890 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
891 printf("TSO for VXLAN tunnel packet: ");
892 if (ports[port_id].dev_conf.txmode.offloads &
893 DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
899 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
900 printf("TSO for GRE tunnel packet: ");
901 if (ports[port_id].dev_conf.txmode.offloads &
902 DEV_TX_OFFLOAD_GRE_TNL_TSO)
908 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
909 printf("TSO for IPIP tunnel packet: ");
910 if (ports[port_id].dev_conf.txmode.offloads &
911 DEV_TX_OFFLOAD_IPIP_TNL_TSO)
917 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
918 printf("TSO for GENEVE tunnel packet: ");
919 if (ports[port_id].dev_conf.txmode.offloads &
920 DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
926 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
927 printf("IP tunnel TSO: ");
928 if (ports[port_id].dev_conf.txmode.offloads &
929 DEV_TX_OFFLOAD_IP_TNL_TSO)
935 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
936 printf("UDP tunnel TSO: ");
937 if (ports[port_id].dev_conf.txmode.offloads &
938 DEV_TX_OFFLOAD_UDP_TNL_TSO)
944 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
945 printf("TX Outer UDP checksum: ");
946 if (ports[port_id].dev_conf.txmode.offloads &
947 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
956 port_id_is_invalid(portid_t port_id, enum print_warning warning)
960 if (port_id == (portid_t)RTE_PORT_ALL)
963 RTE_ETH_FOREACH_DEV(pid)
967 if (warning == ENABLED_WARN)
968 printf("Invalid port %d\n", port_id);
973 void print_valid_ports(void)
977 printf("The valid ports array is [");
978 RTE_ETH_FOREACH_DEV(pid) {
985 vlan_id_is_invalid(uint16_t vlan_id)
989 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
994 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
996 const struct rte_pci_device *pci_dev;
997 const struct rte_bus *bus;
1000 if (reg_off & 0x3) {
1001 printf("Port register offset 0x%X not aligned on a 4-byte "
1007 if (!ports[port_id].dev_info.device) {
1008 printf("Invalid device\n");
1012 bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
1013 if (bus && !strcmp(bus->name, "pci")) {
1014 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
1016 printf("Not a PCI device\n");
1020 pci_len = pci_dev->mem_resource[0].len;
1021 if (reg_off >= pci_len) {
1022 printf("Port %d: register offset %u (0x%X) out of port PCI "
1023 "resource (length=%"PRIu64")\n",
1024 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
1031 reg_bit_pos_is_invalid(uint8_t bit_pos)
1035 printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
1039 #define display_port_and_reg_off(port_id, reg_off) \
1040 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
1043 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1045 display_port_and_reg_off(port_id, (unsigned)reg_off);
1046 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
1050 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
1055 if (port_id_is_invalid(port_id, ENABLED_WARN))
1057 if (port_reg_off_is_invalid(port_id, reg_off))
1059 if (reg_bit_pos_is_invalid(bit_x))
1061 reg_v = port_id_pci_reg_read(port_id, reg_off);
1062 display_port_and_reg_off(port_id, (unsigned)reg_off);
1063 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
1067 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
1068 uint8_t bit1_pos, uint8_t bit2_pos)
1074 if (port_id_is_invalid(port_id, ENABLED_WARN))
1076 if (port_reg_off_is_invalid(port_id, reg_off))
1078 if (reg_bit_pos_is_invalid(bit1_pos))
1080 if (reg_bit_pos_is_invalid(bit2_pos))
1082 if (bit1_pos > bit2_pos)
1083 l_bit = bit2_pos, h_bit = bit1_pos;
1085 l_bit = bit1_pos, h_bit = bit2_pos;
1087 reg_v = port_id_pci_reg_read(port_id, reg_off);
1090 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
1091 display_port_and_reg_off(port_id, (unsigned)reg_off);
1092 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
1093 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
1097 port_reg_display(portid_t port_id, uint32_t reg_off)
1101 if (port_id_is_invalid(port_id, ENABLED_WARN))
1103 if (port_reg_off_is_invalid(port_id, reg_off))
1105 reg_v = port_id_pci_reg_read(port_id, reg_off);
1106 display_port_reg_value(port_id, reg_off, reg_v);
1110 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
1115 if (port_id_is_invalid(port_id, ENABLED_WARN))
1117 if (port_reg_off_is_invalid(port_id, reg_off))
1119 if (reg_bit_pos_is_invalid(bit_pos))
1122 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
1125 reg_v = port_id_pci_reg_read(port_id, reg_off);
1127 reg_v &= ~(1 << bit_pos);
1129 reg_v |= (1 << bit_pos);
1130 port_id_pci_reg_write(port_id, reg_off, reg_v);
1131 display_port_reg_value(port_id, reg_off, reg_v);
1135 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
1136 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
1143 if (port_id_is_invalid(port_id, ENABLED_WARN))
1145 if (port_reg_off_is_invalid(port_id, reg_off))
1147 if (reg_bit_pos_is_invalid(bit1_pos))
1149 if (reg_bit_pos_is_invalid(bit2_pos))
1151 if (bit1_pos > bit2_pos)
1152 l_bit = bit2_pos, h_bit = bit1_pos;
1154 l_bit = bit1_pos, h_bit = bit2_pos;
1156 if ((h_bit - l_bit) < 31)
1157 max_v = (1 << (h_bit - l_bit + 1)) - 1;
1161 if (value > max_v) {
1162 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
1163 (unsigned)value, (unsigned)value,
1164 (unsigned)max_v, (unsigned)max_v);
1167 reg_v = port_id_pci_reg_read(port_id, reg_off);
1168 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1169 reg_v |= (value << l_bit); /* Set changed bits */
1170 port_id_pci_reg_write(port_id, reg_off, reg_v);
1171 display_port_reg_value(port_id, reg_off, reg_v);
1175 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1177 if (port_id_is_invalid(port_id, ENABLED_WARN))
1179 if (port_reg_off_is_invalid(port_id, reg_off))
1181 port_id_pci_reg_write(port_id, reg_off, reg_v);
1182 display_port_reg_value(port_id, reg_off, reg_v);
1186 port_mtu_set(portid_t port_id, uint16_t mtu)
1189 struct rte_eth_dev_info dev_info;
1192 if (port_id_is_invalid(port_id, ENABLED_WARN))
1195 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1199 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
1200 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
1201 mtu, dev_info.min_mtu, dev_info.max_mtu);
1204 diag = rte_eth_dev_set_mtu(port_id, mtu);
1207 printf("Set MTU failed. diag=%d\n", diag);
1210 /* Generic flow management functions. */
1212 /** Generate a port_flow entry from attributes/pattern/actions. */
1213 static struct port_flow *
1214 port_flow_new(const struct rte_flow_attr *attr,
1215 const struct rte_flow_item *pattern,
1216 const struct rte_flow_action *actions,
1217 struct rte_flow_error *error)
1219 const struct rte_flow_conv_rule rule = {
1221 .pattern_ro = pattern,
1222 .actions_ro = actions,
1224 struct port_flow *pf;
1227 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
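/*
 * With a NULL destination buffer and zero size, rte_flow_conv() only
 * computes and returns the number of bytes needed to store the converted
 * rule; the actual copy is done by the second call below.
 */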
1230 pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1233 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1237 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1244 /** Print a message out of a flow error. */
1246 port_flow_complain(struct rte_flow_error *error)
1248 static const char *const errstrlist[] = {
1249 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1250 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1251 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1252 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1253 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1254 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1255 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1256 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1257 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1258 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1259 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1260 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1261 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1262 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1263 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1264 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1265 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1269 int err = rte_errno;
1271 if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1272 !errstrlist[error->type])
1273 errstr = "unknown type";
1275 errstr = errstrlist[error->type];
1276 printf("Caught error type %d (%s): %s%s: %s\n",
1277 error->type, errstr,
1278 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1279 error->cause), buf) : "",
1280 error->message ? error->message : "(no stated reason)",
1285 /** Validate flow rule. */
1287 port_flow_validate(portid_t port_id,
1288 const struct rte_flow_attr *attr,
1289 const struct rte_flow_item *pattern,
1290 const struct rte_flow_action *actions)
1292 struct rte_flow_error error;
1294 /* Poisoning to make sure PMDs update it in case of error. */
1295 memset(&error, 0x11, sizeof(error));
1296 if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1297 return port_flow_complain(&error);
1298 printf("Flow rule validated\n");
1302 /** Create flow rule. */
1304 port_flow_create(portid_t port_id,
1305 const struct rte_flow_attr *attr,
1306 const struct rte_flow_item *pattern,
1307 const struct rte_flow_action *actions)
1309 struct rte_flow *flow;
1310 struct rte_port *port;
1311 struct port_flow *pf;
1313 struct rte_flow_error error;
1315 /* Poisoning to make sure PMDs update it in case of error. */
1316 memset(&error, 0x22, sizeof(error));
1317 flow = rte_flow_create(port_id, attr, pattern, actions, &error);
1319 return port_flow_complain(&error);
1320 port = &ports[port_id];
1321 if (port->flow_list) {
1322 if (port->flow_list->id == UINT32_MAX) {
1323 printf("Highest rule ID is already assigned, delete"
1325 rte_flow_destroy(port_id, flow, NULL);
1328 id = port->flow_list->id + 1;
1331 pf = port_flow_new(attr, pattern, actions, &error);
1333 rte_flow_destroy(port_id, flow, NULL);
1334 return port_flow_complain(&error);
1336 pf->next = port->flow_list;
1339 port->flow_list = pf;
1340 printf("Flow rule #%u created\n", pf->id);
1344 /** Destroy a number of flow rules. */
1346 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
1348 struct rte_port *port;
1349 struct port_flow **tmp;
1353 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1354 port_id == (portid_t)RTE_PORT_ALL)
1356 port = &ports[port_id];
1357 tmp = &port->flow_list;
1361 for (i = 0; i != n; ++i) {
1362 struct rte_flow_error error;
1363 struct port_flow *pf = *tmp;
1365 if (rule[i] != pf->id)
1368 * Poisoning to make sure PMDs update it in case
1371 memset(&error, 0x33, sizeof(error));
1372 if (rte_flow_destroy(port_id, pf->flow, &error)) {
1373 ret = port_flow_complain(&error);
1376 printf("Flow rule #%u destroyed\n", pf->id);
1382 tmp = &(*tmp)->next;
1388 /** Remove all flow rules. */
1390 port_flow_flush(portid_t port_id)
1392 struct rte_flow_error error;
1393 struct rte_port *port;
1396 /* Poisoning to make sure PMDs update it in case of error. */
1397 memset(&error, 0x44, sizeof(error));
1398 if (rte_flow_flush(port_id, &error)) {
1399 ret = port_flow_complain(&error);
1400 if (port_id_is_invalid(port_id, DISABLED_WARN) ||
1401 port_id == (portid_t)RTE_PORT_ALL)
1404 port = &ports[port_id];
1405 while (port->flow_list) {
1406 struct port_flow *pf = port->flow_list->next;
1408 free(port->flow_list);
1409 port->flow_list = pf;
1414 /** Query a flow rule. */
1416 port_flow_query(portid_t port_id, uint32_t rule,
1417 const struct rte_flow_action *action)
1419 struct rte_flow_error error;
1420 struct rte_port *port;
1421 struct port_flow *pf;
1424 struct rte_flow_query_count count;
1428 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1429 port_id == (portid_t)RTE_PORT_ALL)
1431 port = &ports[port_id];
1432 for (pf = port->flow_list; pf; pf = pf->next)
1436 printf("Flow rule #%u not found\n", rule);
1439 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1440 &name, sizeof(name),
1441 (void *)(uintptr_t)action->type, &error);
1443 return port_flow_complain(&error);
1444 switch (action->type) {
1445 case RTE_FLOW_ACTION_TYPE_COUNT:
1448 printf("Cannot query action type %d (%s)\n",
1449 action->type, name);
1452 /* Poisoning to make sure PMDs update it in case of error. */
1453 memset(&error, 0x55, sizeof(error));
1454 memset(&query, 0, sizeof(query));
1455 if (rte_flow_query(port_id, pf->flow, action, &query, &error))
1456 return port_flow_complain(&error);
1457 switch (action->type) {
1458 case RTE_FLOW_ACTION_TYPE_COUNT:
1462 " hits: %" PRIu64 "\n"
1463 " bytes: %" PRIu64 "\n",
1465 query.count.hits_set,
1466 query.count.bytes_set,
1471 printf("Cannot display result for action type %d (%s)\n",
1472 action->type, name);
1478 /** List flow rules. */
1480 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
1482 struct rte_port *port;
1483 struct port_flow *pf;
1484 struct port_flow *list = NULL;
1487 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1488 port_id == (portid_t)RTE_PORT_ALL)
1490 port = &ports[port_id];
1491 if (!port->flow_list)
1493 /* Sort flows by group, priority and ID. */
1494 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
1495 struct port_flow **tmp;
1496 const struct rte_flow_attr *curr = pf->rule.attr;
1499 /* Filter out unwanted groups. */
1500 for (i = 0; i != n; ++i)
1501 if (curr->group == group[i])
1506 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
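/* Find the insertion point that keeps ascending group, priority, ID order. */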
1507 const struct rte_flow_attr *comp = (*tmp)->rule.attr;
1509 if (curr->group > comp->group ||
1510 (curr->group == comp->group &&
1511 curr->priority > comp->priority) ||
1512 (curr->group == comp->group &&
1513 curr->priority == comp->priority &&
1514 pf->id > (*tmp)->id))
1521 printf("ID\tGroup\tPrio\tAttr\tRule\n");
1522 for (pf = list; pf != NULL; pf = pf->tmp) {
1523 const struct rte_flow_item *item = pf->rule.pattern;
1524 const struct rte_flow_action *action = pf->rule.actions;
1527 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
1529 pf->rule.attr->group,
1530 pf->rule.attr->priority,
1531 pf->rule.attr->ingress ? 'i' : '-',
1532 pf->rule.attr->egress ? 'e' : '-',
1533 pf->rule.attr->transfer ? 't' : '-');
1534 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1535 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
1536 &name, sizeof(name),
1537 (void *)(uintptr_t)item->type,
1540 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
1541 printf("%s ", name);
1545 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
1546 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1547 &name, sizeof(name),
1548 (void *)(uintptr_t)action->type,
1551 if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
1552 printf(" %s", name);
1559 /** Restrict ingress traffic to the defined flow rules. */
1561 port_flow_isolate(portid_t port_id, int set)
1563 struct rte_flow_error error;
1565 /* Poisoning to make sure PMDs update it in case of error. */
1566 memset(&error, 0x66, sizeof(error));
1567 if (rte_flow_isolate(port_id, set, &error))
1568 return port_flow_complain(&error);
1569 printf("Ingress traffic on port %u is %s to the defined flow rules\n",
1571 set ? "now restricted" : "not restricted anymore");
1576 * RX/TX ring descriptors display functions.
1579 rx_queue_id_is_invalid(queueid_t rxq_id)
1581 if (rxq_id < nb_rxq)
1583 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
1588 tx_queue_id_is_invalid(queueid_t txq_id)
1590 if (txq_id < nb_txq)
1592 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq);
1597 rx_desc_id_is_invalid(uint16_t rxdesc_id)
1599 if (rxdesc_id < nb_rxd)
1601 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
1607 tx_desc_id_is_invalid(uint16_t txdesc_id)
1609 if (txdesc_id < nb_txd)
1611 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
1616 static const struct rte_memzone *
1617 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
1619 char mz_name[RTE_MEMZONE_NAMESIZE];
1620 const struct rte_memzone *mz;
1622 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
1623 port_id, q_id, ring_name);
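/* e.g. the "rx_ring" of port 0, queue 2 maps to memzone "eth_p0_q2_rx_ring" */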
1624 mz = rte_memzone_lookup(mz_name);
1626 printf("%s ring memory zoneof (port %d, queue %d) not"
1627 "found (zone name = %s\n",
1628 ring_name, port_id, q_id, mz_name);
1632 union igb_ring_dword {
1635 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1645 struct igb_ring_desc_32_bytes {
1646 union igb_ring_dword lo_dword;
1647 union igb_ring_dword hi_dword;
1648 union igb_ring_dword resv1;
1649 union igb_ring_dword resv2;
1652 struct igb_ring_desc_16_bytes {
1653 union igb_ring_dword lo_dword;
1654 union igb_ring_dword hi_dword;
1658 ring_rxd_display_dword(union igb_ring_dword dword)
1660 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
1661 (unsigned)dword.words.hi);
1665 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
1666 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1669 __rte_unused portid_t port_id,
1673 struct igb_ring_desc_16_bytes *ring =
1674 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1675 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1677 struct rte_eth_dev_info dev_info;
1679 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1683 if (strstr(dev_info.driver_name, "i40e") != NULL) {
1684 /* 32 bytes RX descriptor, i40e only */
1685 struct igb_ring_desc_32_bytes *ring =
1686 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
1687 ring[desc_id].lo_dword.dword =
1688 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1689 ring_rxd_display_dword(ring[desc_id].lo_dword);
1690 ring[desc_id].hi_dword.dword =
1691 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1692 ring_rxd_display_dword(ring[desc_id].hi_dword);
1693 ring[desc_id].resv1.dword =
1694 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
1695 ring_rxd_display_dword(ring[desc_id].resv1);
1696 ring[desc_id].resv2.dword =
1697 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
1698 ring_rxd_display_dword(ring[desc_id].resv2);
1703 /* 16 bytes RX descriptor */
1704 ring[desc_id].lo_dword.dword =
1705 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1706 ring_rxd_display_dword(ring[desc_id].lo_dword);
1707 ring[desc_id].hi_dword.dword =
1708 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1709 ring_rxd_display_dword(ring[desc_id].hi_dword);
1713 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
1715 struct igb_ring_desc_16_bytes *ring;
1716 struct igb_ring_desc_16_bytes txd;
1718 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1719 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1720 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1721 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
1722 (unsigned)txd.lo_dword.words.lo,
1723 (unsigned)txd.lo_dword.words.hi,
1724 (unsigned)txd.hi_dword.words.lo,
1725 (unsigned)txd.hi_dword.words.hi);
1729 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
1731 const struct rte_memzone *rx_mz;
1733 if (port_id_is_invalid(port_id, ENABLED_WARN))
1735 if (rx_queue_id_is_invalid(rxq_id))
1737 if (rx_desc_id_is_invalid(rxd_id))
1739 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
1742 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
1746 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
1748 const struct rte_memzone *tx_mz;
1750 if (port_id_is_invalid(port_id, ENABLED_WARN))
1752 if (tx_queue_id_is_invalid(txq_id))
1754 if (tx_desc_id_is_invalid(txd_id))
1756 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
1759 ring_tx_descriptor_display(tx_mz, txd_id);
1763 fwd_lcores_config_display(void)
1767 printf("List of forwarding lcores:");
1768 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
1769 printf(" %2u", fwd_lcores_cpuids[lc_id]);
1773 rxtx_config_display(void)
1778 printf(" %s packet forwarding%s packets/burst=%d\n",
1779 cur_fwd_eng->fwd_mode_name,
1780 retry_enabled == 0 ? "" : " with retry",
1783 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
1784 printf(" packet len=%u - nb packet segments=%d\n",
1785 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
1787 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
1788 nb_fwd_lcores, nb_fwd_ports);
1790 RTE_ETH_FOREACH_DEV(pid) {
1791 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
1792 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
1793 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
1794 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
1795 uint16_t nb_rx_desc_tmp;
1796 uint16_t nb_tx_desc_tmp;
1797 struct rte_eth_rxq_info rx_qinfo;
1798 struct rte_eth_txq_info tx_qinfo;
1801 /* per port config */
1802 printf(" port %d: RX queue number: %d Tx queue number: %d\n",
1803 (unsigned int)pid, nb_rxq, nb_txq);
1805 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
1806 ports[pid].dev_conf.rxmode.offloads,
1807 ports[pid].dev_conf.txmode.offloads);
1809 /* per-RX-queue config, shown only for the first queue to keep output brief */
1810 for (qid = 0; qid < 1; qid++) {
1811 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
1813 nb_rx_desc_tmp = nb_rx_desc[qid];
1815 nb_rx_desc_tmp = rx_qinfo.nb_desc;
1817 printf(" RX queue: %d\n", qid);
1818 printf(" RX desc=%d - RX free threshold=%d\n",
1819 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
1820 printf(" RX threshold registers: pthresh=%d hthresh=%d "
1822 rx_conf[qid].rx_thresh.pthresh,
1823 rx_conf[qid].rx_thresh.hthresh,
1824 rx_conf[qid].rx_thresh.wthresh);
1825 printf(" RX Offloads=0x%"PRIx64"\n",
1826 rx_conf[qid].offloads);
1829 /* per-TX-queue config, shown only for the first queue to keep output brief */
1830 for (qid = 0; qid < 1; qid++) {
1831 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
1833 nb_tx_desc_tmp = nb_tx_desc[qid];
1835 nb_tx_desc_tmp = tx_qinfo.nb_desc;
1837 printf(" TX queue: %d\n", qid);
1838 printf(" TX desc=%d - TX free threshold=%d\n",
1839 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
1840 printf(" TX threshold registers: pthresh=%d hthresh=%d "
1842 tx_conf[qid].tx_thresh.pthresh,
1843 tx_conf[qid].tx_thresh.hthresh,
1844 tx_conf[qid].tx_thresh.wthresh);
1845 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
1846 tx_conf[qid].offloads, tx_conf[qid].tx_rs_thresh);
1852 port_rss_reta_info(portid_t port_id,
1853 struct rte_eth_rss_reta_entry64 *reta_conf,
1854 uint16_t nb_entries)
1856 uint16_t i, idx, shift;
1859 if (port_id_is_invalid(port_id, ENABLED_WARN))
1862 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
1864 printf("Failed to get RSS RETA info, return code = %d\n", ret);
1868 for (i = 0; i < nb_entries; i++) {
1869 idx = i / RTE_RETA_GROUP_SIZE;
1870 shift = i % RTE_RETA_GROUP_SIZE;
1871 if (!(reta_conf[idx].mask & (1ULL << shift)))
1873 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
1874 i, reta_conf[idx].reta[shift]);
1879 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
1883 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
1885 struct rte_eth_rss_conf rss_conf = {0};
1886 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
1890 struct rte_eth_dev_info dev_info;
1891 uint8_t hash_key_size;
1894 if (port_id_is_invalid(port_id, ENABLED_WARN))
1897 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1901 if (dev_info.hash_key_size > 0 &&
1902 dev_info.hash_key_size <= sizeof(rss_key))
1903 hash_key_size = dev_info.hash_key_size;
1905 printf("dev_info did not provide a valid hash key size\n");
1909 /* Get RSS hash key if asked to display it */
1910 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
1911 rss_conf.rss_key_len = hash_key_size;
1912 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1916 printf("port index %d invalid\n", port_id);
1919 printf("operation not supported by device\n");
1922 printf("operation failed - diag=%d\n", diag);
1927 rss_hf = rss_conf.rss_hf;
1929 printf("RSS disabled\n");
1932 printf("RSS functions:\n ");
1933 for (i = 0; rss_type_table[i].str; i++) {
1934 if (rss_hf & rss_type_table[i].rss_type)
1935 printf("%s ", rss_type_table[i].str);
1940 printf("RSS key:\n");
1941 for (i = 0; i < hash_key_size; i++)
1942 printf("%02X", rss_key[i]);
1947 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
1950 struct rte_eth_rss_conf rss_conf;
1954 rss_conf.rss_key = NULL;
1955 rss_conf.rss_key_len = hash_key_len;
1956 rss_conf.rss_hf = 0;
1957 for (i = 0; rss_type_table[i].str; i++) {
1958 if (!strcmp(rss_type_table[i].str, rss_type))
1959 rss_conf.rss_hf = rss_type_table[i].rss_type;
1961 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1963 rss_conf.rss_key = hash_key;
1964 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
1971 printf("port index %d invalid\n", port_id);
1974 printf("operation not supported by device\n");
1977 printf("operation failed - diag=%d\n", diag);
1983 * Setup forwarding configuration for each logical core.
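 *
 * Streams are split as evenly as possible across the forwarding lcores:
 * e.g. (illustrative) 10 streams over 4 lcores give 2 streams each to the
 * first two lcores and 3 streams each to the last two.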
1986 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
1988 streamid_t nb_fs_per_lcore;
1996 nb_fs = cfg->nb_fwd_streams;
1997 nb_fc = cfg->nb_fwd_lcores;
1998 if (nb_fs <= nb_fc) {
1999 nb_fs_per_lcore = 1;
2002 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
2003 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
2006 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
2008 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
2009 fwd_lcores[lc_id]->stream_idx = sm_id;
2010 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
2011 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2015 * Assign extra remaining streams, if any.
2017 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
2018 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
2019 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
2020 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
2021 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2026 fwd_topology_tx_port_get(portid_t rxp)
2028 static int warning_once = 1;
2030 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
2032 switch (port_topology) {
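/*
 * paired:  ports 0<->1, 2<->3, ... (an odd last port pairs with itself)
 * chained: port N forwards to port (N + 1) % nb_fwd_ports
 * loop:    the RX port is reused as the TX port
 */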
2034 case PORT_TOPOLOGY_PAIRED:
2035 if ((rxp & 0x1) == 0) {
2036 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2039 printf("\nWarning! port-topology=paired"
2040 " and odd forward ports number,"
2041 " the last port will pair with"
2048 case PORT_TOPOLOGY_CHAINED:
2049 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2050 case PORT_TOPOLOGY_LOOP:
2056 simple_fwd_config_setup(void)
2060 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2061 cur_fwd_config.nb_fwd_streams =
2062 (streamid_t) cur_fwd_config.nb_fwd_ports;
2064 /* reinitialize forwarding streams */
2068 * In the simple forwarding test, the number of forwarding cores
2069 * must be less than or equal to the number of forwarding ports.
2071 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2072 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2073 cur_fwd_config.nb_fwd_lcores =
2074 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
2075 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2077 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2078 fwd_streams[i]->rx_port = fwd_ports_ids[i];
2079 fwd_streams[i]->rx_queue = 0;
2080 fwd_streams[i]->tx_port =
2081 fwd_ports_ids[fwd_topology_tx_port_get(i)];
2082 fwd_streams[i]->tx_queue = 0;
2083 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2084 fwd_streams[i]->retry_enabled = retry_enabled;
2089 * For the RSS forwarding test, all streams are distributed over the lcores. Each
2090 * stream is composed of an RX queue to poll on an RX port for input packets,
2091 * associated with a TX queue of a TX port to which forwarded packets are sent.
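 *
 * With this round-robin assignment, e.g. (illustrative) 2 forwarding ports
 * with 2 RX queues each yield 4 streams mapped as (port 0, queue 0),
 * (port 1, queue 0), (port 0, queue 1), (port 1, queue 1).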
2094 rss_fwd_config_setup(void)
2105 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2106 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2107 cur_fwd_config.nb_fwd_streams =
2108 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2110 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2111 cur_fwd_config.nb_fwd_lcores =
2112 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2114 /* reinitialize forwarding streams */
2117 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2119 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2120 struct fwd_stream *fs;
2122 fs = fwd_streams[sm_id];
2123 txp = fwd_topology_tx_port_get(rxp);
2124 fs->rx_port = fwd_ports_ids[rxp];
2126 fs->tx_port = fwd_ports_ids[txp];
2128 fs->peer_addr = fs->tx_port;
2129 fs->retry_enabled = retry_enabled;
2131 if (rxp < nb_fwd_ports)
2139 * For the DCB forwarding test, each core is assigned to one traffic class.
2141 * Each core handles a group of streams, each stream being composed of
2142 * an RX queue to poll on an RX port for input packets, associated with
2143 * a TX queue of a TX port to which forwarded packets are sent. All RX and
2144 * TX queues map to the same traffic class.
2145 * If VMDQ and DCB co-exist, each traffic class on different POOLs shares
2149 dcb_fwd_config_setup(void)
2151 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2152 portid_t txp, rxp = 0;
2153 queueid_t txq, rxq = 0;
2155 uint16_t nb_rx_queue, nb_tx_queue;
2156 uint16_t i, j, k, sm_id = 0;
2159 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2160 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2161 cur_fwd_config.nb_fwd_streams =
2162 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2164 /* reinitialize forwarding streams */
2168 /* get the dcb info on the first RX and TX ports */
2169 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2170 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2172 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2173 fwd_lcores[lc_id]->stream_nb = 0;
2174 fwd_lcores[lc_id]->stream_idx = sm_id;
2175 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2176 /* if the nb_queue is zero, this TC is
2177 * not enabled on the POOL
2179 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2181 k = fwd_lcores[lc_id]->stream_nb +
2182 fwd_lcores[lc_id]->stream_idx;
2183 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2184 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2185 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2186 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2187 for (j = 0; j < nb_rx_queue; j++) {
2188 struct fwd_stream *fs;
2190 fs = fwd_streams[k + j];
2191 fs->rx_port = fwd_ports_ids[rxp];
2192 fs->rx_queue = rxq + j;
2193 fs->tx_port = fwd_ports_ids[txp];
2194 fs->tx_queue = txq + j % nb_tx_queue;
2195 fs->peer_addr = fs->tx_port;
2196 fs->retry_enabled = retry_enabled;
2198 fwd_lcores[lc_id]->stream_nb +=
2199 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2201 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2204 if (tc < rxp_dcb_info.nb_tcs)
2206 /* Restart from TC 0 on next RX port */
2208 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2210 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2213 if (rxp >= nb_fwd_ports)
2215 /* get the dcb information on next RX and TX ports */
2216 if ((rxp & 0x1) == 0)
2217 txp = (portid_t) (rxp + 1);
2219 txp = (portid_t) (rxp - 1);
2220 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2221 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2226 icmp_echo_config_setup(void)
2233 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2234 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2235 (nb_txq * nb_fwd_ports);
2237 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2238 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2239 cur_fwd_config.nb_fwd_streams =
2240 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2241 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2242 cur_fwd_config.nb_fwd_lcores =
2243 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2244 if (verbose_level > 0) {
2245 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2247 cur_fwd_config.nb_fwd_lcores,
2248 cur_fwd_config.nb_fwd_ports,
2249 cur_fwd_config.nb_fwd_streams);
2252 /* reinitialize forwarding streams */
2254 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2256 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2257 if (verbose_level > 0)
2258 printf(" core=%d: \n", lc_id);
2259 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2260 struct fwd_stream *fs;
2261 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2262 fs->rx_port = fwd_ports_ids[rxp];
2264 fs->tx_port = fs->rx_port;
2266 fs->peer_addr = fs->tx_port;
2267 fs->retry_enabled = retry_enabled;
2268 if (verbose_level > 0)
2269 printf(" stream=%d port=%d rxq=%d txq=%d\n",
2270 sm_id, fs->rx_port, fs->rx_queue,
2272 rxq = (queueid_t) (rxq + 1);
2273 if (rxq == nb_rxq) {
2275 rxp = (portid_t) (rxp + 1);
2281 #if defined RTE_LIBRTE_PMD_SOFTNIC
2283 softnic_fwd_config_setup(void)
2285 struct rte_port *port;
2286 portid_t pid, softnic_portid;
2288 uint8_t softnic_enable = 0;
2290 RTE_ETH_FOREACH_DEV(pid) {
2292 const char *driver = port->dev_info.driver_name;
2294 if (strcmp(driver, "net_softnic") == 0) {
2295 softnic_portid = pid;
2301 if (softnic_enable == 0) {
2302 printf("Softnic mode not configured(%s)!\n", __func__);
2306 cur_fwd_config.nb_fwd_ports = 1;
2307 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;
2309 /* Re-initialize forwarding streams */
2313 * In the softnic forwarding test, the number of forwarding cores
2314 * is set to one and the remaining cores are used for softnic packet processing.
2316 cur_fwd_config.nb_fwd_lcores = 1;
2317 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2319 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
2320 fwd_streams[i]->rx_port = softnic_portid;
2321 fwd_streams[i]->rx_queue = i;
2322 fwd_streams[i]->tx_port = softnic_portid;
2323 fwd_streams[i]->tx_queue = i;
2324 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2325 fwd_streams[i]->retry_enabled = retry_enabled;
2331 fwd_config_setup(void)
2333 cur_fwd_config.fwd_eng = cur_fwd_eng;
2334 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2335 icmp_echo_config_setup();
2339 #if defined RTE_LIBRTE_PMD_SOFTNIC
2340 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
2341 softnic_fwd_config_setup();
2346 if ((nb_rxq > 1) && (nb_txq > 1)) {
2348 dcb_fwd_config_setup();
2350 rss_fwd_config_setup();
2353 simple_fwd_config_setup();
2357 mp_alloc_to_str(uint8_t mode)
2360 case MP_ALLOC_NATIVE:
2366 case MP_ALLOC_XMEM_HUGE:
2374 pkt_fwd_config_display(struct fwd_config *cfg)
2376 struct fwd_stream *fs;
2380 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2381 "NUMA support %s, MP allocation mode: %s\n",
2382 cfg->fwd_eng->fwd_mode_name,
2383 retry_enabled == 0 ? "" : " with retry",
2384 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2385 numa_support == 1 ? "enabled" : "disabled",
2386 mp_alloc_to_str(mp_alloc_type));
2389 printf("TX retry num: %u, delay between TX retries: %uus\n",
2390 burst_tx_retry_num, burst_tx_delay_time);
2391 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2392 printf("Logical Core %u (socket %u) forwards packets on "
2394 fwd_lcores_cpuids[lc_id],
2395 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2396 fwd_lcores[lc_id]->stream_nb);
2397 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2398 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2399 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
2400 "P=%d/Q=%d (socket %u) ",
2401 fs->rx_port, fs->rx_queue,
2402 ports[fs->rx_port].socket_id,
2403 fs->tx_port, fs->tx_queue,
2404 ports[fs->tx_port].socket_id);
2405 print_ethaddr("peer=",
2406 &peer_eth_addrs[fs->peer_addr]);
2414 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2416 struct rte_ether_addr new_peer_addr;
2417 if (!rte_eth_dev_is_valid_port(port_id)) {
2418 printf("Error: Invalid port number %i\n", port_id);
2421 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
2422 printf("Error: Invalid ethernet address: %s\n", peer_addr);
2425 peer_eth_addrs[port_id] = new_peer_addr;
2429 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2432 unsigned int lcore_cpuid;
2437 for (i = 0; i < nb_lc; i++) {
2438 lcore_cpuid = lcorelist[i];
2439 if (! rte_lcore_is_enabled(lcore_cpuid)) {
2440 printf("lcore %u not enabled\n", lcore_cpuid);
2443 if (lcore_cpuid == rte_get_master_lcore()) {
2444 printf("lcore %u cannot be masked on for running "
2445 "packet forwarding, which is the master lcore "
2446 "and reserved for command line parsing only\n",
2451 fwd_lcores_cpuids[i] = lcore_cpuid;
2453 if (record_now == 0) {
2457 nb_cfg_lcores = (lcoreid_t) nb_lc;
2458 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2459 printf("previous number of forwarding cores %u - changed to "
2460 "number of configured cores %u\n",
2461 (unsigned int) nb_fwd_lcores, nb_lc);
2462 nb_fwd_lcores = (lcoreid_t) nb_lc;
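/*
 * Expand a 64-bit core mask into an ordered list of lcore ids and hand it
 * to set_fwd_lcores_list(). For example, a mask of 0x0d selects lcores
 * 0, 2 and 3.
 */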
2469 set_fwd_lcores_mask(uint64_t lcoremask)
2471 unsigned int lcorelist[64];
2475 if (lcoremask == 0) {
2476 printf("Invalid NULL mask of cores\n");
2480 for (i = 0; i < 64; i++) {
2481 if (! ((uint64_t)(1ULL << i) & lcoremask))
2483 lcorelist[nb_lc++] = i;
2485 return set_fwd_lcores_list(lcorelist, nb_lc);
2489 set_fwd_lcores_number(uint16_t nb_lc)
2491 if (nb_lc > nb_cfg_lcores) {
2492 printf("nb fwd cores %u > %u (max. number of configured "
2493 "lcores) - ignored\n",
2494 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2497 nb_fwd_lcores = (lcoreid_t) nb_lc;
2498 printf("Number of forwarding cores set to %u\n",
2499 (unsigned int) nb_fwd_lcores);
2503 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2511 for (i = 0; i < nb_pt; i++) {
2512 port_id = (portid_t) portlist[i];
2513 if (port_id_is_invalid(port_id, ENABLED_WARN))
2516 fwd_ports_ids[i] = port_id;
2518 if (record_now == 0) {
2522 nb_cfg_ports = (portid_t) nb_pt;
2523 if (nb_fwd_ports != (portid_t) nb_pt) {
2524 printf("previous number of forwarding ports %u - changed to "
2525 "number of configured ports %u\n",
2526 (unsigned int) nb_fwd_ports, nb_pt);
2527 nb_fwd_ports = (portid_t) nb_pt;
2532 set_fwd_ports_mask(uint64_t portmask)
2534 unsigned int portlist[64];
2538 if (portmask == 0) {
2539 printf("Invalid NULL mask of ports\n");
2543 RTE_ETH_FOREACH_DEV(i) {
2544 if (! ((uint64_t)(1ULL << i) & portmask))
2546 portlist[nb_pt++] = i;
2548 set_fwd_ports_list(portlist, nb_pt);
2552 set_fwd_ports_number(uint16_t nb_pt)
2554 if (nb_pt > nb_cfg_ports) {
2555 printf("nb fwd ports %u > %u (number of configured "
2556 "ports) - ignored\n",
2557 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2560 nb_fwd_ports = (portid_t) nb_pt;
2561 printf("Number of forwarding ports set to %u\n",
2562 (unsigned int) nb_fwd_ports);
2566 port_is_forwarding(portid_t port_id)
2570 if (port_id_is_invalid(port_id, ENABLED_WARN))
2573 for (i = 0; i < nb_fwd_ports; i++) {
2574 if (fwd_ports_ids[i] == port_id)
2582 set_nb_pkt_per_burst(uint16_t nb)
2584 if (nb > MAX_PKT_BURST) {
2585 printf("nb pkt per burst: %u > %u (maximum packet per burst) "
2587 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2590 nb_pkt_per_burst = nb;
2591 printf("Number of packets per burst set to %u\n",
2592 (unsigned int) nb_pkt_per_burst);
2596 tx_split_get_name(enum tx_pkt_split split)
2600 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2601 if (tx_split_name[i].split == split)
2602 return tx_split_name[i].name;
2608 set_tx_pkt_split(const char *name)
2612 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2613 if (strcmp(tx_split_name[i].name, name) == 0) {
2614 tx_pkt_split = tx_split_name[i].split;
2618 printf("unknown value: \"%s\"\n", name);
2622 show_tx_pkt_segments(void)
2628 split = tx_split_get_name(tx_pkt_split);
2630 printf("Number of segments: %u\n", n);
2631 printf("Segment sizes: ");
2632 for (i = 0; i != n - 1; i++)
2633 printf("%hu,", tx_pkt_seg_lengths[i]);
2634 printf("%hu\n", tx_pkt_seg_lengths[i]);
2635 printf("Split packet: %s\n", split);
2639 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2641 uint16_t tx_pkt_len;
2644 if (nb_segs >= (unsigned) nb_txd) {
2645 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
2646 nb_segs, (unsigned int) nb_txd);
2651 * Check that each segment length is greater or equal than
2652 * the mbuf data size.
2653 * Check also that the total packet length is greater or equal than the
2654 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
2658 for (i = 0; i < nb_segs; i++) {
2659 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2660 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2661 i, seg_lengths[i], (unsigned) mbuf_data_size);
2664 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
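/* 20 + 8 below is an IPv4 header plus a UDP header, i.e. the smallest
 * UDP/IP payload that can follow the Ethernet header.
 */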
2666 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
2667 printf("total packet length=%u < %d - give up\n",
2668 (unsigned) tx_pkt_len,
2669 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
2673 for (i = 0; i < nb_segs; i++)
2674 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2676 tx_pkt_length = tx_pkt_len;
2677 tx_pkt_nb_segs = (uint8_t) nb_segs;
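/*
 * Enable or disable software GRO on a port. The setting is refused while
 * forwarding is running; when enabled with the default flush cycle,
 * TCP/IPv4 GRO is configured with the default flow and per-flow packet
 * limits.
 */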
2681 setup_gro(const char *onoff, portid_t port_id)
2683 if (!rte_eth_dev_is_valid_port(port_id)) {
2684 printf("invalid port id %u\n", port_id);
2687 if (test_done == 0) {
2688 printf("Before enable/disable GRO,"
2689 " please stop forwarding first\n");
2692 if (strcmp(onoff, "on") == 0) {
2693 if (gro_ports[port_id].enable != 0) {
2694 printf("Port %u has enabled GRO. Please"
2695 " disable GRO first\n", port_id);
2698 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2699 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
2700 gro_ports[port_id].param.max_flow_num =
2701 GRO_DEFAULT_FLOW_NUM;
2702 gro_ports[port_id].param.max_item_per_flow =
2703 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
2705 gro_ports[port_id].enable = 1;
2707 if (gro_ports[port_id].enable == 0) {
2708 printf("Port %u has disabled GRO\n", port_id);
2711 gro_ports[port_id].enable = 0;
2716 setup_gro_flush_cycles(uint8_t cycles)
2718 if (test_done == 0) {
2719 printf("Before change flush interval for GRO,"
2720 " please stop forwarding first.\n");
2724 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
2725 GRO_DEFAULT_FLUSH_CYCLES) {
2726 printf("The flushing cycle be in the range"
2727 " of 1 to %u. Revert to the default"
2729 GRO_MAX_FLUSH_CYCLES,
2730 GRO_DEFAULT_FLUSH_CYCLES);
2731 cycles = GRO_DEFAULT_FLUSH_CYCLES;
2734 gro_flush_cycles = cycles;
2738 show_gro(portid_t port_id)
2740 struct rte_gro_param *param;
2741 uint32_t max_pkts_num;
2743 param = &gro_ports[port_id].param;
2745 if (!rte_eth_dev_is_valid_port(port_id)) {
2746 printf("Invalid port id %u.\n", port_id);
2749 if (gro_ports[port_id].enable) {
2750 printf("GRO type: TCP/IPv4\n");
2751 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2752 max_pkts_num = param->max_flow_num *
2753 param->max_item_per_flow;
2755 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
2756 printf("Max number of packets to perform GRO: %u\n",
2758 printf("Flushing cycles: %u\n", gro_flush_cycles);
2760 printf("Port %u doesn't enable GRO.\n", port_id);
2764 setup_gso(const char *mode, portid_t port_id)
2766 if (!rte_eth_dev_is_valid_port(port_id)) {
2767 printf("invalid port id %u\n", port_id);
2770 if (strcmp(mode, "on") == 0) {
2771 if (test_done == 0) {
2772 printf("before enabling GSO,"
2773 " please stop forwarding first\n");
2776 gso_ports[port_id].enable = 1;
2777 } else if (strcmp(mode, "off") == 0) {
2778 if (test_done == 0) {
2779 printf("before disabling GSO,"
2780 " please stop forwarding first\n");
2783 gso_ports[port_id].enable = 0;
2788 list_pkt_forwarding_modes(void)
2790 static char fwd_modes[128] = "";
2791 const char *separator = "|";
2792 struct fwd_engine *fwd_eng;
2795 if (strlen (fwd_modes) == 0) {
2796 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2797 strncat(fwd_modes, fwd_eng->fwd_mode_name,
2798 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2799 strncat(fwd_modes, separator,
2800 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
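/* Drop the trailing '|' appended by the last loop iteration. */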
2802 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2809 list_pkt_forwarding_retry_modes(void)
2811 static char fwd_modes[128] = "";
2812 const char *separator = "|";
2813 struct fwd_engine *fwd_eng;
2816 if (strlen(fwd_modes) == 0) {
2817 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2818 if (fwd_eng == &rx_only_engine)
2820 strncat(fwd_modes, fwd_eng->fwd_mode_name,
2822 strlen(fwd_modes) - 1);
2823 strncat(fwd_modes, separator,
2825 strlen(fwd_modes) - 1);
2827 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2834 set_pkt_forwarding_mode(const char *fwd_mode_name)
2836 struct fwd_engine *fwd_eng;
2840 while ((fwd_eng = fwd_engines[i]) != NULL) {
2841 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
2842 printf("Set %s packet forwarding mode%s\n",
2844 retry_enabled == 0 ? "" : " with retry");
2845 cur_fwd_eng = fwd_eng;
2850 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
2854 add_rx_dump_callbacks(portid_t portid)
2856 struct rte_eth_dev_info dev_info;
2860 if (port_id_is_invalid(portid, ENABLED_WARN))
2863 ret = eth_dev_info_get_print_err(portid, &dev_info);
2867 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
2868 if (!ports[portid].rx_dump_cb[queue])
2869 ports[portid].rx_dump_cb[queue] =
2870 rte_eth_add_rx_callback(portid, queue,
2871 dump_rx_pkts, NULL);
2875 add_tx_dump_callbacks(portid_t portid)
2877 struct rte_eth_dev_info dev_info;
2881 if (port_id_is_invalid(portid, ENABLED_WARN))
2884 ret = eth_dev_info_get_print_err(portid, &dev_info);
2888 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
2889 if (!ports[portid].tx_dump_cb[queue])
2890 ports[portid].tx_dump_cb[queue] =
2891 rte_eth_add_tx_callback(portid, queue,
2892 dump_tx_pkts, NULL);
2896 remove_rx_dump_callbacks(portid_t portid)
2898 struct rte_eth_dev_info dev_info;
2902 if (port_id_is_invalid(portid, ENABLED_WARN))
2905 ret = eth_dev_info_get_print_err(portid, &dev_info);
2909 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
2910 if (ports[portid].rx_dump_cb[queue]) {
2911 rte_eth_remove_rx_callback(portid, queue,
2912 ports[portid].rx_dump_cb[queue]);
2913 ports[portid].rx_dump_cb[queue] = NULL;
2918 remove_tx_dump_callbacks(portid_t portid)
2920 struct rte_eth_dev_info dev_info;
2924 if (port_id_is_invalid(portid, ENABLED_WARN))
2927 ret = eth_dev_info_get_print_err(portid, &dev_info);
2931 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
2932 if (ports[portid].tx_dump_cb[queue]) {
2933 rte_eth_remove_tx_callback(portid, queue,
2934 ports[portid].tx_dump_cb[queue]);
2935 ports[portid].tx_dump_cb[queue] = NULL;
2940 configure_rxtx_dump_callbacks(uint16_t verbose)
2944 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2945 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
2949 RTE_ETH_FOREACH_DEV(portid)
2951 if (verbose == 1 || verbose > 2)
2952 add_rx_dump_callbacks(portid);
2954 remove_rx_dump_callbacks(portid);
2956 add_tx_dump_callbacks(portid);
2958 remove_tx_dump_callbacks(portid);
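/*
 * Net effect per port: verbose level 1 dumps received packets, level 2
 * dumps transmitted packets, and any level above 2 dumps both directions.
 */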
2963 set_verbose_level(uint16_t vb_level)
2965 printf("Change verbose level from %u to %u\n",
2966 (unsigned int) verbose_level, (unsigned int) vb_level);
2967 verbose_level = vb_level;
2968 configure_rxtx_dump_callbacks(verbose_level);
2972 vlan_extend_set(portid_t port_id, int on)
2976 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2978 if (port_id_is_invalid(port_id, ENABLED_WARN))
2981 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2984 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
2985 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2987 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
2988 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2991 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2993 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
2994 "diag=%d\n", port_id, on, diag);
2995 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2999 rx_vlan_strip_set(portid_t port_id, int on)
3003 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3005 if (port_id_is_invalid(port_id, ENABLED_WARN))
3008 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3011 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
3012 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3014 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
3015 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3018 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3020 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
3021 "diag=%d\n", port_id, on, diag);
3022 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3026 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
3030 if (port_id_is_invalid(port_id, ENABLED_WARN))
3033 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
3035 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
3036 "diag=%d\n", port_id, queue_id, on, diag);
3040 rx_vlan_filter_set(portid_t port_id, int on)
3044 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3046 if (port_id_is_invalid(port_id, ENABLED_WARN))
3049 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3052 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
3053 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3055 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
3056 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3059 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3061 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
3062 "diag=%d\n", port_id, on, diag);
3063 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3067 rx_vlan_qinq_strip_set(portid_t port_id, int on)
3071 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3073 if (port_id_is_invalid(port_id, ENABLED_WARN))
3076 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3079 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
3080 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3082 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
3083 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3086 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3088 printf("%s(port_pi=%d, on=%d) failed "
3089 "diag=%d\n", __func__, port_id, on, diag);
3090 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3094 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
3098 if (port_id_is_invalid(port_id, ENABLED_WARN))
3100 if (vlan_id_is_invalid(vlan_id))
3102 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
3105 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
3107 port_id, vlan_id, on, diag);
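/*
 * Apply the VLAN filter setting to every VLAN ID of the 12-bit space
 * (0-4095) on the given port, stopping at the first failure reported by
 * rx_vft_set().
 */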
3112 rx_vlan_all_filter_set(portid_t port_id, int on)
3116 if (port_id_is_invalid(port_id, ENABLED_WARN))
3118 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
3119 if (rx_vft_set(port_id, vlan_id, on))
3125 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
3129 if (port_id_is_invalid(port_id, ENABLED_WARN))
3132 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
3136 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed "
3138 port_id, vlan_type, tp_id, diag);
3142 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
3144 struct rte_eth_dev_info dev_info;
3147 if (port_id_is_invalid(port_id, ENABLED_WARN))
3149 if (vlan_id_is_invalid(vlan_id))
3152 if (ports[port_id].dev_conf.txmode.offloads &
3153 DEV_TX_OFFLOAD_QINQ_INSERT) {
3154 printf("Error, as QinQ has been enabled.\n");
3158 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3162 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
3163 printf("Error: vlan insert is not supported by port %d\n",
3168 tx_vlan_reset(port_id);
3169 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
3170 ports[port_id].tx_vlan_id = vlan_id;
3174 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
3176 struct rte_eth_dev_info dev_info;
3179 if (port_id_is_invalid(port_id, ENABLED_WARN))
3181 if (vlan_id_is_invalid(vlan_id))
3183 if (vlan_id_is_invalid(vlan_id_outer))
3186 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3190 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
3191 printf("Error: qinq insert not supported by port %d\n",
3196 tx_vlan_reset(port_id);
3197 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
3198 DEV_TX_OFFLOAD_QINQ_INSERT);
3199 ports[port_id].tx_vlan_id = vlan_id;
3200 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
3204 tx_vlan_reset(portid_t port_id)
3206 if (port_id_is_invalid(port_id, ENABLED_WARN))
3208 ports[port_id].dev_conf.txmode.offloads &=
3209 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
3210 DEV_TX_OFFLOAD_QINQ_INSERT);
3211 ports[port_id].tx_vlan_id = 0;
3212 ports[port_id].tx_vlan_id_outer = 0;
3216 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
3218 if (port_id_is_invalid(port_id, ENABLED_WARN))
3221 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
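/*
 * Map an RX or TX queue of a port to one of the RTE_ETHDEV_QUEUE_STAT_CNTRS
 * per-queue statistics counters: update the entry if the (port, queue) pair
 * is already in the mapping table, otherwise append a new entry.
 */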
3225 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
3228 uint8_t existing_mapping_found = 0;
3230 if (port_id_is_invalid(port_id, ENABLED_WARN))
3233 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
3236 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
3237 printf("map_value not in required range 0..%d\n",
3238 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
3242 if (!is_rx) { /*then tx*/
3243 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3244 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3245 (tx_queue_stats_mappings[i].queue_id == queue_id)) {
3246 tx_queue_stats_mappings[i].stats_counter_id = map_value;
3247 existing_mapping_found = 1;
3251 if (!existing_mapping_found) { /* A new additional mapping... */
3252 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
3253 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
3254 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
3255 nb_tx_queue_stats_mappings++;
3259 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3260 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3261 (rx_queue_stats_mappings[i].queue_id == queue_id)) {
3262 rx_queue_stats_mappings[i].stats_counter_id = map_value;
3263 existing_mapping_found = 1;
3267 if (!existing_mapping_found) { /* A new additional mapping... */
3268 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
3269 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
3270 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
3271 nb_rx_queue_stats_mappings++;
3277 set_xstats_hide_zero(uint8_t on_off)
3279 xstats_hide_zero = on_off;
3283 print_fdir_mask(struct rte_eth_fdir_masks *mask)
3285 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
3287 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3288 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
3289 " tunnel_id: 0x%08x",
3290 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
3291 rte_be_to_cpu_32(mask->tunnel_id_mask));
3292 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3293 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
3294 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
3295 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
3297 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
3298 rte_be_to_cpu_16(mask->src_port_mask),
3299 rte_be_to_cpu_16(mask->dst_port_mask));
3301 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3302 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
3303 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
3304 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
3305 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
3307 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3308 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
3309 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
3310 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
3311 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
3318 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3320 struct rte_eth_flex_payload_cfg *cfg;
3323 for (i = 0; i < flex_conf->nb_payloads; i++) {
3324 cfg = &flex_conf->flex_set[i];
3325 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
3327 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
3328 printf("\n L2_PAYLOAD: ");
3329 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
3330 printf("\n L3_PAYLOAD: ");
3331 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
3332 printf("\n L4_PAYLOAD: ");
3334 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
3335 for (j = 0; j < num; j++)
3336 printf(" %-5u", cfg->src_offset[j]);
3342 flowtype_to_str(uint16_t flow_type)
3344 struct flow_type_info {
3350 static struct flow_type_info flowtype_str_table[] = {
3351 {"raw", RTE_ETH_FLOW_RAW},
3352 {"ipv4", RTE_ETH_FLOW_IPV4},
3353 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
3354 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
3355 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
3356 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
3357 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
3358 {"ipv6", RTE_ETH_FLOW_IPV6},
3359 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
3360 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
3361 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
3362 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
3363 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
3364 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
3365 {"port", RTE_ETH_FLOW_PORT},
3366 {"vxlan", RTE_ETH_FLOW_VXLAN},
3367 {"geneve", RTE_ETH_FLOW_GENEVE},
3368 {"nvgre", RTE_ETH_FLOW_NVGRE},
3369 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
3372 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
3373 if (flowtype_str_table[i].ftype == flow_type)
3374 return flowtype_str_table[i].str;
3381 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3383 struct rte_eth_fdir_flex_mask *mask;
3387 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
3388 mask = &flex_conf->flex_mask[i];
3389 p = flowtype_to_str(mask->flow_type);
3390 printf("\n %s:\t", p ? p : "unknown");
3391 for (j = 0; j < num; j++)
3392 printf(" %02x", mask->mask[j]);
3398 print_fdir_flow_type(uint32_t flow_types_mask)
3403 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3404 if (!(flow_types_mask & (1 << i)))
3406 p = flowtype_to_str(i);
3416 fdir_get_infos(portid_t port_id)
3418 struct rte_eth_fdir_stats fdir_stat;
3419 struct rte_eth_fdir_info fdir_info;
3422 static const char *fdir_stats_border = "########################";
3424 if (port_id_is_invalid(port_id, ENABLED_WARN))
3426 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
3428 printf("\n FDIR is not supported on port %-2d\n",
3433 memset(&fdir_info, 0, sizeof(fdir_info));
3434 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3435 RTE_ETH_FILTER_INFO, &fdir_info);
3436 memset(&fdir_stat, 0, sizeof(fdir_stat));
3437 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3438 RTE_ETH_FILTER_STATS, &fdir_stat);
3439 printf("\n %s FDIR infos for port %-2d %s\n",
3440 fdir_stats_border, port_id, fdir_stats_border);
3442 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
3443 printf(" PERFECT\n");
3444 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
3445 printf(" PERFECT-MAC-VLAN\n");
3446 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3447 printf(" PERFECT-TUNNEL\n");
3448 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
3449 printf(" SIGNATURE\n");
3451 printf(" DISABLE\n");
3452 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
3453 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
3454 printf(" SUPPORTED FLOW TYPE: ");
3455 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
3457 printf(" FLEX PAYLOAD INFO:\n");
3458 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
3459 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
3460 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
3461 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
3462 fdir_info.flex_payload_unit,
3463 fdir_info.max_flex_payload_segment_num,
3464 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
3466 print_fdir_mask(&fdir_info.mask);
3467 if (fdir_info.flex_conf.nb_payloads > 0) {
3468 printf(" FLEX PAYLOAD SRC OFFSET:");
3469 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3471 if (fdir_info.flex_conf.nb_flexmasks > 0) {
3472 printf(" FLEX MASK CFG:");
3473 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3475 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
3476 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
3477 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
3478 fdir_info.guarant_spc, fdir_info.best_spc);
3479 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
3480 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
3481 " add: %-10"PRIu64" remove: %"PRIu64"\n"
3482 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
3483 fdir_stat.collision, fdir_stat.free,
3484 fdir_stat.maxhash, fdir_stat.maxlen,
3485 fdir_stat.add, fdir_stat.remove,
3486 fdir_stat.f_add, fdir_stat.f_remove);
3487 printf(" %s############################%s\n",
3488 fdir_stats_border, fdir_stats_border);
3492 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3494 struct rte_port *port;
3495 struct rte_eth_fdir_flex_conf *flex_conf;
3498 port = &ports[port_id];
3499 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3500 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3501 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3506 if (i >= RTE_ETH_FLOW_MAX) {
3507 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3508 idx = flex_conf->nb_flexmasks;
3509 flex_conf->nb_flexmasks++;
3511 printf("The flex mask table is full. Can not set flex"
3512 " mask for flow_type(%u).", cfg->flow_type);
3516 rte_memcpy(&flex_conf->flex_mask[idx],
3518 sizeof(struct rte_eth_fdir_flex_mask));
3522 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3524 struct rte_port *port;
3525 struct rte_eth_fdir_flex_conf *flex_conf;
3528 port = &ports[port_id];
3529 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3530 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3531 if (cfg->type == flex_conf->flex_set[i].type) {
3536 if (i >= RTE_ETH_PAYLOAD_MAX) {
3537 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3538 idx = flex_conf->nb_payloads;
3539 flex_conf->nb_payloads++;
3541 printf("The flex payload table is full. Can not set"
3542 " flex payload for type(%u).", cfg->type);
3546 rte_memcpy(&flex_conf->flex_set[idx],
3548 sizeof(struct rte_eth_flex_payload_cfg));
3553 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3555 #ifdef RTE_LIBRTE_IXGBE_PMD
3559 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3561 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3565 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3566 is_rx ? "rx" : "tx", port_id, diag);
3569 printf("VF %s setting not supported for port %d\n",
3570 is_rx ? "Rx" : "Tx", port_id);
3576 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3579 struct rte_eth_link link;
3582 if (port_id_is_invalid(port_id, ENABLED_WARN))
3584 ret = eth_link_get_nowait_print_err(port_id, &link);
3587 if (rate > link.link_speed) {
3588 printf("Invalid rate value:%u bigger than link speed: %u\n",
3589 rate, link.link_speed);
3592 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3595 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3601 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
3603 int diag = -ENOTSUP;
3607 RTE_SET_USED(q_msk);
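/*
 * Try the PMD-specific VF rate-limit helpers in turn; diag keeps the value
 * -ENOTSUP until one of the enabled PMDs accepts the request.
 */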
3609 #ifdef RTE_LIBRTE_IXGBE_PMD
3610 if (diag == -ENOTSUP)
3611 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
3614 #ifdef RTE_LIBRTE_BNXT_PMD
3615 if (diag == -ENOTSUP)
3616 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
3621 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
3627 * Functions to manage the set of filtered Multicast MAC addresses.
3629 * A pool of filtered multicast MAC addresses is associated with each port.
3630 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3631 * The address of the pool and the number of valid multicast MAC addresses
3632 * recorded in the pool are stored in the fields "mc_addr_pool" and
3633 * "mc_addr_nb" of the "rte_port" data structure.
3635 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
3636 * a contiguous array of multicast MAC addresses to be supplied.
3637 * To comply with this constraint, the set of multicast addresses recorded
3638 * in the pool is kept compacted at the beginning of the pool.
3639 * Hence, when a multicast address is removed from the pool, all following
3640 * addresses, if any, are copied back to keep the set contiguous.
3642 #define MCAST_POOL_INC 32
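/*
 * Usage sketch (illustrative only, not part of the application): how a
 * command-line handler might add a filtered multicast address on port 0.
 * The address below is an arbitrary example and error handling is omitted.
 *
 *	struct rte_ether_addr mc_addr;
 *
 *	if (rte_ether_unformat_addr("01:00:5E:00:00:01", &mc_addr) == 0)
 *		mcast_addr_add(0, &mc_addr);
 *
 * The pool backing the port grows in steps of MCAST_POOL_INC entries: the
 * first address triggers an allocation of 32 slots, the 33rd a
 * reallocation to 64 slots, and so on.
 */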
3645 mcast_addr_pool_extend(struct rte_port *port)
3647 struct rte_ether_addr *mc_pool;
3648 size_t mc_pool_size;
3651 * If a free entry is available at the end of the pool, just
3652 * increment the number of recorded multicast addresses.
3654 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3660 * [re]allocate a pool with MCAST_POOL_INC more entries.
3661 * The previous test guarantees that port->mc_addr_nb is a multiple
3662 * of MCAST_POOL_INC.
3664 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
3666 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
3668 if (mc_pool == NULL) {
3669 printf("allocation of pool of %u multicast addresses failed\n",
3670 port->mc_addr_nb + MCAST_POOL_INC);
3674 port->mc_addr_pool = mc_pool;
3681 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
3684 if (addr_idx == port->mc_addr_nb) {
3685 /* No need to recompact the set of multicast addresses. */
3686 if (port->mc_addr_nb == 0) {
3687 /* free the pool of multicast addresses. */
3688 free(port->mc_addr_pool);
3689 port->mc_addr_pool = NULL;
3693 memmove(&port->mc_addr_pool[addr_idx],
3694 &port->mc_addr_pool[addr_idx + 1],
3695 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
3699 eth_port_multicast_addr_list_set(portid_t port_id)
3701 struct rte_port *port;
3704 port = &ports[port_id];
3705 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3709 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
3710 port_id, port->mc_addr_nb, -diag);
3714 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
3716 struct rte_port *port;
3719 if (port_id_is_invalid(port_id, ENABLED_WARN))
3722 port = &ports[port_id];
3725 * Check that the added multicast MAC address is not already recorded
3726 * in the pool of multicast addresses.
3728 for (i = 0; i < port->mc_addr_nb; i++) {
3729 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3730 printf("multicast address already filtered by port\n");
3735 if (mcast_addr_pool_extend(port) != 0)
3737 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
3738 eth_port_multicast_addr_list_set(port_id);
3742 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
3744 struct rte_port *port;
3747 if (port_id_is_invalid(port_id, ENABLED_WARN))
3750 port = &ports[port_id];
3753 * Search the pool of multicast MAC addresses for the removed address.
3755 for (i = 0; i < port->mc_addr_nb; i++) {
3756 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
3759 if (i == port->mc_addr_nb) {
3760 printf("multicast address not filtered by port %d\n", port_id);
3764 mcast_addr_pool_remove(port, i);
3765 eth_port_multicast_addr_list_set(port_id);
3769 port_dcb_info_display(portid_t port_id)
3771 struct rte_eth_dcb_info dcb_info;
3774 static const char *border = "================";
3776 if (port_id_is_invalid(port_id, ENABLED_WARN))
3779 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
3781 printf("\n Failed to get dcb infos on port %-2d\n",
3785 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
3786 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
3788 for (i = 0; i < dcb_info.nb_tcs; i++)
3790 printf("\n Priority : ");
3791 for (i = 0; i < dcb_info.nb_tcs; i++)
3792 printf("\t%4d", dcb_info.prio_tc[i]);
3793 printf("\n BW percent :");
3794 for (i = 0; i < dcb_info.nb_tcs; i++)
3795 printf("\t%4d%%", dcb_info.tc_bws[i]);
3796 printf("\n RXQ base : ");
3797 for (i = 0; i < dcb_info.nb_tcs; i++)
3798 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
3799 printf("\n RXQ number :");
3800 for (i = 0; i < dcb_info.nb_tcs; i++)
3801 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
3802 printf("\n TXQ base : ");
3803 for (i = 0; i < dcb_info.nb_tcs; i++)
3804 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
3805 printf("\n TXQ number :");
3806 for (i = 0; i < dcb_info.nb_tcs; i++)
3807 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
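/*
 * Read the regular file at "file_path" into a freshly allocated buffer,
 * store its size through the "size" pointer and return the buffer; NULL is
 * returned on any error. The buffer is released later with close_file().
 */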
3812 open_file(const char *file_path, uint32_t *size)
3814 int fd = open(file_path, O_RDONLY);
3816 uint8_t *buf = NULL;
3824 printf("%s: Failed to open %s\n", __func__, file_path);
3828 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
3830 printf("%s: File operations failed\n", __func__);
3834 pkg_size = st_buf.st_size;
3837 printf("%s: File operations failed\n", __func__);
3841 buf = (uint8_t *)malloc(pkg_size);
3844 printf("%s: Failed to malloc memory\n", __func__);
3848 ret = read(fd, buf, pkg_size);
3851 printf("%s: File read operation failed\n", __func__);
3865 save_file(const char *file_path, uint8_t *buf, uint32_t size)
3867 FILE *fh = fopen(file_path, "wb");
3870 printf("%s: Failed to open %s\n", __func__, file_path);
3874 if (fwrite(buf, 1, size, fh) != size) {
3876 printf("%s: File write operation failed\n", __func__);
3886 close_file(uint8_t *buf)
3897 port_queue_region_info_display(portid_t port_id, void *buf)
3899 #ifdef RTE_LIBRTE_I40E_PMD
3901 struct rte_pmd_i40e_queue_regions *info =
3902 (struct rte_pmd_i40e_queue_regions *)buf;
3903 static const char *queue_region_info_stats_border = "-------";
3905 if (!info->queue_region_number)
3906 printf("there is no region has been set before");
3908 printf("\n %s All queue region info for port=%2d %s",
3909 queue_region_info_stats_border, port_id,
3910 queue_region_info_stats_border);
3911 printf("\n queue_region_number: %-14u \n",
3912 info->queue_region_number);
3914 for (i = 0; i < info->queue_region_number; i++) {
3915 printf("\n region_id: %-14u queue_number: %-14u "
3916 "queue_start_index: %-14u \n",
3917 info->region[i].region_id,
3918 info->region[i].queue_num,
3919 info->region[i].queue_start_index);
3921 printf(" user_priority_num is %-14u :",
3922 info->region[i].user_priority_num);
3923 for (j = 0; j < info->region[i].user_priority_num; j++)
3924 printf(" %-14u ", info->region[i].user_priority[j]);
3926 printf("\n flowtype_num is %-14u :",
3927 info->region[i].flowtype_num);
3928 for (j = 0; j < info->region[i].flowtype_num; j++)
3929 printf(" %-14u ", info->region[i].hw_flowtype[j]);
3932 RTE_SET_USED(port_id);