1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2013-2014 6WIND S.A.
13 #include <sys/queue.h>
14 #include <sys/types.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
34 #include <rte_interrupts.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
41 #include <rte_errno.h>
42 #ifdef RTE_LIBRTE_IXGBE_PMD
43 #include <rte_pmd_ixgbe.h>
45 #ifdef RTE_LIBRTE_I40E_PMD
46 #include <rte_pmd_i40e.h>
48 #ifdef RTE_LIBRTE_BNXT_PMD
49 #include <rte_pmd_bnxt.h>
52 #include <rte_config.h>
56 static char *flowtype_to_str(uint16_t flow_type);
59 enum tx_pkt_split split;
63 .split = TX_PKT_SPLIT_OFF,
67 .split = TX_PKT_SPLIT_ON,
71 .split = TX_PKT_SPLIT_RND,
76 const struct rss_type_info rss_type_table[] = {
77 { "all", ETH_RSS_IP | ETH_RSS_TCP |
78 ETH_RSS_UDP | ETH_RSS_SCTP |
81 { "ipv4", ETH_RSS_IPV4 },
82 { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
83 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
84 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
85 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
86 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
87 { "ipv6", ETH_RSS_IPV6 },
88 { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
89 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
90 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
91 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
92 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
93 { "l2-payload", ETH_RSS_L2_PAYLOAD },
94 { "ipv6-ex", ETH_RSS_IPV6_EX },
95 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
96 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
97 { "port", ETH_RSS_PORT },
98 { "vxlan", ETH_RSS_VXLAN },
99 { "geneve", ETH_RSS_GENEVE },
100 { "nvgre", ETH_RSS_NVGRE },
101 { "ip", ETH_RSS_IP },
102 { "udp", ETH_RSS_UDP },
103 { "tcp", ETH_RSS_TCP },
104 { "sctp", ETH_RSS_SCTP },
105 { "tunnel", ETH_RSS_TUNNEL },
106 { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
107 { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
108 { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
109 { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
114 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
116 char buf[RTE_ETHER_ADDR_FMT_SIZE];
117 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
118 printf("%s%s", name, buf);
122 nic_stats_display(portid_t port_id)
124 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
125 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
126 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
127 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
128 static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
129 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
131 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
132 struct rte_eth_stats stats;
133 struct rte_port *port = &ports[port_id];
136 static const char *nic_stats_border = "########################";
138 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
142 rte_eth_stats_get(port_id, &stats);
143 printf("\n %s NIC statistics for port %-2d %s\n",
144 nic_stats_border, port_id, nic_stats_border);
146 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
147 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
149 stats.ipackets, stats.imissed, stats.ibytes);
150 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
151 printf(" RX-nombuf: %-10"PRIu64"\n",
153 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
155 stats.opackets, stats.oerrors, stats.obytes);
158 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
159 " RX-bytes: %10"PRIu64"\n",
160 stats.ipackets, stats.ierrors, stats.ibytes);
161 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
162 printf(" RX-nombuf: %10"PRIu64"\n",
164 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
165 " TX-bytes: %10"PRIu64"\n",
166 stats.opackets, stats.oerrors, stats.obytes);
169 if (port->rx_queue_stats_mapping_enabled) {
171 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
172 printf(" Stats reg %2d RX-packets: %10"PRIu64
173 " RX-errors: %10"PRIu64
174 " RX-bytes: %10"PRIu64"\n",
175 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
178 if (port->tx_queue_stats_mapping_enabled) {
180 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
181 printf(" Stats reg %2d TX-packets: %10"PRIu64
182 " TX-bytes: %10"PRIu64"\n",
183 i, stats.q_opackets[i], stats.q_obytes[i]);
187 diff_cycles = prev_cycles[port_id];
188 prev_cycles[port_id] = rte_rdtsc();
190 diff_cycles = prev_cycles[port_id] - diff_cycles;
192 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
193 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
194 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
195 (stats.opackets - prev_pkts_tx[port_id]) : 0;
196 prev_pkts_rx[port_id] = stats.ipackets;
197 prev_pkts_tx[port_id] = stats.opackets;
198 mpps_rx = diff_cycles > 0 ?
199 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
200 mpps_tx = diff_cycles > 0 ?
201 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
203 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
204 (stats.ibytes - prev_bytes_rx[port_id]) : 0;
205 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
206 (stats.obytes - prev_bytes_tx[port_id]) : 0;
207 prev_bytes_rx[port_id] = stats.ibytes;
208 prev_bytes_tx[port_id] = stats.obytes;
209 mbps_rx = diff_cycles > 0 ?
210 diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0;
211 mbps_tx = diff_cycles > 0 ?
212 diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0;
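/*
 * Rates are computed from the deltas since the previous call: the elapsed
 * time is diff_cycles / rte_get_tsc_hz() seconds, so delta * tsc_hz /
 * diff_cycles yields packets (or bytes) per second; byte rates are
 * multiplied by 8 when printed below to report bits per second.
 */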
214 printf("\n Throughput (since last show)\n");
215 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
216 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
217 mpps_tx, mbps_tx * 8);
219 printf(" %s############################%s\n",
220 nic_stats_border, nic_stats_border);
224 nic_stats_clear(portid_t port_id)
226 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
230 rte_eth_stats_reset(port_id);
231 printf("\n NIC statistics for port %d cleared\n", port_id);
235 nic_xstats_display(portid_t port_id)
237 struct rte_eth_xstat *xstats;
238 int cnt_xstats, idx_xstat;
239 struct rte_eth_xstat_name *xstats_names;
241 printf("###### NIC extended statistics for port %-2d\n", port_id);
242 if (!rte_eth_dev_is_valid_port(port_id)) {
243 printf("Error: Invalid port number %i\n", port_id);
248 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
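/* Calling rte_eth_xstats_get_names() with a NULL array and size 0 only
 * returns the number of extended statistics; that count sizes the
 * allocations below.
 */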
249 if (cnt_xstats < 0) {
250 printf("Error: Cannot get count of xstats\n");
254 /* Get id-name lookup table */
255 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
256 if (xstats_names == NULL) {
257 printf("Cannot allocate memory for xstats lookup\n");
260 if (cnt_xstats != rte_eth_xstats_get_names(
261 port_id, xstats_names, cnt_xstats)) {
262 printf("Error: Cannot get xstats lookup\n");
267 /* Get stats themselves */
268 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
269 if (xstats == NULL) {
270 printf("Cannot allocate memory for xstats\n");
274 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
275 printf("Error: Unable to get xstats\n");
282 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
283 if (xstats_hide_zero && !xstats[idx_xstat].value)
285 printf("%s: %"PRIu64"\n",
286 xstats_names[idx_xstat].name,
287 xstats[idx_xstat].value);
294 nic_xstats_clear(portid_t port_id)
298 ret = rte_eth_xstats_reset(port_id);
300 printf("%s: Error: failed to reset xstats (port %u): %s",
301 __func__, port_id, strerror(ret));
306 nic_stats_mapping_display(portid_t port_id)
308 struct rte_port *port = &ports[port_id];
311 static const char *nic_stats_mapping_border = "########################";
313 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
318 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
319 printf("Port id %d - either does not support queue statistic mapping or"
320 " has no queue statistic mapping set\n", port_id);
324 printf("\n %s NIC statistics mapping for port %-2d %s\n",
325 nic_stats_mapping_border, port_id, nic_stats_mapping_border);
327 if (port->rx_queue_stats_mapping_enabled) {
328 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
329 if (rx_queue_stats_mappings[i].port_id == port_id) {
330 printf(" RX-queue %2d mapped to Stats Reg %2d\n",
331 rx_queue_stats_mappings[i].queue_id,
332 rx_queue_stats_mappings[i].stats_counter_id);
339 if (port->tx_queue_stats_mapping_enabled) {
340 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
341 if (tx_queue_stats_mappings[i].port_id == port_id) {
342 printf(" TX-queue %2d mapped to Stats Reg %2d\n",
343 tx_queue_stats_mappings[i].queue_id,
344 tx_queue_stats_mappings[i].stats_counter_id);
349 printf(" %s####################################%s\n",
350 nic_stats_mapping_border, nic_stats_mapping_border);
354 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
356 struct rte_eth_burst_mode mode;
357 struct rte_eth_rxq_info qinfo;
359 static const char *info_border = "*********************";
361 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
363 printf("Failed to retrieve information for port: %u, "
364 "RX queue: %hu\nerror desc: %s(%d)\n",
365 port_id, queue_id, strerror(-rc), rc);
369 printf("\n%s Infos for port %-2u, RX queue %-2u %s",
370 info_border, port_id, queue_id, info_border);
372 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
373 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
374 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
375 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
376 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
377 printf("\nRX drop packets: %s",
378 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
379 printf("\nRX deferred start: %s",
380 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
381 printf("\nRX scattered packets: %s",
382 (qinfo.scattered_rx != 0) ? "on" : "off");
383 printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
385 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
386 printf("\nBurst mode: %s%s",
388 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
389 " (per queue)" : "");
395 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
397 struct rte_eth_burst_mode mode;
398 struct rte_eth_txq_info qinfo;
400 static const char *info_border = "*********************";
402 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
404 printf("Failed to retrieve information for port: %u, "
405 "TX queue: %hu\nerror desc: %s(%d)\n",
406 port_id, queue_id, strerror(-rc), rc);
410 printf("\n%s Infos for port %-2u, TX queue %-2u %s",
411 info_border, port_id, queue_id, info_border);
413 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
414 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
415 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
416 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
417 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
418 printf("\nTX deferred start: %s",
419 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
420 printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
422 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
423 printf("\nBurst mode: %s%s",
425 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
426 " (per queue)" : "");
431 static int bus_match_all(const struct rte_bus *bus, const void *data)
439 device_infos_display(const char *identifier)
441 static const char *info_border = "*********************";
442 struct rte_bus *start = NULL, *next;
443 struct rte_dev_iterator dev_iter;
444 char name[RTE_ETH_NAME_MAX_LEN];
445 struct rte_ether_addr mac_addr;
446 struct rte_device *dev;
447 struct rte_devargs da;
451 memset(&da, 0, sizeof(da));
455 if (rte_devargs_parsef(&da, "%s", identifier)) {
456 printf("cannot parse identifier\n");
463 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
466 if (identifier && da.bus != next)
469 /* Skip buses that don't have iterate method */
470 if (!next->dev_iterate)
473 snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
474 RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
478 /* Check for matching device if identifier is present */
480 strncmp(da.name, dev->name, strlen(dev->name)))
482 printf("\n%s Infos for device %s %s\n",
483 info_border, dev->name, info_border);
484 printf("Bus name: %s", dev->bus->name);
485 printf("\nDriver name: %s", dev->driver->name);
486 printf("\nDevargs: %s",
487 dev->devargs ? dev->devargs->args : "");
488 printf("\nConnect to socket: %d", dev->numa_node);
491 /* List ports with matching device name */
492 RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
493 printf("\n\tPort id: %-2d", port_id);
494 if (eth_macaddr_get_print_err(port_id,
496 print_ethaddr("\n\tMAC address: ",
498 rte_eth_dev_get_name_by_port(port_id, name);
499 printf("\n\tDevice name: %s", name);
507 port_infos_display(portid_t port_id)
509 struct rte_port *port;
510 struct rte_ether_addr mac_addr;
511 struct rte_eth_link link;
512 struct rte_eth_dev_info dev_info;
514 struct rte_mempool * mp;
515 static const char *info_border = "*********************";
517 char name[RTE_ETH_NAME_MAX_LEN];
520 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
524 port = &ports[port_id];
525 ret = eth_link_get_nowait_print_err(port_id, &link);
529 ret = eth_dev_info_get_print_err(port_id, &dev_info);
533 printf("\n%s Infos for port %-2d %s\n",
534 info_border, port_id, info_border);
535 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
536 print_ethaddr("MAC address: ", &mac_addr);
537 rte_eth_dev_get_name_by_port(port_id, name);
538 printf("\nDevice name: %s", name);
539 printf("\nDriver name: %s", dev_info.driver_name);
540 if (dev_info.device->devargs && dev_info.device->devargs->args)
541 printf("\nDevargs: %s", dev_info.device->devargs->args);
542 printf("\nConnect to socket: %u", port->socket_id);
544 if (port_numa[port_id] != NUMA_NO_CONFIG) {
545 mp = mbuf_pool_find(port_numa[port_id]);
547 printf("\nmemory allocation on the socket: %d",
550 printf("\nmemory allocation on the socket: %u", port->socket_id);
552 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
553 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
554 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
555 ("full-duplex") : ("half-duplex"));
557 if (!rte_eth_dev_get_mtu(port_id, &mtu))
558 printf("MTU: %u\n", mtu);
560 printf("Promiscuous mode: %s\n",
561 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
562 printf("Allmulticast mode: %s\n",
563 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
564 printf("Maximum number of MAC addresses: %u\n",
565 (unsigned int)(port->dev_info.max_mac_addrs));
566 printf("Maximum number of MAC addresses of hash filtering: %u\n",
567 (unsigned int)(port->dev_info.max_hash_mac_addrs));
569 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
570 if (vlan_offload >= 0) {
571 printf("VLAN offload: \n");
572 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
573 printf(" strip on, ");
575 printf(" strip off, ");
577 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
578 printf("filter on, ");
580 printf("filter off, ");
582 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
583 printf("extend on, ");
585 printf("extend off, ");
587 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
588 printf("qinq strip on\n");
590 printf("qinq strip off\n");
593 if (dev_info.hash_key_size > 0)
594 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
595 if (dev_info.reta_size > 0)
596 printf("Redirection table size: %u\n", dev_info.reta_size);
597 if (!dev_info.flow_type_rss_offloads)
598 printf("No RSS offload flow type is supported.\n");
603 printf("Supported RSS offload flow types:\n");
604 for (i = RTE_ETH_FLOW_UNKNOWN + 1;
605 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
606 if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
608 p = flowtype_to_str(i);
612 printf(" user defined %d\n", i);
616 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
617 printf("Maximum configurable length of RX packet: %u\n",
618 dev_info.max_rx_pktlen);
619 if (dev_info.max_vfs)
620 printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
621 if (dev_info.max_vmdq_pools)
622 printf("Maximum number of VMDq pools: %u\n",
623 dev_info.max_vmdq_pools);
625 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
626 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
627 printf("Max possible number of RXDs per queue: %hu\n",
628 dev_info.rx_desc_lim.nb_max);
629 printf("Min possible number of RXDs per queue: %hu\n",
630 dev_info.rx_desc_lim.nb_min);
631 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
633 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
634 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
635 printf("Max possible number of TXDs per queue: %hu\n",
636 dev_info.tx_desc_lim.nb_max);
637 printf("Min possible number of TXDs per queue: %hu\n",
638 dev_info.tx_desc_lim.nb_min);
639 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
640 printf("Max segment number per packet: %hu\n",
641 dev_info.tx_desc_lim.nb_seg_max);
642 printf("Max segment number per MTU/TSO: %hu\n",
643 dev_info.tx_desc_lim.nb_mtu_seg_max);
645 /* Show switch info only if a valid switch domain and port id are set */
646 if (dev_info.switch_info.domain_id !=
647 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
648 if (dev_info.switch_info.name)
649 printf("Switch name: %s\n", dev_info.switch_info.name);
651 printf("Switch domain Id: %u\n",
652 dev_info.switch_info.domain_id);
653 printf("Switch Port Id: %u\n",
654 dev_info.switch_info.port_id);
659 port_summary_header_display(void)
661 uint16_t port_number;
663 port_number = rte_eth_dev_count_avail();
664 printf("Number of available ports: %i\n", port_number);
665 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
666 "Driver", "Status", "Link");
670 port_summary_display(portid_t port_id)
672 struct rte_ether_addr mac_addr;
673 struct rte_eth_link link;
674 struct rte_eth_dev_info dev_info;
675 char name[RTE_ETH_NAME_MAX_LEN];
678 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
683 ret = eth_link_get_nowait_print_err(port_id, &link);
687 ret = eth_dev_info_get_print_err(port_id, &dev_info);
691 rte_eth_dev_get_name_by_port(port_id, name);
692 ret = eth_macaddr_get_print_err(port_id, &mac_addr);
696 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
697 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
698 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
699 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
700 dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
701 (unsigned int) link.link_speed);
705 port_offload_cap_display(portid_t port_id)
707 struct rte_eth_dev_info dev_info;
708 static const char *info_border = "************";
711 if (port_id_is_invalid(port_id, ENABLED_WARN))
714 ret = eth_dev_info_get_print_err(port_id, &dev_info);
718 printf("\n%s Port %d supported offload features: %s\n",
719 info_border, port_id, info_border);
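/* For each offload, report whether the device advertises the capability
 * and, if so, whether it is currently enabled in the port's rxmode/txmode
 * offload configuration.
 */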
721 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
722 printf("VLAN stripped: ");
723 if (ports[port_id].dev_conf.rxmode.offloads &
724 DEV_RX_OFFLOAD_VLAN_STRIP)
730 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
731 printf("Double VLANs stripped: ");
732 if (ports[port_id].dev_conf.rxmode.offloads &
733 DEV_RX_OFFLOAD_QINQ_STRIP)
739 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
740 printf("RX IPv4 checksum: ");
741 if (ports[port_id].dev_conf.rxmode.offloads &
742 DEV_RX_OFFLOAD_IPV4_CKSUM)
748 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
749 printf("RX UDP checksum: ");
750 if (ports[port_id].dev_conf.rxmode.offloads &
751 DEV_RX_OFFLOAD_UDP_CKSUM)
757 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
758 printf("RX TCP checksum: ");
759 if (ports[port_id].dev_conf.rxmode.offloads &
760 DEV_RX_OFFLOAD_TCP_CKSUM)
766 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
767 printf("RX SCTP checksum: ");
768 if (ports[port_id].dev_conf.rxmode.offloads &
769 DEV_RX_OFFLOAD_SCTP_CKSUM)
775 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
776 printf("RX Outer IPv4 checksum: ");
777 if (ports[port_id].dev_conf.rxmode.offloads &
778 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
784 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
785 printf("RX Outer UDP checksum: ");
786 if (ports[port_id].dev_conf.rxmode.offloads &
787 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
793 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
794 printf("Large receive offload: ");
795 if (ports[port_id].dev_conf.rxmode.offloads &
796 DEV_RX_OFFLOAD_TCP_LRO)
802 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
803 printf("HW timestamp: ");
804 if (ports[port_id].dev_conf.rxmode.offloads &
805 DEV_RX_OFFLOAD_TIMESTAMP)
811 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
812 printf("Rx Keep CRC: ");
813 if (ports[port_id].dev_conf.rxmode.offloads &
814 DEV_RX_OFFLOAD_KEEP_CRC)
820 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
821 printf("RX offload security: ");
822 if (ports[port_id].dev_conf.rxmode.offloads &
823 DEV_RX_OFFLOAD_SECURITY)
829 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
830 printf("VLAN insert: ");
831 if (ports[port_id].dev_conf.txmode.offloads &
832 DEV_TX_OFFLOAD_VLAN_INSERT)
838 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
839 printf("Double VLANs insert: ");
840 if (ports[port_id].dev_conf.txmode.offloads &
841 DEV_TX_OFFLOAD_QINQ_INSERT)
847 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
848 printf("TX IPv4 checksum: ");
849 if (ports[port_id].dev_conf.txmode.offloads &
850 DEV_TX_OFFLOAD_IPV4_CKSUM)
856 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
857 printf("TX UDP checksum: ");
858 if (ports[port_id].dev_conf.txmode.offloads &
859 DEV_TX_OFFLOAD_UDP_CKSUM)
865 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
866 printf("TX TCP checksum: ");
867 if (ports[port_id].dev_conf.txmode.offloads &
868 DEV_TX_OFFLOAD_TCP_CKSUM)
874 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
875 printf("TX SCTP checksum: ");
876 if (ports[port_id].dev_conf.txmode.offloads &
877 DEV_TX_OFFLOAD_SCTP_CKSUM)
883 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
884 printf("TX Outer IPv4 checksum: ");
885 if (ports[port_id].dev_conf.txmode.offloads &
886 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
892 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
893 printf("TX TCP segmentation: ");
894 if (ports[port_id].dev_conf.txmode.offloads &
895 DEV_TX_OFFLOAD_TCP_TSO)
901 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
902 printf("TX UDP segmentation: ");
903 if (ports[port_id].dev_conf.txmode.offloads &
904 DEV_TX_OFFLOAD_UDP_TSO)
910 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
911 printf("TSO for VXLAN tunnel packet: ");
912 if (ports[port_id].dev_conf.txmode.offloads &
913 DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
919 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
920 printf("TSO for GRE tunnel packet: ");
921 if (ports[port_id].dev_conf.txmode.offloads &
922 DEV_TX_OFFLOAD_GRE_TNL_TSO)
928 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
929 printf("TSO for IPIP tunnel packet: ");
930 if (ports[port_id].dev_conf.txmode.offloads &
931 DEV_TX_OFFLOAD_IPIP_TNL_TSO)
937 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
938 printf("TSO for GENEVE tunnel packet: ");
939 if (ports[port_id].dev_conf.txmode.offloads &
940 DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
946 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
947 printf("IP tunnel TSO: ");
948 if (ports[port_id].dev_conf.txmode.offloads &
949 DEV_TX_OFFLOAD_IP_TNL_TSO)
955 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
956 printf("UDP tunnel TSO: ");
957 if (ports[port_id].dev_conf.txmode.offloads &
958 DEV_TX_OFFLOAD_UDP_TNL_TSO)
964 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
965 printf("TX Outer UDP checksum: ");
966 if (ports[port_id].dev_conf.txmode.offloads &
967 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
976 port_id_is_invalid(portid_t port_id, enum print_warning warning)
980 if (port_id == (portid_t)RTE_PORT_ALL)
983 RTE_ETH_FOREACH_DEV(pid)
987 if (warning == ENABLED_WARN)
988 printf("Invalid port %d\n", port_id);
993 void print_valid_ports(void)
997 printf("The valid ports array is [");
998 RTE_ETH_FOREACH_DEV(pid) {
1005 vlan_id_is_invalid(uint16_t vlan_id)
1009 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
1014 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
1016 const struct rte_pci_device *pci_dev;
1017 const struct rte_bus *bus;
1020 if (reg_off & 0x3) {
1021 printf("Port register offset 0x%X not aligned on a 4-byte "
1027 if (!ports[port_id].dev_info.device) {
1028 printf("Invalid device\n");
1032 bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
1033 if (bus && !strcmp(bus->name, "pci")) {
1034 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
1036 printf("Not a PCI device\n");
1040 pci_len = pci_dev->mem_resource[0].len;
1041 if (reg_off >= pci_len) {
1042 printf("Port %d: register offset %u (0x%X) out of port PCI "
1043 "resource (length=%"PRIu64")\n",
1044 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
1051 reg_bit_pos_is_invalid(uint8_t bit_pos)
1055 printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
1059 #define display_port_and_reg_off(port_id, reg_off) \
1060 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
1063 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1065 display_port_and_reg_off(port_id, (unsigned)reg_off);
1066 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
1070 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
1075 if (port_id_is_invalid(port_id, ENABLED_WARN))
1077 if (port_reg_off_is_invalid(port_id, reg_off))
1079 if (reg_bit_pos_is_invalid(bit_x))
1081 reg_v = port_id_pci_reg_read(port_id, reg_off);
1082 display_port_and_reg_off(port_id, (unsigned)reg_off);
1083 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
1087 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
1088 uint8_t bit1_pos, uint8_t bit2_pos)
1094 if (port_id_is_invalid(port_id, ENABLED_WARN))
1096 if (port_reg_off_is_invalid(port_id, reg_off))
1098 if (reg_bit_pos_is_invalid(bit1_pos))
1100 if (reg_bit_pos_is_invalid(bit2_pos))
1102 if (bit1_pos > bit2_pos)
1103 l_bit = bit2_pos, h_bit = bit1_pos;
1105 l_bit = bit1_pos, h_bit = bit2_pos;
1107 reg_v = port_id_pci_reg_read(port_id, reg_off);
1110 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
1111 display_port_and_reg_off(port_id, (unsigned)reg_off);
1112 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
1113 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
1117 port_reg_display(portid_t port_id, uint32_t reg_off)
1121 if (port_id_is_invalid(port_id, ENABLED_WARN))
1123 if (port_reg_off_is_invalid(port_id, reg_off))
1125 reg_v = port_id_pci_reg_read(port_id, reg_off);
1126 display_port_reg_value(port_id, reg_off, reg_v);
1130 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
1135 if (port_id_is_invalid(port_id, ENABLED_WARN))
1137 if (port_reg_off_is_invalid(port_id, reg_off))
1139 if (reg_bit_pos_is_invalid(bit_pos))
1142 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
1145 reg_v = port_id_pci_reg_read(port_id, reg_off);
1147 reg_v &= ~(1 << bit_pos);
1149 reg_v |= (1 << bit_pos);
1150 port_id_pci_reg_write(port_id, reg_off, reg_v);
1151 display_port_reg_value(port_id, reg_off, reg_v);
1155 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
1156 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
1163 if (port_id_is_invalid(port_id, ENABLED_WARN))
1165 if (port_reg_off_is_invalid(port_id, reg_off))
1167 if (reg_bit_pos_is_invalid(bit1_pos))
1169 if (reg_bit_pos_is_invalid(bit2_pos))
1171 if (bit1_pos > bit2_pos)
1172 l_bit = bit2_pos, h_bit = bit1_pos;
1174 l_bit = bit1_pos, h_bit = bit2_pos;
1176 if ((h_bit - l_bit) < 31)
1177 max_v = (1 << (h_bit - l_bit + 1)) - 1;
1181 if (value > max_v) {
1182 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
1183 (unsigned)value, (unsigned)value,
1184 (unsigned)max_v, (unsigned)max_v);
1187 reg_v = port_id_pci_reg_read(port_id, reg_off);
1188 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1189 reg_v |= (value << l_bit); /* Set changed bits */
1190 port_id_pci_reg_write(port_id, reg_off, reg_v);
1191 display_port_reg_value(port_id, reg_off, reg_v);
1195 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1197 if (port_id_is_invalid(port_id, ENABLED_WARN))
1199 if (port_reg_off_is_invalid(port_id, reg_off))
1201 port_id_pci_reg_write(port_id, reg_off, reg_v);
1202 display_port_reg_value(port_id, reg_off, reg_v);
1206 port_mtu_set(portid_t port_id, uint16_t mtu)
1209 struct rte_eth_dev_info dev_info;
1212 if (port_id_is_invalid(port_id, ENABLED_WARN))
1215 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1219 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
1220 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
1221 mtu, dev_info.min_mtu, dev_info.max_mtu);
1224 diag = rte_eth_dev_set_mtu(port_id, mtu);
1227 printf("Set MTU failed. diag=%d\n", diag);
1230 /* Generic flow management functions. */
1232 /** Generate a port_flow entry from attributes/pattern/actions. */
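/* rte_flow_conv() is used twice: a first call with a NULL destination
 * returns the number of bytes needed to store a copy of the rule, and a
 * second call fills the buffer allocated right after the port_flow header.
 */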
1233 static struct port_flow *
1234 port_flow_new(const struct rte_flow_attr *attr,
1235 const struct rte_flow_item *pattern,
1236 const struct rte_flow_action *actions,
1237 struct rte_flow_error *error)
1239 const struct rte_flow_conv_rule rule = {
1241 .pattern_ro = pattern,
1242 .actions_ro = actions,
1244 struct port_flow *pf;
1247 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1250 pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1253 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1257 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1264 /** Print a message out of a flow error. */
1266 port_flow_complain(struct rte_flow_error *error)
1268 static const char *const errstrlist[] = {
1269 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1270 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1271 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1272 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1273 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1274 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1275 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1276 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1277 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1278 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1279 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1280 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1281 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1282 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1283 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1284 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1285 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1289 int err = rte_errno;
1291 if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1292 !errstrlist[error->type])
1293 errstr = "unknown type";
1295 errstr = errstrlist[error->type];
1296 printf("Caught error type %d (%s): %s%s: %s\n",
1297 error->type, errstr,
1298 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1299 error->cause), buf) : "",
1300 error->message ? error->message : "(no stated reason)",
1305 /** Validate flow rule. */
1307 port_flow_validate(portid_t port_id,
1308 const struct rte_flow_attr *attr,
1309 const struct rte_flow_item *pattern,
1310 const struct rte_flow_action *actions)
1312 struct rte_flow_error error;
1314 /* Poisoning to make sure PMDs update it in case of error. */
1315 memset(&error, 0x11, sizeof(error));
1316 if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1317 return port_flow_complain(&error);
1318 printf("Flow rule validated\n");
1322 /** Create flow rule. */
1324 port_flow_create(portid_t port_id,
1325 const struct rte_flow_attr *attr,
1326 const struct rte_flow_item *pattern,
1327 const struct rte_flow_action *actions)
1329 struct rte_flow *flow;
1330 struct rte_port *port;
1331 struct port_flow *pf;
1333 struct rte_flow_error error;
1335 /* Poisoning to make sure PMDs update it in case of error. */
1336 memset(&error, 0x22, sizeof(error));
1337 flow = rte_flow_create(port_id, attr, pattern, actions, &error);
1339 return port_flow_complain(&error);
1340 port = &ports[port_id];
1341 if (port->flow_list) {
1342 if (port->flow_list->id == UINT32_MAX) {
1343 printf("Highest rule ID is already assigned, delete"
1345 rte_flow_destroy(port_id, flow, NULL);
1348 id = port->flow_list->id + 1;
1351 pf = port_flow_new(attr, pattern, actions, &error);
1353 rte_flow_destroy(port_id, flow, NULL);
1354 return port_flow_complain(&error);
1356 pf->next = port->flow_list;
1359 port->flow_list = pf;
1360 printf("Flow rule #%u created\n", pf->id);
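/* For instance, a rule created from the testpmd prompt with a command such
 * as "flow create 0 ingress pattern eth / end actions queue index 0 / end"
 * reaches this path (the command above is only an illustration).
 */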
1364 /** Destroy a number of flow rules. */
1366 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
1368 struct rte_port *port;
1369 struct port_flow **tmp;
1373 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1374 port_id == (portid_t)RTE_PORT_ALL)
1376 port = &ports[port_id];
1377 tmp = &port->flow_list;
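/* Walk the singly linked flow list through a pointer-to-pointer so that a
 * matching entry can be unlinked without keeping a separate predecessor
 * pointer.
 */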
1381 for (i = 0; i != n; ++i) {
1382 struct rte_flow_error error;
1383 struct port_flow *pf = *tmp;
1385 if (rule[i] != pf->id)
1388 * Poisoning to make sure PMDs update it in case
1391 memset(&error, 0x33, sizeof(error));
1392 if (rte_flow_destroy(port_id, pf->flow, &error)) {
1393 ret = port_flow_complain(&error);
1396 printf("Flow rule #%u destroyed\n", pf->id);
1402 tmp = &(*tmp)->next;
1408 /** Remove all flow rules. */
1410 port_flow_flush(portid_t port_id)
1412 struct rte_flow_error error;
1413 struct rte_port *port;
1416 /* Poisoning to make sure PMDs update it in case of error. */
1417 memset(&error, 0x44, sizeof(error));
1418 if (rte_flow_flush(port_id, &error)) {
1419 ret = port_flow_complain(&error);
1420 if (port_id_is_invalid(port_id, DISABLED_WARN) ||
1421 port_id == (portid_t)RTE_PORT_ALL)
1424 port = &ports[port_id];
1425 while (port->flow_list) {
1426 struct port_flow *pf = port->flow_list->next;
1428 free(port->flow_list);
1429 port->flow_list = pf;
1434 /** Query a flow rule. */
1436 port_flow_query(portid_t port_id, uint32_t rule,
1437 const struct rte_flow_action *action)
1439 struct rte_flow_error error;
1440 struct rte_port *port;
1441 struct port_flow *pf;
1444 struct rte_flow_query_count count;
1448 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1449 port_id == (portid_t)RTE_PORT_ALL)
1451 port = &ports[port_id];
1452 for (pf = port->flow_list; pf; pf = pf->next)
1456 printf("Flow rule #%u not found\n", rule);
1459 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1460 &name, sizeof(name),
1461 (void *)(uintptr_t)action->type, &error);
1463 return port_flow_complain(&error);
1464 switch (action->type) {
1465 case RTE_FLOW_ACTION_TYPE_COUNT:
1468 printf("Cannot query action type %d (%s)\n",
1469 action->type, name);
1472 /* Poisoning to make sure PMDs update it in case of error. */
1473 memset(&error, 0x55, sizeof(error));
1474 memset(&query, 0, sizeof(query));
1475 if (rte_flow_query(port_id, pf->flow, action, &query, &error))
1476 return port_flow_complain(&error);
1477 switch (action->type) {
1478 case RTE_FLOW_ACTION_TYPE_COUNT:
1482 " hits: %" PRIu64 "\n"
1483 " bytes: %" PRIu64 "\n",
1485 query.count.hits_set,
1486 query.count.bytes_set,
1491 printf("Cannot display result for action type %d (%s)\n",
1492 action->type, name);
1498 /** List flow rules. */
1500 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
1502 struct rte_port *port;
1503 struct port_flow *pf;
1504 struct port_flow *list = NULL;
1507 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1508 port_id == (portid_t)RTE_PORT_ALL)
1510 port = &ports[port_id];
1511 if (!port->flow_list)
1513 /* Sort flows by group, priority and ID. */
1514 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
1515 struct port_flow **tmp;
1516 const struct rte_flow_attr *curr = pf->rule.attr;
1519 /* Filter out unwanted groups. */
1520 for (i = 0; i != n; ++i)
1521 if (curr->group == group[i])
1526 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
1527 const struct rte_flow_attr *comp = (*tmp)->rule.attr;
1529 if (curr->group > comp->group ||
1530 (curr->group == comp->group &&
1531 curr->priority > comp->priority) ||
1532 (curr->group == comp->group &&
1533 curr->priority == comp->priority &&
1534 pf->id > (*tmp)->id))
1541 printf("ID\tGroup\tPrio\tAttr\tRule\n");
1542 for (pf = list; pf != NULL; pf = pf->tmp) {
1543 const struct rte_flow_item *item = pf->rule.pattern;
1544 const struct rte_flow_action *action = pf->rule.actions;
1547 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
1549 pf->rule.attr->group,
1550 pf->rule.attr->priority,
1551 pf->rule.attr->ingress ? 'i' : '-',
1552 pf->rule.attr->egress ? 'e' : '-',
1553 pf->rule.attr->transfer ? 't' : '-');
1554 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1555 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
1556 &name, sizeof(name),
1557 (void *)(uintptr_t)item->type,
1560 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
1561 printf("%s ", name);
1565 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
1566 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1567 &name, sizeof(name),
1568 (void *)(uintptr_t)action->type,
1571 if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
1572 printf(" %s", name);
1579 /** Restrict ingress traffic to the defined flow rules. */
1581 port_flow_isolate(portid_t port_id, int set)
1583 struct rte_flow_error error;
1585 /* Poisoning to make sure PMDs update it in case of error. */
1586 memset(&error, 0x66, sizeof(error));
1587 if (rte_flow_isolate(port_id, set, &error))
1588 return port_flow_complain(&error);
1589 printf("Ingress traffic on port %u is %s to the defined flow rules\n",
1591 set ? "now restricted" : "not restricted anymore");
1596 * RX/TX ring descriptors display functions.
1599 rx_queue_id_is_invalid(queueid_t rxq_id)
1601 if (rxq_id < nb_rxq)
1603 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
1608 tx_queue_id_is_invalid(queueid_t txq_id)
1610 if (txq_id < nb_txq)
1612 printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
1617 rx_desc_id_is_invalid(uint16_t rxdesc_id)
1619 if (rxdesc_id < nb_rxd)
1621 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
1627 tx_desc_id_is_invalid(uint16_t txdesc_id)
1629 if (txdesc_id < nb_txd)
1631 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
1636 static const struct rte_memzone *
1637 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
1639 char mz_name[RTE_MEMZONE_NAMESIZE];
1640 const struct rte_memzone *mz;
1642 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
1643 port_id, q_id, ring_name);
1644 mz = rte_memzone_lookup(mz_name);
1646 printf("%s ring memory zone of (port %d, queue %d) not "
1647 "found (zone name = %s)\n",
1648 ring_name, port_id, q_id, mz_name);
1652 union igb_ring_dword {
1655 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1665 struct igb_ring_desc_32_bytes {
1666 union igb_ring_dword lo_dword;
1667 union igb_ring_dword hi_dword;
1668 union igb_ring_dword resv1;
1669 union igb_ring_dword resv2;
1672 struct igb_ring_desc_16_bytes {
1673 union igb_ring_dword lo_dword;
1674 union igb_ring_dword hi_dword;
1678 ring_rxd_display_dword(union igb_ring_dword dword)
1680 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
1681 (unsigned)dword.words.hi);
1685 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
1686 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1689 __rte_unused portid_t port_id,
1693 struct igb_ring_desc_16_bytes *ring =
1694 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
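/* Descriptor words are stored in little-endian byte order; each 64-bit
 * dword is converted with rte_le_to_cpu_64() before being displayed.
 */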
1695 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1697 struct rte_eth_dev_info dev_info;
1699 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1703 if (strstr(dev_info.driver_name, "i40e") != NULL) {
1704 /* 32 bytes RX descriptor, i40e only */
1705 struct igb_ring_desc_32_bytes *ring =
1706 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
1707 ring[desc_id].lo_dword.dword =
1708 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1709 ring_rxd_display_dword(ring[desc_id].lo_dword);
1710 ring[desc_id].hi_dword.dword =
1711 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1712 ring_rxd_display_dword(ring[desc_id].hi_dword);
1713 ring[desc_id].resv1.dword =
1714 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
1715 ring_rxd_display_dword(ring[desc_id].resv1);
1716 ring[desc_id].resv2.dword =
1717 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
1718 ring_rxd_display_dword(ring[desc_id].resv2);
1723 /* 16 bytes RX descriptor */
1724 ring[desc_id].lo_dword.dword =
1725 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1726 ring_rxd_display_dword(ring[desc_id].lo_dword);
1727 ring[desc_id].hi_dword.dword =
1728 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1729 ring_rxd_display_dword(ring[desc_id].hi_dword);
1733 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
1735 struct igb_ring_desc_16_bytes *ring;
1736 struct igb_ring_desc_16_bytes txd;
1738 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1739 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1740 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1741 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
1742 (unsigned)txd.lo_dword.words.lo,
1743 (unsigned)txd.lo_dword.words.hi,
1744 (unsigned)txd.hi_dword.words.lo,
1745 (unsigned)txd.hi_dword.words.hi);
1749 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
1751 const struct rte_memzone *rx_mz;
1753 if (port_id_is_invalid(port_id, ENABLED_WARN))
1755 if (rx_queue_id_is_invalid(rxq_id))
1757 if (rx_desc_id_is_invalid(rxd_id))
1759 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
1762 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
1766 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
1768 const struct rte_memzone *tx_mz;
1770 if (port_id_is_invalid(port_id, ENABLED_WARN))
1772 if (tx_queue_id_is_invalid(txq_id))
1774 if (tx_desc_id_is_invalid(txd_id))
1776 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
1779 ring_tx_descriptor_display(tx_mz, txd_id);
1783 fwd_lcores_config_display(void)
1787 printf("List of forwarding lcores:");
1788 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
1789 printf(" %2u", fwd_lcores_cpuids[lc_id]);
1793 rxtx_config_display(void)
1798 printf(" %s packet forwarding%s packets/burst=%d\n",
1799 cur_fwd_eng->fwd_mode_name,
1800 retry_enabled == 0 ? "" : " with retry",
1803 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
1804 printf(" packet len=%u - nb packet segments=%d\n",
1805 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
1807 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
1808 nb_fwd_lcores, nb_fwd_ports);
1810 RTE_ETH_FOREACH_DEV(pid) {
1811 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
1812 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
1813 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
1814 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
1815 uint16_t nb_rx_desc_tmp;
1816 uint16_t nb_tx_desc_tmp;
1817 struct rte_eth_rxq_info rx_qinfo;
1818 struct rte_eth_txq_info tx_qinfo;
1821 /* per port config */
1822 printf(" port %d: RX queue number: %d Tx queue number: %d\n",
1823 (unsigned int)pid, nb_rxq, nb_txq);
1825 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
1826 ports[pid].dev_conf.rxmode.offloads,
1827 ports[pid].dev_conf.txmode.offloads);
1829 /* per-RX-queue config, shown only for the first queue to keep output brief */
1830 for (qid = 0; qid < 1; qid++) {
1831 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
1833 nb_rx_desc_tmp = nb_rx_desc[qid];
1835 nb_rx_desc_tmp = rx_qinfo.nb_desc;
1837 printf(" RX queue: %d\n", qid);
1838 printf(" RX desc=%d - RX free threshold=%d\n",
1839 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
1840 printf(" RX threshold registers: pthresh=%d hthresh=%d "
1842 rx_conf[qid].rx_thresh.pthresh,
1843 rx_conf[qid].rx_thresh.hthresh,
1844 rx_conf[qid].rx_thresh.wthresh);
1845 printf(" RX Offloads=0x%"PRIx64"\n",
1846 rx_conf[qid].offloads);
1849 /* per-TX-queue config, shown only for the first queue to keep output brief */
1850 for (qid = 0; qid < 1; qid++) {
1851 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
1853 nb_tx_desc_tmp = nb_tx_desc[qid];
1855 nb_tx_desc_tmp = tx_qinfo.nb_desc;
1857 printf(" TX queue: %d\n", qid);
1858 printf(" TX desc=%d - TX free threshold=%d\n",
1859 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
1860 printf(" TX threshold registers: pthresh=%d hthresh=%d "
1862 tx_conf[qid].tx_thresh.pthresh,
1863 tx_conf[qid].tx_thresh.hthresh,
1864 tx_conf[qid].tx_thresh.wthresh);
1865 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
1866 tx_conf[qid].offloads, tx_conf->tx_rs_thresh);
1872 port_rss_reta_info(portid_t port_id,
1873 struct rte_eth_rss_reta_entry64 *reta_conf,
1874 uint16_t nb_entries)
1876 uint16_t i, idx, shift;
1879 if (port_id_is_invalid(port_id, ENABLED_WARN))
1882 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
1884 printf("Failed to get RSS RETA info, return code = %d\n", ret);
1888 for (i = 0; i < nb_entries; i++) {
1889 idx = i / RTE_RETA_GROUP_SIZE;
1890 shift = i % RTE_RETA_GROUP_SIZE;
1891 if (!(reta_conf[idx].mask & (1ULL << shift)))
1893 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
1894 i, reta_conf[idx].reta[shift]);
1899 * Displays the RSS hash functions of a port and, optionally, the RSS hash
1903 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
1905 struct rte_eth_rss_conf rss_conf = {0};
1906 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
1910 struct rte_eth_dev_info dev_info;
1911 uint8_t hash_key_size;
1914 if (port_id_is_invalid(port_id, ENABLED_WARN))
1917 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1921 if (dev_info.hash_key_size > 0 &&
1922 dev_info.hash_key_size <= sizeof(rss_key))
1923 hash_key_size = dev_info.hash_key_size;
1925 printf("dev_info did not provide a valid hash key size\n");
1929 /* Get RSS hash key if asked to display it */
1930 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
1931 rss_conf.rss_key_len = hash_key_size;
1932 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1936 printf("port index %d invalid\n", port_id);
1939 printf("operation not supported by device\n");
1942 printf("operation failed - diag=%d\n", diag);
1947 rss_hf = rss_conf.rss_hf;
1949 printf("RSS disabled\n");
1952 printf("RSS functions:\n ");
1953 for (i = 0; rss_type_table[i].str; i++) {
1954 if (rss_hf & rss_type_table[i].rss_type)
1955 printf("%s ", rss_type_table[i].str);
1960 printf("RSS key:\n");
1961 for (i = 0; i < hash_key_size; i++)
1962 printf("%02X", rss_key[i]);
1967 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
1970 struct rte_eth_rss_conf rss_conf;
1974 rss_conf.rss_key = NULL;
1975 rss_conf.rss_key_len = hash_key_len;
1976 rss_conf.rss_hf = 0;
1977 for (i = 0; rss_type_table[i].str; i++) {
1978 if (!strcmp(rss_type_table[i].str, rss_type))
1979 rss_conf.rss_hf = rss_type_table[i].rss_type;
1981 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1983 rss_conf.rss_key = hash_key;
1984 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
1991 printf("port index %d invalid\n", port_id);
1994 printf("operation not supported by device\n");
1997 printf("operation failed - diag=%d\n", diag);
2003 * Setup forwarding configuration for each logical core.
2006 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
2008 streamid_t nb_fs_per_lcore;
2016 nb_fs = cfg->nb_fwd_streams;
2017 nb_fc = cfg->nb_fwd_lcores;
2018 if (nb_fs <= nb_fc) {
2019 nb_fs_per_lcore = 1;
2022 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
2023 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
2026 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
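/* The first nb_lc lcores each get nb_fs_per_lcore streams; the remaining
 * nb_extra lcores get one extra stream each. E.g. 10 streams over 4 lcores
 * gives 2, 2, 3 and 3 streams respectively.
 */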
2028 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
2029 fwd_lcores[lc_id]->stream_idx = sm_id;
2030 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
2031 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2035 * Assign extra remaining streams, if any.
2037 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
2038 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
2039 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
2040 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
2041 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2046 fwd_topology_tx_port_get(portid_t rxp)
2048 static int warning_once = 1;
2050 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
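/* Map an RX port index to the TX port index it forwards to, according to
 * the configured port topology (paired, chained or loop).
 */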
2052 switch (port_topology) {
2054 case PORT_TOPOLOGY_PAIRED:
2055 if ((rxp & 0x1) == 0) {
2056 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2059 printf("\nWarning! port-topology=paired"
2060 " and an odd number of forwarding ports;"
2061 " the last port will pair with"
2068 case PORT_TOPOLOGY_CHAINED:
2069 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2070 case PORT_TOPOLOGY_LOOP:
2076 simple_fwd_config_setup(void)
2080 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2081 cur_fwd_config.nb_fwd_streams =
2082 (streamid_t) cur_fwd_config.nb_fwd_ports;
2084 /* reinitialize forwarding streams */
2088 * In the simple forwarding test, the number of forwarding cores
2089 * must be less than or equal to the number of forwarding ports.
2091 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2092 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2093 cur_fwd_config.nb_fwd_lcores =
2094 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
2095 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2097 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2098 fwd_streams[i]->rx_port = fwd_ports_ids[i];
2099 fwd_streams[i]->rx_queue = 0;
2100 fwd_streams[i]->tx_port =
2101 fwd_ports_ids[fwd_topology_tx_port_get(i)];
2102 fwd_streams[i]->tx_queue = 0;
2103 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2104 fwd_streams[i]->retry_enabled = retry_enabled;
2109 * For the RSS forwarding test, all streams are distributed over the lcores. Each
2110 * stream is composed of an RX queue to poll on an RX port for input packets,
2111 * associated with a TX queue of a TX port to which forwarded packets are sent.
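 * For example, with 2 forwarding ports and 4 forwarding queues per port
 * there are 8 streams in total, spread over the forwarding lcores by
 * setup_fwd_config_of_each_lcore().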
2114 rss_fwd_config_setup(void)
2125 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2126 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2127 cur_fwd_config.nb_fwd_streams =
2128 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2130 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2131 cur_fwd_config.nb_fwd_lcores =
2132 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2134 /* reinitialize forwarding streams */
2137 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2139 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2140 struct fwd_stream *fs;
2142 fs = fwd_streams[sm_id];
2143 txp = fwd_topology_tx_port_get(rxp);
2144 fs->rx_port = fwd_ports_ids[rxp];
2146 fs->tx_port = fwd_ports_ids[txp];
2148 fs->peer_addr = fs->tx_port;
2149 fs->retry_enabled = retry_enabled;
2151 if (rxp < nb_fwd_ports)
2159 * For the DCB forwarding test, each core is assigned to one traffic class.
2161 * Each core is assigned multiple streams, each stream being composed of
2162 * an RX queue to poll on an RX port for input packets, associated with
2163 * a TX queue of a TX port to which forwarded packets are sent. All RX and
2164 * TX queues are mapped to the same traffic class.
2165 * If VMDQ and DCB co-exist, each traffic class on different POOLs shares
2169 dcb_fwd_config_setup(void)
2171 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2172 portid_t txp, rxp = 0;
2173 queueid_t txq, rxq = 0;
2175 uint16_t nb_rx_queue, nb_tx_queue;
2176 uint16_t i, j, k, sm_id = 0;
2179 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2180 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2181 cur_fwd_config.nb_fwd_streams =
2182 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2184 /* reinitialize forwarding streams */
2188 /* get the dcb info on the first RX and TX ports */
2189 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2190 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2192 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2193 fwd_lcores[lc_id]->stream_nb = 0;
2194 fwd_lcores[lc_id]->stream_idx = sm_id;
2195 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2196 /* if nb_queue is zero, this traffic class is
2197 * not enabled on the pool
2199 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2201 k = fwd_lcores[lc_id]->stream_nb +
2202 fwd_lcores[lc_id]->stream_idx;
2203 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2204 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2205 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2206 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2207 for (j = 0; j < nb_rx_queue; j++) {
2208 struct fwd_stream *fs;
2210 fs = fwd_streams[k + j];
2211 fs->rx_port = fwd_ports_ids[rxp];
2212 fs->rx_queue = rxq + j;
2213 fs->tx_port = fwd_ports_ids[txp];
2214 fs->tx_queue = txq + j % nb_tx_queue;
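/* When the traffic class has fewer TX queues than RX queues, the extra
 * RX queues wrap onto the available TX queues (j % nb_tx_queue).
 */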
2215 fs->peer_addr = fs->tx_port;
2216 fs->retry_enabled = retry_enabled;
2218 fwd_lcores[lc_id]->stream_nb +=
2219 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2221 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2224 if (tc < rxp_dcb_info.nb_tcs)
2226 /* Restart from TC 0 on next RX port */
2228 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2230 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2233 if (rxp >= nb_fwd_ports)
2235 /* get the dcb information on next RX and TX ports */
2236 if ((rxp & 0x1) == 0)
2237 txp = (portid_t) (rxp + 1);
2239 txp = (portid_t) (rxp - 1);
2240 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2241 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2246 icmp_echo_config_setup(void)
2253 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2254 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2255 (nb_txq * nb_fwd_ports);
2257 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2258 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2259 cur_fwd_config.nb_fwd_streams =
2260 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2261 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2262 cur_fwd_config.nb_fwd_lcores =
2263 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2264 if (verbose_level > 0) {
2265 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2267 cur_fwd_config.nb_fwd_lcores,
2268 cur_fwd_config.nb_fwd_ports,
2269 cur_fwd_config.nb_fwd_streams);
2272 /* reinitialize forwarding streams */
2274 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2276 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2277 if (verbose_level > 0)
2278 printf(" core=%d: \n", lc_id);
2279 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2280 struct fwd_stream *fs;
2281 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2282 fs->rx_port = fwd_ports_ids[rxp];
2284 fs->tx_port = fs->rx_port;
2286 fs->peer_addr = fs->tx_port;
2287 fs->retry_enabled = retry_enabled;
2288 if (verbose_level > 0)
2289 printf(" stream=%d port=%d rxq=%d txq=%d\n",
2290 sm_id, fs->rx_port, fs->rx_queue,
2292 rxq = (queueid_t) (rxq + 1);
2293 if (rxq == nb_rxq) {
2295 rxp = (portid_t) (rxp + 1);
2301 #if defined RTE_LIBRTE_PMD_SOFTNIC
2303 softnic_fwd_config_setup(void)
2305 struct rte_port *port;
2306 portid_t pid, softnic_portid;
2308 uint8_t softnic_enable = 0;
2310 RTE_ETH_FOREACH_DEV(pid) {
2312 const char *driver = port->dev_info.driver_name;
2314 if (strcmp(driver, "net_softnic") == 0) {
2315 softnic_portid = pid;
2321 if (softnic_enable == 0) {
2322 printf("Softnic mode not configured(%s)!\n", __func__);
2326 cur_fwd_config.nb_fwd_ports = 1;
2327 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;
2329 /* Re-initialize forwarding streams */
2333 * In the softnic forwarding test, the number of forwarding cores
2334 * is set to one; the remaining cores are used for softnic packet processing.
2336 cur_fwd_config.nb_fwd_lcores = 1;
2337 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2339 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
2340 fwd_streams[i]->rx_port = softnic_portid;
2341 fwd_streams[i]->rx_queue = i;
2342 fwd_streams[i]->tx_port = softnic_portid;
2343 fwd_streams[i]->tx_queue = i;
2344 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2345 fwd_streams[i]->retry_enabled = retry_enabled;
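/*
 * Dispatch to the engine-specific stream setup: icmpecho and softnic
 * have dedicated setups; otherwise, when several RX and TX queues are
 * configured, the DCB or RSS setup is used (depending on whether DCB
 * is configured), and the simple setup handles the single-queue case.
 */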
2351 fwd_config_setup(void)
2353 cur_fwd_config.fwd_eng = cur_fwd_eng;
2354 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2355 icmp_echo_config_setup();
2359 #if defined RTE_LIBRTE_PMD_SOFTNIC
2360 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
2361 softnic_fwd_config_setup();
2366 if ((nb_rxq > 1) && (nb_txq > 1)) {
2368 dcb_fwd_config_setup();
2370 rss_fwd_config_setup();
2373 simple_fwd_config_setup();
2377 mp_alloc_to_str(uint8_t mode)
2380 case MP_ALLOC_NATIVE:
2386 case MP_ALLOC_XMEM_HUGE:
2394 pkt_fwd_config_display(struct fwd_config *cfg)
2396 struct fwd_stream *fs;
2400 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2401 "NUMA support %s, MP allocation mode: %s\n",
2402 cfg->fwd_eng->fwd_mode_name,
2403 retry_enabled == 0 ? "" : " with retry",
2404 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2405 numa_support == 1 ? "enabled" : "disabled",
2406 mp_alloc_to_str(mp_alloc_type));
2409 printf("TX retry num: %u, delay between TX retries: %uus\n",
2410 burst_tx_retry_num, burst_tx_delay_time);
2411 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2412 printf("Logical Core %u (socket %u) forwards packets on "
2414 fwd_lcores_cpuids[lc_id],
2415 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2416 fwd_lcores[lc_id]->stream_nb);
2417 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2418 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2419 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
2420 "P=%d/Q=%d (socket %u) ",
2421 fs->rx_port, fs->rx_queue,
2422 ports[fs->rx_port].socket_id,
2423 fs->tx_port, fs->tx_queue,
2424 ports[fs->tx_port].socket_id);
2425 print_ethaddr("peer=",
2426 &peer_eth_addrs[fs->peer_addr]);
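/*
 * Record the Ethernet address used as the peer (destination MAC) of a
 * port's transmitted packets, after validating the port id and the MAC
 * string. Typically reached from the CLI command
 * "set eth-peer <port_id> <mac_addr>".
 */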
2434 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2436 struct rte_ether_addr new_peer_addr;
2437 if (!rte_eth_dev_is_valid_port(port_id)) {
2438 printf("Error: Invalid port number %i\n", port_id);
2441 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
2442 printf("Error: Invalid ethernet address: %s\n", peer_addr);
2445 peer_eth_addrs[port_id] = new_peer_addr;
2449 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2452 unsigned int lcore_cpuid;
2457 for (i = 0; i < nb_lc; i++) {
2458 lcore_cpuid = lcorelist[i];
2459 if (! rte_lcore_is_enabled(lcore_cpuid)) {
2460 printf("lcore %u not enabled\n", lcore_cpuid);
2463 if (lcore_cpuid == rte_get_master_lcore()) {
2464 printf("lcore %u cannot be masked on for running "
2465 "packet forwarding, which is the master lcore "
2466 "and reserved for command line parsing only\n",
2471 fwd_lcores_cpuids[i] = lcore_cpuid;
2473 if (record_now == 0) {
2477 nb_cfg_lcores = (lcoreid_t) nb_lc;
2478 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2479 printf("previous number of forwarding cores %u - changed to "
2480 "number of configured cores %u\n",
2481 (unsigned int) nb_fwd_lcores, nb_lc);
2482 nb_fwd_lcores = (lcoreid_t) nb_lc;
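/*
 * Convert a 64-bit lcore mask into an explicit list and delegate to
 * set_fwd_lcores_list(); this is the "set coremask" counterpart of the
 * "set corelist" CLI command.
 */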
2489 set_fwd_lcores_mask(uint64_t lcoremask)
2491 unsigned int lcorelist[64];
2495 if (lcoremask == 0) {
2496 printf("Invalid NULL mask of cores\n");
2500 for (i = 0; i < 64; i++) {
2501 if (! ((uint64_t)(1ULL << i) & lcoremask))
2503 lcorelist[nb_lc++] = i;
2505 return set_fwd_lcores_list(lcorelist, nb_lc);
2509 set_fwd_lcores_number(uint16_t nb_lc)
2511 if (nb_lc > nb_cfg_lcores) {
2512 printf("nb fwd cores %u > %u (max. number of configured "
2513 "lcores) - ignored\n",
2514 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2517 nb_fwd_lcores = (lcoreid_t) nb_lc;
2518 printf("Number of forwarding cores set to %u\n",
2519 (unsigned int) nb_fwd_lcores);
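/*
 * Record the list of ports used for packet forwarding after validating
 * each port id; nb_cfg_ports and nb_fwd_ports are updated to the size
 * of the new list.
 */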
2523 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2531 for (i = 0; i < nb_pt; i++) {
2532 port_id = (portid_t) portlist[i];
2533 if (port_id_is_invalid(port_id, ENABLED_WARN))
2536 fwd_ports_ids[i] = port_id;
2538 if (record_now == 0) {
2542 nb_cfg_ports = (portid_t) nb_pt;
2543 if (nb_fwd_ports != (portid_t) nb_pt) {
2544 printf("previous number of forwarding ports %u - changed to "
2545 "number of configured ports %u\n",
2546 (unsigned int) nb_fwd_ports, nb_pt);
2547 nb_fwd_ports = (portid_t) nb_pt;
2552 set_fwd_ports_mask(uint64_t portmask)
2554 unsigned int portlist[64];
2558 if (portmask == 0) {
2559 printf("Invalid NULL mask of ports\n");
2563 RTE_ETH_FOREACH_DEV(i) {
2564 if (! ((uint64_t)(1ULL << i) & portmask))
2566 portlist[nb_pt++] = i;
2568 set_fwd_ports_list(portlist, nb_pt);
2572 set_fwd_ports_number(uint16_t nb_pt)
2574 if (nb_pt > nb_cfg_ports) {
2575 printf("nb fwd ports %u > %u (number of configured "
2576 "ports) - ignored\n",
2577 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2580 nb_fwd_ports = (portid_t) nb_pt;
2581 printf("Number of forwarding ports set to %u\n",
2582 (unsigned int) nb_fwd_ports);
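/* Return whether the given port id is part of the current forwarding port list. */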
2586 port_is_forwarding(portid_t port_id)
2590 if (port_id_is_invalid(port_id, ENABLED_WARN))
2593 for (i = 0; i < nb_fwd_ports; i++) {
2594 if (fwd_ports_ids[i] == port_id)
2602 set_nb_pkt_per_burst(uint16_t nb)
2604 if (nb > MAX_PKT_BURST) {
2605 printf("nb pkt per burst: %u > %u (maximum packet per burst) "
2607 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2610 nb_pkt_per_burst = nb;
2611 printf("Number of packets per burst set to %u\n",
2612 (unsigned int) nb_pkt_per_burst);
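/*
 * Helpers mapping the TX packet split policy to and from its name via
 * the tx_split_name[] table.
 */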
2616 tx_split_get_name(enum tx_pkt_split split)
2620 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2621 if (tx_split_name[i].split == split)
2622 return tx_split_name[i].name;
2628 set_tx_pkt_split(const char *name)
2632 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2633 if (strcmp(tx_split_name[i].name, name) == 0) {
2634 tx_pkt_split = tx_split_name[i].split;
2638 printf("unknown value: \"%s\"\n", name);
2642 show_tx_pkt_segments(void)
2648 split = tx_split_get_name(tx_pkt_split);
2650 printf("Number of segments: %u\n", n);
2651 printf("Segment sizes: ");
2652 for (i = 0; i != n - 1; i++)
2653 printf("%hu,", tx_pkt_seg_lengths[i]);
2654 printf("%hu\n", tx_pkt_seg_lengths[i]);
2655 printf("Split packet: %s\n", split);
2659 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2661 uint16_t tx_pkt_len;
2664 if (nb_segs >= (unsigned) nb_txd) {
2665 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
2666 nb_segs, (unsigned int) nb_txd);
2671 * Check that each segment length does not exceed
2672 * the mbuf data size.
2673 * Check also that the total packet length is greater than or equal to the
2674 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
2678 for (i = 0; i < nb_segs; i++) {
2679 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2680 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2681 i, seg_lengths[i], (unsigned) mbuf_data_size);
2684 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2686 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
2687 printf("total packet length=%u < %d - give up\n",
2688 (unsigned) tx_pkt_len,
2689 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
2693 for (i = 0; i < nb_segs; i++)
2694 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2696 tx_pkt_length = tx_pkt_len;
2697 tx_pkt_nb_segs = (uint8_t) nb_segs;
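/*
 * Enable or disable software GRO on a port; forwarding must be stopped
 * first. When the flush cycle is still at its default value, TCP/IPv4
 * GRO with the default flow/item limits is configured for the port.
 */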
2701 setup_gro(const char *onoff, portid_t port_id)
2703 if (!rte_eth_dev_is_valid_port(port_id)) {
2704 printf("invalid port id %u\n", port_id);
2707 if (test_done == 0) {
2708 printf("Before enable/disable GRO,"
2709 " please stop forwarding first\n");
2712 if (strcmp(onoff, "on") == 0) {
2713 if (gro_ports[port_id].enable != 0) {
2714 printf("Port %u has enabled GRO. Please"
2715 " disable GRO first\n", port_id);
2718 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2719 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
2720 gro_ports[port_id].param.max_flow_num =
2721 GRO_DEFAULT_FLOW_NUM;
2722 gro_ports[port_id].param.max_item_per_flow =
2723 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
2725 gro_ports[port_id].enable = 1;
2727 if (gro_ports[port_id].enable == 0) {
2728 printf("Port %u has disabled GRO\n", port_id);
2731 gro_ports[port_id].enable = 0;
2736 setup_gro_flush_cycles(uint8_t cycles)
2738 if (test_done == 0) {
2739 printf("Before change flush interval for GRO,"
2740 " please stop forwarding first.\n");
2744 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
2745 GRO_DEFAULT_FLUSH_CYCLES) {
2746 printf("The flushing cycle be in the range"
2747 " of 1 to %u. Revert to the default"
2749 GRO_MAX_FLUSH_CYCLES,
2750 GRO_DEFAULT_FLUSH_CYCLES);
2751 cycles = GRO_DEFAULT_FLUSH_CYCLES;
2754 gro_flush_cycles = cycles;
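/*
 * Show the GRO configuration of a port: the GRO type, the maximum
 * number of packets to perform GRO on, and the number of flush cycles.
 */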
2758 show_gro(portid_t port_id)
2760 struct rte_gro_param *param;
2761 uint32_t max_pkts_num;
2763 param = &gro_ports[port_id].param;
2765 if (!rte_eth_dev_is_valid_port(port_id)) {
2766 printf("Invalid port id %u.\n", port_id);
2769 if (gro_ports[port_id].enable) {
2770 printf("GRO type: TCP/IPv4\n");
2771 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2772 max_pkts_num = param->max_flow_num *
2773 param->max_item_per_flow;
2775 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
2776 printf("Max number of packets to perform GRO: %u\n",
2778 printf("Flushing cycles: %u\n", gro_flush_cycles);
2780 printf("Port %u doesn't enable GRO.\n", port_id);
2784 setup_gso(const char *mode, portid_t port_id)
2786 if (!rte_eth_dev_is_valid_port(port_id)) {
2787 printf("invalid port id %u\n", port_id);
2790 if (strcmp(mode, "on") == 0) {
2791 if (test_done == 0) {
2792 printf("before enabling GSO,"
2793 " please stop forwarding first\n");
2796 gso_ports[port_id].enable = 1;
2797 } else if (strcmp(mode, "off") == 0) {
2798 if (test_done == 0) {
2799 printf("before disabling GSO,"
2800 " please stop forwarding first\n");
2803 gso_ports[port_id].enable = 0;
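/*
 * Build (once, in a static buffer) the "|"-separated list of available
 * forwarding engine names; the retry variant skips the rx_only engine.
 */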
2808 list_pkt_forwarding_modes(void)
2810 static char fwd_modes[128] = "";
2811 const char *separator = "|";
2812 struct fwd_engine *fwd_eng;
2815 if (strlen (fwd_modes) == 0) {
2816 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2817 strncat(fwd_modes, fwd_eng->fwd_mode_name,
2818 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2819 strncat(fwd_modes, separator,
2820 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2822 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2829 list_pkt_forwarding_retry_modes(void)
2831 static char fwd_modes[128] = "";
2832 const char *separator = "|";
2833 struct fwd_engine *fwd_eng;
2836 if (strlen(fwd_modes) == 0) {
2837 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2838 if (fwd_eng == &rx_only_engine)
2840 strncat(fwd_modes, fwd_eng->fwd_mode_name,
2841 sizeof(fwd_modes) -
2842 strlen(fwd_modes) - 1);
2843 strncat(fwd_modes, separator,
2844 sizeof(fwd_modes) -
2845 strlen(fwd_modes) - 1);
2847 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2854 set_pkt_forwarding_mode(const char *fwd_mode_name)
2856 struct fwd_engine *fwd_eng;
2860 while ((fwd_eng = fwd_engines[i]) != NULL) {
2861 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
2862 printf("Set %s packet forwarding mode%s\n",
2864 retry_enabled == 0 ? "" : " with retry");
2865 cur_fwd_eng = fwd_eng;
2870 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
2874 add_rx_dump_callbacks(portid_t portid)
2876 struct rte_eth_dev_info dev_info;
2880 if (port_id_is_invalid(portid, ENABLED_WARN))
2883 ret = eth_dev_info_get_print_err(portid, &dev_info);
2887 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
2888 if (!ports[portid].rx_dump_cb[queue])
2889 ports[portid].rx_dump_cb[queue] =
2890 rte_eth_add_rx_callback(portid, queue,
2891 dump_rx_pkts, NULL);
2895 add_tx_dump_callbacks(portid_t portid)
2897 struct rte_eth_dev_info dev_info;
2901 if (port_id_is_invalid(portid, ENABLED_WARN))
2904 ret = eth_dev_info_get_print_err(portid, &dev_info);
2908 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
2909 if (!ports[portid].tx_dump_cb[queue])
2910 ports[portid].tx_dump_cb[queue] =
2911 rte_eth_add_tx_callback(portid, queue,
2912 dump_tx_pkts, NULL);
2916 remove_rx_dump_callbacks(portid_t portid)
2918 struct rte_eth_dev_info dev_info;
2922 if (port_id_is_invalid(portid, ENABLED_WARN))
2925 ret = eth_dev_info_get_print_err(portid, &dev_info);
2929 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
2930 if (ports[portid].rx_dump_cb[queue]) {
2931 rte_eth_remove_rx_callback(portid, queue,
2932 ports[portid].rx_dump_cb[queue]);
2933 ports[portid].rx_dump_cb[queue] = NULL;
2938 remove_tx_dump_callbacks(portid_t portid)
2940 struct rte_eth_dev_info dev_info;
2944 if (port_id_is_invalid(portid, ENABLED_WARN))
2947 ret = eth_dev_info_get_print_err(portid, &dev_info);
2951 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
2952 if (ports[portid].tx_dump_cb[queue]) {
2953 rte_eth_remove_tx_callback(portid, queue,
2954 ports[portid].tx_dump_cb[queue]);
2955 ports[portid].tx_dump_cb[queue] = NULL;
2960 configure_rxtx_dump_callbacks(uint16_t verbose)
2964 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2965 TESTPMD_LOG(ERR, "RX/TX callbacks are not enabled: RTE_ETHDEV_RXTX_CALLBACKS is not defined\n");
2969 RTE_ETH_FOREACH_DEV(portid)
2971 if (verbose == 1 || verbose > 2)
2972 add_rx_dump_callbacks(portid);
2974 remove_rx_dump_callbacks(portid);
2976 add_tx_dump_callbacks(portid);
2978 remove_tx_dump_callbacks(portid);
2983 set_verbose_level(uint16_t vb_level)
2985 printf("Change verbose level from %u to %u\n",
2986 (unsigned int) verbose_level, (unsigned int) vb_level);
2987 verbose_level = vb_level;
2988 configure_rxtx_dump_callbacks(verbose_level);
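/*
 * VLAN offload helpers: each reads the port's current VLAN offload
 * flags, sets or clears the relevant bit, applies the change with
 * rte_eth_dev_set_vlan_offload() and mirrors it into the cached rxmode
 * offloads. Typically reached from CLI commands such as
 * "vlan set strip on <port_id>".
 */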
2992 vlan_extend_set(portid_t port_id, int on)
2996 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2998 if (port_id_is_invalid(port_id, ENABLED_WARN))
3001 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3004 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
3005 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3007 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
3008 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3011 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3013 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
3014 "diag=%d\n", port_id, on, diag);
3015 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3019 rx_vlan_strip_set(portid_t port_id, int on)
3023 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3025 if (port_id_is_invalid(port_id, ENABLED_WARN))
3028 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3031 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
3032 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3034 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
3035 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3038 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3040 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
3041 "diag=%d\n", port_id, on, diag);
3042 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3046 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
3050 if (port_id_is_invalid(port_id, ENABLED_WARN))
3053 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
3055 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
3056 "diag=%d\n", port_id, queue_id, on, diag);
3060 rx_vlan_filter_set(portid_t port_id, int on)
3064 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3066 if (port_id_is_invalid(port_id, ENABLED_WARN))
3069 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3072 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
3073 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3075 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
3076 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3079 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3081 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
3082 "diag=%d\n", port_id, on, diag);
3083 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3087 rx_vlan_qinq_strip_set(portid_t port_id, int on)
3091 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3093 if (port_id_is_invalid(port_id, ENABLED_WARN))
3096 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3099 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
3100 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3102 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
3103 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3106 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3108 printf("%s(port_pi=%d, on=%d) failed "
3109 "diag=%d\n", __func__, port_id, on, diag);
3110 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3114 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
3118 if (port_id_is_invalid(port_id, ENABLED_WARN))
3120 if (vlan_id_is_invalid(vlan_id))
3122 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
3125 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
3127 port_id, vlan_id, on, diag);
3132 rx_vlan_all_filter_set(portid_t port_id, int on)
3136 if (port_id_is_invalid(port_id, ENABLED_WARN))
3138 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
3139 if (rx_vft_set(port_id, vlan_id, on))
3145 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
3149 if (port_id_is_invalid(port_id, ENABLED_WARN))
3152 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
3156 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed "
3158 port_id, vlan_type, tp_id, diag);
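/*
 * Enable hardware VLAN insertion on TX for a port (typically via the
 * CLI command "tx_vlan set <port_id> <vlan_id>"). The request is
 * rejected if QinQ insertion is already enabled or if the port does not
 * advertise DEV_TX_OFFLOAD_VLAN_INSERT.
 */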
3162 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
3164 struct rte_eth_dev_info dev_info;
3167 if (port_id_is_invalid(port_id, ENABLED_WARN))
3169 if (vlan_id_is_invalid(vlan_id))
3172 if (ports[port_id].dev_conf.txmode.offloads &
3173 DEV_TX_OFFLOAD_QINQ_INSERT) {
3174 printf("Error, as QinQ has been enabled.\n");
3178 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3182 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
3183 printf("Error: vlan insert is not supported by port %d\n",
3188 tx_vlan_reset(port_id);
3189 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
3190 ports[port_id].tx_vlan_id = vlan_id;
3194 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
3196 struct rte_eth_dev_info dev_info;
3199 if (port_id_is_invalid(port_id, ENABLED_WARN))
3201 if (vlan_id_is_invalid(vlan_id))
3203 if (vlan_id_is_invalid(vlan_id_outer))
3206 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3210 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
3211 printf("Error: qinq insert not supported by port %d\n",
3216 tx_vlan_reset(port_id);
3217 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
3218 DEV_TX_OFFLOAD_QINQ_INSERT);
3219 ports[port_id].tx_vlan_id = vlan_id;
3220 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
3224 tx_vlan_reset(portid_t port_id)
3226 if (port_id_is_invalid(port_id, ENABLED_WARN))
3228 ports[port_id].dev_conf.txmode.offloads &=
3229 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
3230 DEV_TX_OFFLOAD_QINQ_INSERT);
3231 ports[port_id].tx_vlan_id = 0;
3232 ports[port_id].tx_vlan_id_outer = 0;
3236 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
3238 if (port_id_is_invalid(port_id, ENABLED_WARN))
3241 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
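/*
 * Map an RX or TX queue of a port to a statistics counter index
 * (typically via the CLI command
 * "set stat_qmap rx|tx <port_id> <queue_id> <map_value>"), updating the
 * rx/tx queue stats mapping tables.
 */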
3245 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
3248 uint8_t existing_mapping_found = 0;
3250 if (port_id_is_invalid(port_id, ENABLED_WARN))
3253 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
3256 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
3257 printf("map_value not in required range 0..%d\n",
3258 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
3262 if (!is_rx) { /*then tx*/
3263 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3264 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3265 (tx_queue_stats_mappings[i].queue_id == queue_id)) {
3266 tx_queue_stats_mappings[i].stats_counter_id = map_value;
3267 existing_mapping_found = 1;
3271 if (!existing_mapping_found) { /* A new additional mapping... */
3272 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
3273 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
3274 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
3275 nb_tx_queue_stats_mappings++;
3279 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3280 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3281 (rx_queue_stats_mappings[i].queue_id == queue_id)) {
3282 rx_queue_stats_mappings[i].stats_counter_id = map_value;
3283 existing_mapping_found = 1;
3287 if (!existing_mapping_found) { /* A new additional mapping... */
3288 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
3289 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
3290 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
3291 nb_rx_queue_stats_mappings++;
3297 set_xstats_hide_zero(uint8_t on_off)
3299 xstats_hide_zero = on_off;
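/*
 * Flow director display helpers: print the configured input masks, the
 * flex payload layout and the flex masks; used by fdir_get_infos()
 * below.
 */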
3303 print_fdir_mask(struct rte_eth_fdir_masks *mask)
3305 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
3307 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3308 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
3309 " tunnel_id: 0x%08x",
3310 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
3311 rte_be_to_cpu_32(mask->tunnel_id_mask));
3312 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3313 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
3314 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
3315 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
3317 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
3318 rte_be_to_cpu_16(mask->src_port_mask),
3319 rte_be_to_cpu_16(mask->dst_port_mask));
3321 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3322 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
3323 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
3324 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
3325 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
3327 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3328 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
3329 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
3330 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
3331 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
3338 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3340 struct rte_eth_flex_payload_cfg *cfg;
3343 for (i = 0; i < flex_conf->nb_payloads; i++) {
3344 cfg = &flex_conf->flex_set[i];
3345 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
3347 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
3348 printf("\n L2_PAYLOAD: ");
3349 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
3350 printf("\n L3_PAYLOAD: ");
3351 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
3352 printf("\n L4_PAYLOAD: ");
3354 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
3355 for (j = 0; j < num; j++)
3356 printf(" %-5u", cfg->src_offset[j]);
3362 flowtype_to_str(uint16_t flow_type)
3364 struct flow_type_info {
3370 static struct flow_type_info flowtype_str_table[] = {
3371 {"raw", RTE_ETH_FLOW_RAW},
3372 {"ipv4", RTE_ETH_FLOW_IPV4},
3373 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
3374 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
3375 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
3376 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
3377 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
3378 {"ipv6", RTE_ETH_FLOW_IPV6},
3379 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
3380 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
3381 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
3382 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
3383 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
3384 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
3385 {"port", RTE_ETH_FLOW_PORT},
3386 {"vxlan", RTE_ETH_FLOW_VXLAN},
3387 {"geneve", RTE_ETH_FLOW_GENEVE},
3388 {"nvgre", RTE_ETH_FLOW_NVGRE},
3389 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
3392 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
3393 if (flowtype_str_table[i].ftype == flow_type)
3394 return flowtype_str_table[i].str;
3401 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3403 struct rte_eth_fdir_flex_mask *mask;
3407 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
3408 mask = &flex_conf->flex_mask[i];
3409 p = flowtype_to_str(mask->flow_type);
3410 printf("\n %s:\t", p ? p : "unknown");
3411 for (j = 0; j < num; j++)
3412 printf(" %02x", mask->mask[j]);
3418 print_fdir_flow_type(uint32_t flow_types_mask)
3423 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3424 if (!(flow_types_mask & (1 << i)))
3426 p = flowtype_to_str(i);
3436 fdir_get_infos(portid_t port_id)
3438 struct rte_eth_fdir_stats fdir_stat;
3439 struct rte_eth_fdir_info fdir_info;
3442 static const char *fdir_stats_border = "########################";
3444 if (port_id_is_invalid(port_id, ENABLED_WARN))
3446 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
3448 printf("\n FDIR is not supported on port %-2d\n",
3453 memset(&fdir_info, 0, sizeof(fdir_info));
3454 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3455 RTE_ETH_FILTER_INFO, &fdir_info);
3456 memset(&fdir_stat, 0, sizeof(fdir_stat));
3457 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3458 RTE_ETH_FILTER_STATS, &fdir_stat);
3459 printf("\n %s FDIR infos for port %-2d %s\n",
3460 fdir_stats_border, port_id, fdir_stats_border);
3462 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
3463 printf(" PERFECT\n");
3464 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
3465 printf(" PERFECT-MAC-VLAN\n");
3466 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3467 printf(" PERFECT-TUNNEL\n");
3468 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
3469 printf(" SIGNATURE\n");
3471 printf(" DISABLE\n");
3472 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
3473 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
3474 printf(" SUPPORTED FLOW TYPE: ");
3475 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
3477 printf(" FLEX PAYLOAD INFO:\n");
3478 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
3479 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
3480 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
3481 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
3482 fdir_info.flex_payload_unit,
3483 fdir_info.max_flex_payload_segment_num,
3484 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
3486 print_fdir_mask(&fdir_info.mask);
3487 if (fdir_info.flex_conf.nb_payloads > 0) {
3488 printf(" FLEX PAYLOAD SRC OFFSET:");
3489 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3491 if (fdir_info.flex_conf.nb_flexmasks > 0) {
3492 printf(" FLEX MASK CFG:");
3493 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3495 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
3496 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
3497 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
3498 fdir_info.guarant_spc, fdir_info.best_spc);
3499 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
3500 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
3501 " add: %-10"PRIu64" remove: %"PRIu64"\n"
3502 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
3503 fdir_stat.collision, fdir_stat.free,
3504 fdir_stat.maxhash, fdir_stat.maxlen,
3505 fdir_stat.add, fdir_stat.remove,
3506 fdir_stat.f_add, fdir_stat.f_remove);
3507 printf(" %s############################%s\n",
3508 fdir_stats_border, fdir_stats_border);
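/*
 * Store a flow director flex mask (and, below, a flex payload layout)
 * in the port's cached fdir_conf.flex_conf: replace the entry matching
 * the same flow type / payload type, or append a new one if the table
 * still has room.
 */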
3512 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3514 struct rte_port *port;
3515 struct rte_eth_fdir_flex_conf *flex_conf;
3518 port = &ports[port_id];
3519 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3520 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3521 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3526 if (i >= RTE_ETH_FLOW_MAX) {
3527 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3528 idx = flex_conf->nb_flexmasks;
3529 flex_conf->nb_flexmasks++;
3531 printf("The flex mask table is full. Can not set flex"
3532 " mask for flow_type(%u).", cfg->flow_type);
3536 rte_memcpy(&flex_conf->flex_mask[idx],
3538 sizeof(struct rte_eth_fdir_flex_mask));
3542 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3544 struct rte_port *port;
3545 struct rte_eth_fdir_flex_conf *flex_conf;
3548 port = &ports[port_id];
3549 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3550 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3551 if (cfg->type == flex_conf->flex_set[i].type) {
3556 if (i >= RTE_ETH_PAYLOAD_MAX) {
3557 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3558 idx = flex_conf->nb_payloads;
3559 flex_conf->nb_payloads++;
3561 printf("The flex payload table is full. Can not set"
3562 " flex payload for type(%u).", cfg->type);
3566 rte_memcpy(&flex_conf->flex_set[idx],
3568 sizeof(struct rte_eth_flex_payload_cfg));
3573 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3575 #ifdef RTE_LIBRTE_IXGBE_PMD
3579 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3581 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3585 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3586 is_rx ? "rx" : "tx", port_id, diag);
3589 printf("VF %s setting not supported for port %d\n",
3590 is_rx ? "Rx" : "Tx", port_id);
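/*
 * TX rate limiting helpers: set_queue_rate_limit() bounds a single TX
 * queue and rejects rates above the current link speed, while
 * set_vf_rate_limit() tries the ixgbe and bnxt PMD-specific APIs when
 * those drivers are compiled in.
 */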
3596 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3599 struct rte_eth_link link;
3602 if (port_id_is_invalid(port_id, ENABLED_WARN))
3604 ret = eth_link_get_nowait_print_err(port_id, &link);
3607 if (rate > link.link_speed) {
3608 printf("Invalid rate value:%u bigger than link speed: %u\n",
3609 rate, link.link_speed);
3612 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3615 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3621 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
3623 int diag = -ENOTSUP;
3627 RTE_SET_USED(q_msk);
3629 #ifdef RTE_LIBRTE_IXGBE_PMD
3630 if (diag == -ENOTSUP)
3631 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
3634 #ifdef RTE_LIBRTE_BNXT_PMD
3635 if (diag == -ENOTSUP)
3636 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
3641 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
3647 * Functions to manage the set of filtered Multicast MAC addresses.
3649 * A pool of filtered multicast MAC addresses is associated with each port.
3650 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3651 * The address of the pool and the number of valid multicast MAC addresses
3652 * recorded in the pool are stored in the fields "mc_addr_pool" and
3653 * "mc_addr_nb" of the "rte_port" data structure.
3655 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API requires
3656 * that it be supplied a contiguous array of multicast MAC addresses.
3657 * To comply with this constraint, the set of multicast addresses recorded
3658 * into the pool are systematically compacted at the beginning of the pool.
3659 * Hence, when a multicast address is removed from the pool, all following
3660 * addresses, if any, are copied back to keep the set contiguous.
3662 #define MCAST_POOL_INC 32
3665 mcast_addr_pool_extend(struct rte_port *port)
3667 struct rte_ether_addr *mc_pool;
3668 size_t mc_pool_size;
3671 * If a free entry is available at the end of the pool, just
3672 * increment the number of recorded multicast addresses.
3674 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3680 * [re]allocate a pool with MCAST_POOL_INC more entries.
3681 * The previous test guarantees that port->mc_addr_nb is a multiple
3682 * of MCAST_POOL_INC.
3684 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
3686 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
3688 if (mc_pool == NULL) {
3689 printf("allocation of pool of %u multicast addresses failed\n",
3690 port->mc_addr_nb + MCAST_POOL_INC);
3694 port->mc_addr_pool = mc_pool;
3701 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
3703 port->mc_addr_nb--;
3704 if (addr_idx == port->mc_addr_nb) {
3705 /* No need to recompact the set of multicast addresses. */
3706 if (port->mc_addr_nb == 0) {
3707 /* free the pool of multicast addresses. */
3708 free(port->mc_addr_pool);
3709 port->mc_addr_pool = NULL;
3713 memmove(&port->mc_addr_pool[addr_idx],
3714 &port->mc_addr_pool[addr_idx + 1],
3715 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
3719 eth_port_multicast_addr_list_set(portid_t port_id)
3721 struct rte_port *port;
3724 port = &ports[port_id];
3725 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3729 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
3730 port_id, port->mc_addr_nb, -diag);
3734 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
3736 struct rte_port *port;
3739 if (port_id_is_invalid(port_id, ENABLED_WARN))
3742 port = &ports[port_id];
3745 * Check that the added multicast MAC address is not already recorded
3746 * in the pool of multicast addresses.
3748 for (i = 0; i < port->mc_addr_nb; i++) {
3749 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3750 printf("multicast address already filtered by port\n");
3755 if (mcast_addr_pool_extend(port) != 0)
3757 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
3758 eth_port_multicast_addr_list_set(port_id);
3762 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
3764 struct rte_port *port;
3767 if (port_id_is_invalid(port_id, ENABLED_WARN))
3770 port = &ports[port_id];
3773 * Search the pool of multicast MAC addresses for the removed address.
3775 for (i = 0; i < port->mc_addr_nb; i++) {
3776 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
3779 if (i == port->mc_addr_nb) {
3780 printf("multicast address not filtered by port %d\n", port_id);
3784 mcast_addr_pool_remove(port, i);
3785 eth_port_multicast_addr_list_set(port_id);
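/*
 * Display the DCB configuration of a port: number of traffic classes,
 * priority-to-TC mapping, per-TC bandwidth percentages and the RX/TX
 * queue base/count assigned to each TC.
 */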
3789 port_dcb_info_display(portid_t port_id)
3791 struct rte_eth_dcb_info dcb_info;
3794 static const char *border = "================";
3796 if (port_id_is_invalid(port_id, ENABLED_WARN))
3799 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
3801 printf("\n Failed to get dcb infos on port %-2d\n",
3805 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
3806 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
3808 for (i = 0; i < dcb_info.nb_tcs; i++)
3810 printf("\n Priority : ");
3811 for (i = 0; i < dcb_info.nb_tcs; i++)
3812 printf("\t%4d", dcb_info.prio_tc[i]);
3813 printf("\n BW percent :");
3814 for (i = 0; i < dcb_info.nb_tcs; i++)
3815 printf("\t%4d%%", dcb_info.tc_bws[i]);
3816 printf("\n RXQ base : ");
3817 for (i = 0; i < dcb_info.nb_tcs; i++)
3818 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
3819 printf("\n RXQ number :");
3820 for (i = 0; i < dcb_info.nb_tcs; i++)
3821 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
3822 printf("\n TXQ base : ");
3823 for (i = 0; i < dcb_info.nb_tcs; i++)
3824 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
3825 printf("\n TXQ number :");
3826 for (i = 0; i < dcb_info.nb_tcs; i++)
3827 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
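/*
 * File helpers used by commands that load or save binary blobs (for
 * example DDP profile packages): open_file() reads a whole file into a
 * malloc'd buffer and returns its size, save_file() writes a buffer
 * back to a file, and close_file() frees the buffer.
 */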
3832 open_file(const char *file_path, uint32_t *size)
3834 int fd = open(file_path, O_RDONLY);
3836 uint8_t *buf = NULL;
3844 printf("%s: Failed to open %s\n", __func__, file_path);
3848 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
3850 printf("%s: File operations failed\n", __func__);
3854 pkg_size = st_buf.st_size;
3857 printf("%s: File operations failed\n", __func__);
3861 buf = (uint8_t *)malloc(pkg_size);
3864 printf("%s: Failed to malloc memory\n", __func__);
3868 ret = read(fd, buf, pkg_size);
3871 printf("%s: File read operation failed\n", __func__);
3885 save_file(const char *file_path, uint8_t *buf, uint32_t size)
3887 FILE *fh = fopen(file_path, "wb");
3890 printf("%s: Failed to open %s\n", __func__, file_path);
3894 if (fwrite(buf, 1, size, fh) != size) {
3896 printf("%s: File write operation failed\n", __func__);
3906 close_file(uint8_t *buf)
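/*
 * Print the i40e queue regions configured on a port: each region's
 * queue range, its user priorities and its flow types. Only built when
 * the i40e PMD is available; otherwise the arguments are unused.
 */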
3917 port_queue_region_info_display(portid_t port_id, void *buf)
3919 #ifdef RTE_LIBRTE_I40E_PMD
3921 struct rte_pmd_i40e_queue_regions *info =
3922 (struct rte_pmd_i40e_queue_regions *)buf;
3923 static const char *queue_region_info_stats_border = "-------";
3925 if (!info->queue_region_number)
3926 printf("there is no region has been set before");
3928 printf("\n %s All queue region info for port=%2d %s",
3929 queue_region_info_stats_border, port_id,
3930 queue_region_info_stats_border);
3931 printf("\n queue_region_number: %-14u \n",
3932 info->queue_region_number);
3934 for (i = 0; i < info->queue_region_number; i++) {
3935 printf("\n region_id: %-14u queue_number: %-14u "
3936 "queue_start_index: %-14u \n",
3937 info->region[i].region_id,
3938 info->region[i].queue_num,
3939 info->region[i].queue_start_index);
3941 printf(" user_priority_num is %-14u :",
3942 info->region[i].user_priority_num);
3943 for (j = 0; j < info->region[i].user_priority_num; j++)
3944 printf(" %-14u ", info->region[i].user_priority[j]);
3946 printf("\n flowtype_num is %-14u :",
3947 info->region[i].flowtype_num);
3948 for (j = 0; j < info->region[i].flowtype_num; j++)
3949 printf(" %-14u ", info->region[i].hw_flowtype[j]);
3952 RTE_SET_USED(port_id);