/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_mtr.h>
#include <rte_errno.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#include <rte_hexdump.h>

#include "testpmd.h"
#include "cmdline_mtr.h"

#define ETHDEV_FWVERS_LEN 32
#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9
static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};
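/*
 * Lookup table between the RSS type names accepted on the testpmd command
 * line (e.g. "port config all rss <type>") and the corresponding ETH_RSS_*
 * offload flag sets passed to the ethdev API.
 */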
const struct rss_type_info rss_type_table[] = {
	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
		ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
	{ "none", 0 },
	{ "eth", ETH_RSS_ETH },
	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
	{ "vlan", ETH_RSS_VLAN },
	{ "s-vlan", ETH_RSS_S_VLAN },
	{ "c-vlan", ETH_RSS_C_VLAN },
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
	{ "ip", ETH_RSS_IP },
	{ "udp", ETH_RSS_UDP },
	{ "tcp", ETH_RSS_TCP },
	{ "sctp", ETH_RSS_SCTP },
	{ "tunnel", ETH_RSS_TUNNEL },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
	{ "esp", ETH_RSS_ESP },
	{ "ah", ETH_RSS_AH },
	{ "l2tpv3", ETH_RSS_L2TPV3 },
	{ "pfcp", ETH_RSS_PFCP },
	{ "pppoe", ETH_RSS_PPPOE },
	{ "gtpu", ETH_RSS_GTPU },
	{ "ecpri", ETH_RSS_ECPRI },
	{ "mpls", ETH_RSS_MPLS },
	{ NULL, 0 },
};
static const struct {
	enum rte_eth_fec_mode mode;
	const char *name;
} fec_mode_name[] = {
	{
		.mode = RTE_ETH_FEC_NOFEC,
		.name = "off",
	},
	{
		.mode = RTE_ETH_FEC_AUTO,
		.name = "auto",
	},
	{
		.mode = RTE_ETH_FEC_BASER,
		.name = "baser",
	},
	{
		.mode = RTE_ETH_FEC_RS,
		.name = "rs",
	},
};
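/* Print an Ethernet address, prefixed by "name"; shared by the info
 * display helpers below.
 */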
static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
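/*
 * Implements "show port stats <port_id>": prints the basic rte_eth_stats
 * counters and the RX/TX throughput since the previous invocation, computed
 * from the static per-port snapshots (prev_pkts_*, prev_bytes_*, prev_ns).
 */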
void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
	struct timespec cur_time;
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
								diff_ns;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
	printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
	printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf);
	printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (prev_ns[port_id] != 0)
			diff_ns = ns - prev_ns[port_id];
		prev_ns[port_id] = ns;
	}

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_ns > 0 ?
		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
	mpps_tx = diff_ns > 0 ?
		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_ns > 0 ?
		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
	mbps_tx = diff_ns > 0 ?
		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
	       PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}
void
nic_stats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_stats_reset(port_id);
	if (ret != 0) {
		printf("%s: Error: failed to reset stats (port %u): %s",
		       __func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		printf("%s: Error: failed to get stats (port %u): %s",
		       __func__, port_id, strerror(ret));
		return;
	}
	printf("\n NIC statistics for port %d cleared\n", port_id);
}
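/*
 * Implements "show port xstats <port_id>". The xstats API is used in the
 * usual two-step pattern: first query the number of entries, then fetch the
 * id->name lookup table and the values into two arrays of that size.
 */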
void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}
void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		printf("%s: Error: failed to reset xstats (port %u): %s",
		       __func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		printf("%s: Error: failed to get stats (port %u): %s",
		       __func__, port_id, strerror(ret));
	}
}
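/* Map an RTE_ETH_QUEUE_STATE_* value to a human-readable name. */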
static const char *
get_queue_state_name(uint8_t queue_state)
{
	if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
		return "stopped";
	else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
		return "started";
	else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return "hairpin";
	else
		return "unknown";
}
static void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
			"RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
		(qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
		(qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
		(qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
	if (qinfo.rx_buf_size != 0)
		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}
static void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
			"TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
		(qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state));

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}
/* Match callback accepting every bus; used to iterate all buses below. */
static int bus_match_all(const struct rte_bus *bus, const void *data)
{
	RTE_SET_USED(bus);
	RTE_SET_USED(data);
	return 0;
}
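/* Print every speed advertised in a dev_info.speed_capa bitmask. */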
static void
device_infos_display_speeds(uint32_t speed_capa)
{
	printf("\n\tDevice speed capability:");
	if (speed_capa == ETH_LINK_SPEED_AUTONEG)
		printf(" Autonegotiate (all speeds)");
	if (speed_capa & ETH_LINK_SPEED_FIXED)
		printf(" Disable autonegotiate (fixed speed) ");
	if (speed_capa & ETH_LINK_SPEED_10M_HD)
		printf(" 10 Mbps half-duplex ");
	if (speed_capa & ETH_LINK_SPEED_10M)
		printf(" 10 Mbps full-duplex ");
	if (speed_capa & ETH_LINK_SPEED_100M_HD)
		printf(" 100 Mbps half-duplex ");
	if (speed_capa & ETH_LINK_SPEED_100M)
		printf(" 100 Mbps full-duplex ");
	if (speed_capa & ETH_LINK_SPEED_1G)
		printf(" 1 Gbps ");
	if (speed_capa & ETH_LINK_SPEED_2_5G)
		printf(" 2.5 Gbps ");
	if (speed_capa & ETH_LINK_SPEED_5G)
		printf(" 5 Gbps ");
	if (speed_capa & ETH_LINK_SPEED_10G)
		printf(" 10 Gbps ");
	if (speed_capa & ETH_LINK_SPEED_20G)
		printf(" 20 Gbps ");
	if (speed_capa & ETH_LINK_SPEED_25G)
		printf(" 25 Gbps ");
	if (speed_capa & ETH_LINK_SPEED_40G)
		printf(" 40 Gbps ");
	if (speed_capa & ETH_LINK_SPEED_50G)
		printf(" 50 Gbps ");
	if (speed_capa & ETH_LINK_SPEED_56G)
		printf(" 56 Gbps ");
	if (speed_capa & ETH_LINK_SPEED_100G)
		printf(" 100 Gbps ");
	if (speed_capa & ETH_LINK_SPEED_200G)
		printf(" 200 Gbps ");
}
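/*
 * Implements "show device info <identifier>|all": walks all buses that
 * provide an iterator, optionally filtered by a devargs identifier, and
 * prints per-device information followed by the ethdev ports it owns.
 */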
static void
device_infos_display(const char *identifier)
{
	static const char *info_border = "*********************";
	struct rte_bus *start = NULL, *next;
	struct rte_dev_iterator dev_iter;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_ether_addr mac_addr;
	struct rte_device *dev;
	struct rte_devargs da;
	portid_t port_id;
	char devstr[128];
	struct rte_eth_dev_info dev_info;

	memset(&da, 0, sizeof(da));
	if (!identifier)
		goto skip_parse;

	if (rte_devargs_parsef(&da, "%s", identifier)) {
		printf("cannot parse identifier\n");
		return;
	}

skip_parse:
	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {

		start = next;
		if (identifier && da.bus != next)
			continue;

		/* Skip buses that don't have iterate method */
		if (!next->dev_iterate)
			continue;

		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
		RTE_DEV_FOREACH(dev, devstr, &dev_iter) {

			if (!dev->driver)
				continue;
			/* Check for matching device if identifier is present */
			if (identifier &&
			    strncmp(da.name, dev->name, strlen(dev->name)))
				continue;
			printf("\n%s Infos for device %s %s\n",
			       info_border, dev->name, info_border);
			printf("Bus name: %s", dev->bus->name);
			printf("\nDriver name: %s", dev->driver->name);
			printf("\nDevargs: %s",
			       dev->devargs ? dev->devargs->args : "");
			printf("\nConnect to socket: %d", dev->numa_node);
			printf("\n");

			/* List ports with matching device name */
			RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
				printf("\n\tPort id: %-2d", port_id);
				if (eth_macaddr_get_print_err(port_id,
							      &mac_addr) == 0)
					print_ethaddr("\n\tMAC address: ",
						      &mac_addr);
				rte_eth_dev_get_name_by_port(port_id, name);
				printf("\n\tDevice name: %s", name);
				if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
					device_infos_display_speeds(dev_info.speed_capa);
				printf("\n");
			}
		}
	}
	rte_devargs_reset(&da);
}
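/* Implements "show port info <port_id>". */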
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool * mp;
	static const char *info_border = "*********************";
	uint16_t mtu;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;
	char fw_version[ETHDEV_FWVERS_LEN];

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	port = &ports[port_id];
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
		print_ethaddr("MAC address: ", &mac_addr);
	rte_eth_dev_get_name_by_port(port_id, name);
	printf("\nDevice name: %s", name);
	printf("\nDriver name: %s", dev_info.driver_name);

	if (rte_eth_dev_fw_version_get(port_id, fw_version,
				       ETHDEV_FWVERS_LEN) == 0)
		printf("\nFirmware-version: %s", fw_version);
	else
		printf("\nFirmware-version: %s", "not available");

	if (dev_info.device->devargs && dev_info.device->devargs->args)
		printf("\nDevargs: %s", dev_info.device->devargs->args);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id], 0);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
	       ("On") : ("Off"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload:\n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf(" strip on, ");
		else
			printf(" strip off, ");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("filter on, ");
		else
			printf("filter off, ");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("extend on, ");
		else
			printf("extend off, ");

		if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
			printf("qinq strip on\n");
		else
			printf("qinq strip off\n");
	}

	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No RSS offload flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		printf("Supported RSS offload flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1;
		     i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			if (p)
				printf("  %s\n", p);
			else
				printf("  user defined %d\n", i);
		}
	}

	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
	printf("Maximum configurable length of RX packet: %u\n",
		dev_info.max_rx_pktlen);
	printf("Maximum configurable size of LRO aggregated packet: %u\n",
		dev_info.max_lro_pkt_size);
	if (dev_info.max_vfs)
		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
	if (dev_info.max_vmdq_pools)
		printf("Maximum number of VMDq pools: %u\n",
			dev_info.max_vmdq_pools);

	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
	printf("Max segment number per packet: %hu\n",
		dev_info.tx_desc_lim.nb_seg_max);
	printf("Max segment number per MTU/TSO: %hu\n",
		dev_info.tx_desc_lim.nb_mtu_seg_max);

	/* Show switch info only if valid switch domain and port id is set */
	if (dev_info.switch_info.domain_id !=
		RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		if (dev_info.switch_info.name)
			printf("Switch name: %s\n", dev_info.switch_info.name);

		printf("Switch domain Id: %u\n",
			dev_info.switch_info.domain_id);
		printf("Switch Port Id: %u\n",
			dev_info.switch_info.port_id);
	}
}
void
port_summary_header_display(void)
{
	uint16_t port_number;

	port_number = rte_eth_dev_count_avail();
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
			"Driver", "Status", "Link");
}
void
port_summary_display(portid_t port_id)
{
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	rte_eth_dev_get_name_by_port(port_id, name);
	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
	if (ret != 0)
		return;

	printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n",
		port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
		dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
		rte_eth_link_speed_to_str(link.link_speed));
}
void
port_eeprom_display(portid_t port_id)
{
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
	if (len_eeprom < 0) {
		switch (len_eeprom) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get EEPROM: %d\n", len_eeprom);
			break;
		}
		return;
	}

	char buf[len_eeprom];
	einfo.offset = 0;
	einfo.length = len_eeprom;
	einfo.data = buf;

	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get EEPROM: %d\n", ret);
			break;
		}
		return;
	}
	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
}
void
port_module_eeprom_display(portid_t port_id)
{
	struct rte_eth_dev_module_info minfo;
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_dev_get_module_info(port_id, &minfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get module EEPROM: %d\n", ret);
			break;
		}
		return;
	}

	char buf[minfo.eeprom_len];
	einfo.offset = 0;
	einfo.length = minfo.eeprom_len;
	einfo.data = buf;

	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get module EEPROM: %d\n", ret);
			break;
		}
		return;
	}

	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
}
int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

void print_valid_ports(void)
{
	portid_t pid;

	printf("The valid ports array is [");
	RTE_ETH_FOREACH_DEV(pid) {
		printf(" %d", pid);
	}
	printf(" ]\n");
}
static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}
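/*
 * Helpers for the "read reg"/"write reg" commands: a register offset is
 * valid only if it is 4-byte aligned and falls inside BAR 0 of the PCI
 * device backing the port.
 */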
static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	const struct rte_pci_device *pci_dev;
	const struct rte_bus *bus;
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}

	if (!ports[port_id].dev_info.device) {
		printf("Invalid device\n");
		return 0;
	}

	bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
	if (bus && !strcmp(bus->name, "pci")) {
		pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
	} else {
		printf("Not a PCI device\n");
		return 1;
	}

	pci_len = pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}
static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}
#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}
void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}
void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}
void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}
void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
				(unsigned)value, (unsigned)value,
				(unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;
	struct rte_port *rte_port = &ports[port_id];
	struct rte_eth_dev_info dev_info;
	uint16_t eth_overhead;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
		printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
			mtu, dev_info.min_mtu, dev_info.max_mtu);
		return;
	}
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag)
		printf("Set MTU failed. diag=%d\n", diag);
	else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		/*
		 * Ether overhead in driver is equal to the difference of
		 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
		 * device supports jumbo frame.
		 */
		eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
		if (mtu > RTE_ETHER_MTU) {
			rte_port->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
			rte_port->dev_conf.rxmode.max_rx_pkt_len =
						mtu + eth_overhead;
		} else
			rte_port->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;
	}
}
/* Generic flow management functions. */

static struct port_flow_tunnel *
port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
{
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (flow_tunnel->id == port_tunnel_id)
			return flow_tunnel;
	}
	return NULL;
}
const char *
port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
{
	const char *type;

	switch (tunnel->type) {
	default:
		type = "unknown";
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		type = "vxlan";
		break;
	}

	return type;
}
struct port_flow_tunnel *
port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
			return flow_tunnel;
	}
	return NULL;
}
void port_flow_tunnel_list(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		printf("port %u tunnel #%u type=%s",
			port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
		if (flt->tunnel.tun_id)
			printf(" id=%" PRIu64, flt->tunnel.tun_id);
		printf("\n");
	}
}
void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->id == tunnel_id)
			break;
	}
	if (flt) {
		LIST_REMOVE(flt, chain);
		free(flt);
		printf("port %u: flow tunnel #%u destroyed\n",
			port_id, tunnel_id);
	}
}
void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
{
	struct rte_port *port = &ports[port_id];
	enum rte_flow_item_type type;
	struct port_flow_tunnel *flt;

	if (!strcmp(ops->type, "vxlan"))
		type = RTE_FLOW_ITEM_TYPE_VXLAN;
	else {
		printf("cannot offload \"%s\" tunnel type\n", ops->type);
		return;
	}
	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->tunnel.type == type)
			break;
	}
	if (!flt) {
		flt = calloc(1, sizeof(*flt));
		if (!flt) {
			printf("failed to allocate port flt object\n");
			return;
		}
		flt->tunnel.type = type;
		flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
			  LIST_FIRST(&port->flow_tunnel_list)->id + 1;
		LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
	}
	printf("port %d: flow tunnel #%u type %s\n",
		port_id, flt->id, ops->type);
}
/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions,
	      struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct port_flow *pf;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (ret < 0)
		return NULL;
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
	if (!pf) {
		rte_flow_error_set
			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "calloc() failed");
		return NULL;
	}
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
			  error) >= 0)
		return pf;
	free(pf);
	return NULL;
}
/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)",
	       rte_strerror(err));
	return -err;
}
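/* Pretty-print the RSS configuration returned by querying an RSS action. */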
static void
rss_config_display(struct rte_flow_action_rss *rss_conf)
{
	uint8_t i;

	if (rss_conf == NULL) {
		printf("Invalid rule\n");
		return;
	}

	printf("RSS:\n"
	       " queues:");
	if (rss_conf->queue_num == 0)
		printf(" none");
	for (i = 0; i < rss_conf->queue_num; i++)
		printf(" %d", rss_conf->queue[i]);
	printf("\n");

	printf(" function: ");
	switch (rss_conf->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
		printf("default\n");
		break;
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		printf("toeplitz\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
		printf("simple_xor\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
		printf("symmetric_toeplitz\n");
		break;
	default:
		printf("Unknown function\n");
		return;
	}

	printf(" types:\n");
	if (rss_conf->types == 0) {
		printf("  none\n");
		return;
	}
	for (i = 0; rss_type_table[i].str; i++) {
		if ((rss_conf->types &
		    rss_type_table[i].rss_type) ==
		    rss_type_table[i].rss_type &&
		    rss_type_table[i].rss_type != 0)
			printf(" %s\n", rss_type_table[i].str);
	}
}
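/*
 * Indirect (shared) actions are tracked per port in a singly linked list
 * ordered by descending ID; the helpers below look up and allocate entries.
 */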
static struct port_indirect_action *
action_get_by_id(portid_t port_id, uint32_t id)
{
	struct rte_port *port;
	struct port_indirect_action **ppia;
	struct port_indirect_action *pia = NULL;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return NULL;
	port = &ports[port_id];
	ppia = &port->actions_list;
	while (*ppia) {
		if ((*ppia)->id == id) {
			pia = *ppia;
			break;
		}
		ppia = &(*ppia)->next;
	}
	if (!pia)
		printf("Failed to find indirect action #%u on port %u\n",
		       id, port_id);
	return pia;
}
static int
action_alloc(portid_t port_id, uint32_t id,
	     struct port_indirect_action **action)
{
	struct rte_port *port;
	struct port_indirect_action **ppia;
	struct port_indirect_action *pia = NULL;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	if (id == UINT32_MAX) {
		/* taking first available ID */
		if (port->actions_list) {
			if (port->actions_list->id == UINT32_MAX - 1) {
				printf("Highest indirect action ID is already"
				       " assigned, delete it first\n");
				return -ENOMEM;
			}
			id = port->actions_list->id + 1;
		} else {
			id = 0;
		}
	}
	pia = calloc(1, sizeof(*pia));
	if (!pia) {
		printf("Allocation of port %u indirect action failed\n",
		       port_id);
		return -ENOMEM;
	}
	ppia = &port->actions_list;
	while (*ppia && (*ppia)->id > id)
		ppia = &(*ppia)->next;
	if (*ppia && (*ppia)->id == id) {
		printf("Indirect action #%u is already assigned,"
		       " delete it first\n", id);
		free(pia);
		return -EINVAL;
	}
	pia->next = *ppia;
	pia->id = id;
	*ppia = pia;
	*action = pia;
	return 0;
}
/** Create indirect action */
int
port_action_handle_create(portid_t port_id, uint32_t id,
			  const struct rte_flow_indir_action_conf *conf,
			  const struct rte_flow_action *action)
{
	struct port_indirect_action *pia;
	int ret;
	struct rte_flow_error error;

	ret = action_alloc(port_id, id, &pia);
	if (ret)
		return ret;
	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
		struct rte_flow_action_age *age =
			(struct rte_flow_action_age *)(uintptr_t)(action->conf);

		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
		age->context = &pia->age_type;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
		struct rte_flow_action_conntrack *ct =
		(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);

		memcpy(ct, &conntrack_context, sizeof(*ct));
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
						    &error);
	if (!pia->handle) {
		uint32_t destroy_id = pia->id;
		port_action_handle_destroy(port_id, 1, &destroy_id);
		return port_flow_complain(&error);
	}
	pia->type = action->type;
	printf("Indirect action #%u created\n", pia->id);
	return 0;
}
/** Destroy indirect action */
int
port_action_handle_destroy(portid_t port_id,
			   uint32_t n,
			   const uint32_t *actions)
{
	struct rte_port *port;
	struct port_indirect_action **tmp;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->actions_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_indirect_action *pia = *tmp;

			if (actions[i] != pia->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));

			if (pia->handle && rte_flow_action_handle_destroy(
					port_id, pia->handle, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			*tmp = pia->next;
			printf("Indirect action #%u destroyed\n", pia->id);
			free(pia);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
	}
	return ret;
}
/** Get indirect action by port + id */
struct rte_flow_action_handle *
port_action_handle_get_by_id(portid_t port_id, uint32_t id)
{
	struct port_indirect_action *pia = action_get_by_id(port_id, id);

	return (pia) ? pia->handle : NULL;
}
/** Update indirect action */
int
port_action_handle_update(portid_t port_id, uint32_t id,
			  const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_flow_action_handle *action_handle;
	struct port_indirect_action *pia;
	const void *update;

	action_handle = port_action_handle_get_by_id(port_id, id);
	if (!action_handle)
		return -EINVAL;
	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		update = action->conf;
		break;
	default:
		update = action;
		break;
	}
	if (rte_flow_action_handle_update(port_id, action_handle, update,
					  &error)) {
		return port_flow_complain(&error);
	}
	printf("Indirect action #%u updated\n", id);
	return 0;
}
int
port_action_handle_query(portid_t port_id, uint32_t id)
{
	struct rte_flow_error error;
	struct port_indirect_action *pia;
	union {
		struct rte_flow_query_count count;
		struct rte_flow_query_age age;
		struct rte_flow_action_conntrack ct;
	} query;

	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		printf("Indirect action %u (type: %d) on port %u doesn't support query\n",
		       id, pia->type, port_id);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error))
		return port_flow_complain(&error);
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
		printf("Indirect AGE action:\n"
		       " aged: %u\n"
		       " sec_since_last_hit_valid: %u\n"
		       " sec_since_last_hit: %" PRIu32 "\n",
		       query.age.aged,
		       query.age.sec_since_last_hit_valid,
		       query.age.sec_since_last_hit);
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("Indirect COUNT action:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		printf("Conntrack Context:\n"
		       " Peer: %u, Flow dir: %s, Enable: %u\n"
		       " Live: %u, SACK: %u, CACK: %u\n"
		       " Packet dir: %s, Liberal: %u, State: %u\n"
		       " Factor: %u, Retrans: %u, TCP flags: %u\n"
		       " Last Seq: %u, Last ACK: %u\n"
		       " Last Win: %u, Last End: %u\n",
		       query.ct.peer_port,
		       query.ct.is_original_dir ? "Original" : "Reply",
		       query.ct.enable, query.ct.live_connection,
		       query.ct.selective_ack, query.ct.challenge_ack_passed,
		       query.ct.last_direction ? "Original" : "Reply",
		       query.ct.liberal_mode, query.ct.state,
		       query.ct.max_ack_window, query.ct.retransmission_limit,
		       query.ct.last_index, query.ct.last_seq,
		       query.ct.last_ack, query.ct.last_window,
		       query.ct.last_end);
		printf(" Original Dir:\n"
		       " scale: %u, fin: %u, ack seen: %u\n"
		       " unacked data: %u\n Sent end: %u,"
		       " Reply end: %u, Max win: %u, Max ACK: %u\n",
		       query.ct.original_dir.scale,
		       query.ct.original_dir.close_initiated,
		       query.ct.original_dir.last_ack_seen,
		       query.ct.original_dir.data_unacked,
		       query.ct.original_dir.sent_end,
		       query.ct.original_dir.reply_end,
		       query.ct.original_dir.max_win,
		       query.ct.original_dir.max_ack);
		printf(" Reply Dir:\n"
		       " scale: %u, fin: %u, ack seen: %u\n"
		       " unacked data: %u\n Sent end: %u,"
		       " Reply end: %u, Max win: %u, Max ACK: %u\n",
		       query.ct.reply_dir.scale,
		       query.ct.reply_dir.close_initiated,
		       query.ct.reply_dir.last_ack_seen,
		       query.ct.reply_dir.data_unacked,
		       query.ct.reply_dir.sent_end,
		       query.ct.reply_dir.reply_end,
		       query.ct.reply_dir.max_win,
		       query.ct.reply_dir.max_ack);
		break;
	default:
		printf("Indirect action %u (type: %d) on port %u doesn't support query\n",
		       id, pia->type, port_id);
		break;
	}
	return 0;
}
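/*
 * Tunnel offload helpers: before a rule with tunnel semantics is validated
 * or created, the PMD-provided pattern items and actions obtained from
 * rte_flow_tunnel_match()/rte_flow_tunnel_decap_set() are prepended to the
 * user-supplied arrays; the release helper below frees them afterwards.
 */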
static struct port_flow_tunnel *
port_flow_tunnel_offload_cmd_prep(portid_t port_id,
				  const struct rte_flow_item *pattern,
				  const struct rte_flow_action *actions,
				  const struct tunnel_ops *tunnel_ops)
{
	int ret;
	struct rte_port *port;
	struct port_flow_tunnel *pft;
	struct rte_flow_error error;

	port = &ports[port_id];
	pft = port_flow_locate_tunnel_id(port, tunnel_ops->id);
	if (!pft) {
		printf("failed to locate port flow tunnel #%u\n",
			tunnel_ops->id);
		return NULL;
	}
	if (tunnel_ops->actions) {
		uint32_t num_actions;
		const struct rte_flow_action *aptr;

		ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel,
						&pft->pmd_actions,
						&pft->num_pmd_actions,
						&error);
		if (ret) {
			port_flow_complain(&error);
			return NULL;
		}
		for (aptr = actions, num_actions = 1;
		     aptr->type != RTE_FLOW_ACTION_TYPE_END;
		     aptr++, num_actions++);
		pft->actions = malloc(
				(num_actions + pft->num_pmd_actions) *
				sizeof(actions[0]));
		if (!pft->actions) {
			rte_flow_tunnel_action_decap_release(
					port_id, pft->pmd_actions,
					pft->num_pmd_actions, &error);
			return NULL;
		}
		rte_memcpy(pft->actions, pft->pmd_actions,
			   pft->num_pmd_actions * sizeof(actions[0]));
		rte_memcpy(pft->actions + pft->num_pmd_actions, actions,
			   num_actions * sizeof(actions[0]));
	}
	if (tunnel_ops->items) {
		uint32_t num_items;
		const struct rte_flow_item *iptr;

		ret = rte_flow_tunnel_match(port_id, &pft->tunnel,
					    &pft->pmd_items,
					    &pft->num_pmd_items,
					    &error);
		if (ret) {
			port_flow_complain(&error);
			return NULL;
		}
		for (iptr = pattern, num_items = 1;
		     iptr->type != RTE_FLOW_ITEM_TYPE_END;
		     iptr++, num_items++);
		pft->items = malloc((num_items + pft->num_pmd_items) *
				    sizeof(pattern[0]));
		if (!pft->items) {
			rte_flow_tunnel_item_release(
					port_id, pft->pmd_items,
					pft->num_pmd_items, &error);
			return NULL;
		}
		rte_memcpy(pft->items, pft->pmd_items,
			   pft->num_pmd_items * sizeof(pattern[0]));
		rte_memcpy(pft->items + pft->num_pmd_items, pattern,
			   num_items * sizeof(pattern[0]));
	}

	return pft;
}
static void
port_flow_tunnel_offload_cmd_release(portid_t port_id,
				     const struct tunnel_ops *tunnel_ops,
				     struct port_flow_tunnel *pft)
{
	struct rte_flow_error error;

	if (tunnel_ops->actions) {
		free(pft->actions);
		rte_flow_tunnel_action_decap_release(
			port_id, pft->pmd_actions,
			pft->num_pmd_actions, &error);
		pft->actions = NULL;
		pft->pmd_actions = NULL;
	}
	if (tunnel_ops->items) {
		free(pft->items);
		rte_flow_tunnel_item_release(port_id, pft->pmd_items,
					     pft->num_pmd_items, &error);
		pft->items = NULL;
		pft->pmd_items = NULL;
	}
}
/** Add port meter policy */
int
port_meter_policy_add(portid_t port_id, uint32_t policy_id,
		      const struct rte_flow_action *actions)
{
	struct rte_mtr_error error;
	const struct rte_flow_action *act = actions;
	const struct rte_flow_action *start;
	struct rte_mtr_meter_policy_params policy;
	uint32_t i = 0, act_n;
	int ret;

	for (i = 0; i < RTE_COLORS; i++) {
		for (act_n = 0, start = act;
			act->type != RTE_FLOW_ACTION_TYPE_END; act++)
			act_n++;
		if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
			policy.actions[i] = start;
		else
			policy.actions[i] = NULL;
		act++;
	}
	ret = rte_mtr_meter_policy_add(port_id,
			policy_id,
			&policy, &error);
	if (ret)
		print_mtr_err_msg(&error);
	return ret;
}
/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions,
		   const struct tunnel_ops *tunnel_ops)
{
	struct rte_flow_error error;
	struct port_flow_tunnel *pft = NULL;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (tunnel_ops->enabled) {
		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
							actions, tunnel_ops);
		if (!pft)
			return -ENOENT;
		if (pft->items)
			pattern = pft->items;
		if (pft->actions)
			actions = pft->actions;
	}
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	if (tunnel_ops->enabled)
		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
	printf("Flow rule validated\n");
	return 0;
}
/** Return age action structure if exists, otherwise NULL. */
static struct rte_flow_action_age *
age_action_get(const struct rte_flow_action *actions)
{
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_AGE:
			return (struct rte_flow_action_age *)
				(uintptr_t)actions->conf;
		default:
			break;
		}
	}
	return NULL;
}
/** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions,
		 const struct tunnel_ops *tunnel_ops)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id = 0;
	struct rte_flow_error error;
	struct port_flow_tunnel *pft = NULL;
	struct rte_flow_action_age *age = age_action_get(actions);

	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first");
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	}
	if (tunnel_ops->enabled) {
		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
							actions, tunnel_ops);
		if (!pft)
			return -ENOENT;
		if (pft->items)
			pattern = pft->items;
		if (pft->actions)
			actions = pft->actions;
	}
	pf = port_flow_new(attr, pattern, actions, &error);
	if (!pf)
		return port_flow_complain(&error);
	if (age) {
		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
		age->context = &pf->age_type;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow) {
		if (tunnel_ops->enabled)
			port_flow_tunnel_offload_cmd_release(port_id,
							     tunnel_ops, pft);
		free(pf);
		return port_flow_complain(&error);
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	if (tunnel_ops->enabled)
		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}
/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			*tmp = pf->next;
			free(pf);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
	}
	return ret;
}
/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
	struct rte_flow_error error;
	struct rte_port *port;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	port = &ports[port_id];

	if (port->flow_list == NULL)
		return ret;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		port_flow_complain(&error);
	}

	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}
/** Dump flow rules. */
int
port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id,
	       const char *file_name)
{
	int ret = 0;
	FILE *file = stdout;
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pflow;
	struct rte_flow *tmpFlow = NULL;
	bool found = false;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	if (!dump_all) {
		port = &ports[port_id];
		pflow = port->flow_list;
		while (pflow) {
			if (rule_id != pflow->id) {
				pflow = pflow->next;
			} else {
				tmpFlow = pflow->flow;
				if (tmpFlow)
					found = true;
				break;
			}
		}
		if (found == false) {
			printf("Failed to dump flow %d: rule not found\n",
			       rule_id);
			return -EINVAL;
		}
	}

	if (file_name && strlen(file_name)) {
		file = fopen(file_name, "w");
		if (!file) {
			printf("Failed to create file %s: %s\n", file_name,
			       strerror(errno));
			return -errno;
		}
	}

	if (!dump_all)
		ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error);
	else
		ret = rte_flow_dev_dump(port_id, NULL, file, &error);
	if (ret) {
		port_flow_complain(&error);
		printf("Failed to dump flow: %s\n", strerror(-ret));
	} else
		printf("Flow dump finished\n");
	if (file_name && strlen(file_name))
		fclose(file);
	return ret;
}
/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
		const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
		struct rte_flow_action_rss rss_conf;
		struct rte_flow_query_age age;
	} query;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		printf("Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
			    &name, sizeof(name),
			    (void *)(uintptr_t)action->type, &error);
	if (ret < 0)
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
	case RTE_FLOW_ACTION_TYPE_RSS:
	case RTE_FLOW_ACTION_TYPE_AGE:
		break;
	default:
		printf("Cannot query action type %d (%s)\n",
			action->type, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rss_config_display(&query.rss_conf);
		break;
	case RTE_FLOW_ACTION_TYPE_AGE:
		printf("%s:\n"
		       " aged: %u\n"
		       " sec_since_last_hit_valid: %u\n"
		       " sec_since_last_hit: %" PRIu32 "\n",
		       name,
		       query.age.aged,
		       query.age.sec_since_last_hit_valid,
		       query.age.sec_since_last_hit);
		break;
	default:
		printf("Cannot display result for action type %d (%s)\n",
		       action->type, name);
		break;
	}
	return 0;
}
/** List simply and destroy all aged flows. */
void
port_flow_aged(portid_t port_id, uint8_t destroy)
{
	void **contexts;
	int nb_context, total = 0, idx;
	struct rte_flow_error error;
	enum age_action_context_type *type;
	union {
		struct port_flow *pf;
		struct port_indirect_action *pia;
	} ctx;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
	printf("Port %u total aged flows: %d\n", port_id, total);
	if (total < 0) {
		port_flow_complain(&error);
		return;
	}
	if (total == 0)
		return;
	contexts = malloc(sizeof(void *) * total);
	if (contexts == NULL) {
		printf("Cannot allocate contexts for aged flow\n");
		return;
	}
	printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
	nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
	if (nb_context != total) {
		printf("Port:%d get aged flows count(%d) != total(%d)\n",
			port_id, nb_context, total);
		free(contexts);
		return;
	}
	total = 0;
	for (idx = 0; idx < nb_context; idx++) {
		if (!contexts[idx]) {
			printf("Error: get Null context in port %u\n", port_id);
			continue;
		}
		type = (enum age_action_context_type *)contexts[idx];
		switch (*type) {
		case ACTION_AGE_CONTEXT_TYPE_FLOW:
			ctx.pf = container_of(type, struct port_flow, age_type);
			printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32
								 "\t%c%c%c\t\n",
			       "Flow",
			       ctx.pf->id,
			       ctx.pf->rule.attr->group,
			       ctx.pf->rule.attr->priority,
			       ctx.pf->rule.attr->ingress ? 'i' : '-',
			       ctx.pf->rule.attr->egress ? 'e' : '-',
			       ctx.pf->rule.attr->transfer ? 't' : '-');
			if (destroy && !port_flow_destroy(port_id, 1,
							  &ctx.pf->id))
				total++;
			break;
		case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION:
			ctx.pia = container_of(type,
					struct port_indirect_action, age_type);
			printf("%-20s\t%" PRIu32 "\n", "Indirect action",
			       ctx.pia->id);
			break;
		default:
			printf("Error: invalid context type %u\n", port_id);
			break;
		}
	}
	printf("\n%d flows destroyed\n", total);
	free(contexts);
}
/** List flow rules. */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;
		const struct rte_flow_attr *curr = pf->rule.attr;
		uint32_t i;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (curr->group == group[i])
					break;
			if (i == n)
				continue;
		}
		for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
			const struct rte_flow_attr *comp = (*tmp)->rule.attr;

			if (curr->group > comp->group ||
			    (curr->group == comp->group &&
			     curr->priority > comp->priority) ||
			    (curr->group == comp->group &&
			     curr->priority == comp->priority &&
			     pf->id > (*tmp)->id))
				continue;
			break;
		}
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->rule.pattern;
		const struct rte_flow_action *action = pf->rule.actions;
		const char *name;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
		       pf->id,
		       pf->rule.attr->group,
		       pf->rule.attr->priority,
		       pf->rule.attr->ingress ? 'i' : '-',
		       pf->rule.attr->egress ? 'e' : '-',
		       pf->rule.attr->transfer ? 't' : '-');
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if ((uint32_t)item->type > INT_MAX)
				name = "PMD_INTERNAL";
			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
					  &name, sizeof(name),
					  (void *)(uintptr_t)item->type,
					  NULL) <= 0)
				name = "[UNKNOWN]";
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", name);
			++item;
		}
		printf("=>");
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if ((uint32_t)action->type > INT_MAX)
				name = "PMD_INTERNAL";
			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
					  &name, sizeof(name),
					  (void *)(uintptr_t)action->type,
					  NULL) <= 0)
				name = "[UNKNOWN]";
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", name);
			++action;
		}
		printf("\n");
	}
}
/** Restrict ingress traffic to the defined flow rules. */
int
port_flow_isolate(portid_t port_id, int set)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_isolate(port_id, set, &error))
		return port_flow_complain(&error);
	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
	       port_id,
	       set ? "now restricted" : "not restricted anymore");
	return 0;
}
/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}
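/*
 * Resolve the effective ring size of a queue: prefer the value the PMD
 * reports via the queue-info API, then the size requested at setup time,
 * then the PMD default, then the ethdev fallback ring size.
 */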
static int
get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
{
	struct rte_port *port = &ports[port_id];
	struct rte_eth_rxq_info rx_qinfo;
	int ret;

	ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
	if (ret == 0) {
		*ring_size = rx_qinfo.nb_desc;
		return ret;
	}

	if (ret != -ENOTSUP)
		return ret;
	/*
	 * If rte_eth_rx_queue_info_get is not supported by this PMD,
	 * the ring_size stored in testpmd is used for validity verification.
	 * When the rxq is configured by rte_eth_rx_queue_setup with nb_rx_desc
	 * set to 0, a default value provided by the PMD is used to set up this
	 * rxq. If that default value is also 0,
	 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE is used to set up this rxq.
	 */
	if (port->nb_rx_desc[rxq_id])
		*ring_size = port->nb_rx_desc[rxq_id];
	else if (port->dev_info.default_rxportconf.ring_size)
		*ring_size = port->dev_info.default_rxportconf.ring_size;
	else
		*ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	return 0;
}
2405 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
2407 struct rte_port *port = &ports[port_id];
2408 struct rte_eth_txq_info tx_qinfo;
2411 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
2413 *ring_size = tx_qinfo.nb_desc;
2417 if (ret != -ENOTSUP)
2420 * If rte_eth_tx_queue_info_get is not supported by this PMD,
2421 * the ring_size stored in testpmd will be used for validity verification.
2422 * When the txq is configured by rte_eth_tx_queue_setup with nb_tx_desc
2423 * being 0, a default value provided by the PMD is used to set up this
2424 * txq. If that default value is also 0,
2425 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE is used to set up this txq.
2427 if (port->nb_tx_desc[txq_id])
2428 *ring_size = port->nb_tx_desc[txq_id];
2429 else if (port->dev_info.default_txportconf.ring_size)
2430 *ring_size = port->dev_info.default_txportconf.ring_size;
2432 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2437 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
2442 ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
2446 if (rxdesc_id < ring_size)
2449 printf("Invalid RX descriptor %u (must be < ring_size=%u)\n",
2450 rxdesc_id, ring_size);
2455 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
2460 ret = get_tx_ring_size(port_id, txq_id, &ring_size);
2464 if (txdesc_id < ring_size)
2467 printf("Invalid TX descriptor %u (must be < ring_size=%u)\n",
2468 txdesc_id, ring_size);
2472 static const struct rte_memzone *
2473 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
2475 char mz_name[RTE_MEMZONE_NAMESIZE];
2476 const struct rte_memzone *mz;
2478 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
2479 port_id, q_id, ring_name);
2480 mz = rte_memzone_lookup(mz_name);
2482 printf("%s ring memory zoneof (port %d, queue %d) not"
2483 "found (zone name = %s\n",
2484 ring_name, port_id, q_id, mz_name);
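/*
 * Example of the naming scheme above: the RX ring of port 0, queue 1
 * is registered under the memzone name "eth_p0_q1_rx_ring".
 */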
2488 union igb_ring_dword {
2491 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2501 struct igb_ring_desc_32_bytes {
2502 union igb_ring_dword lo_dword;
2503 union igb_ring_dword hi_dword;
2504 union igb_ring_dword resv1;
2505 union igb_ring_dword resv2;
2508 struct igb_ring_desc_16_bytes {
2509 union igb_ring_dword lo_dword;
2510 union igb_ring_dword hi_dword;
2514 ring_rxd_display_dword(union igb_ring_dword dword)
2516 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
2517 (unsigned)dword.words.hi);
2521 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
2522 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2525 __rte_unused portid_t port_id,
2529 struct igb_ring_desc_16_bytes *ring =
2530 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2531 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2533 struct rte_eth_dev_info dev_info;
2535 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2539 if (strstr(dev_info.driver_name, "i40e") != NULL) {
2540 /* 32-byte RX descriptor, i40e only */
2541 struct igb_ring_desc_32_bytes *ring =
2542 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
2543 ring[desc_id].lo_dword.dword =
2544 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2545 ring_rxd_display_dword(ring[desc_id].lo_dword);
2546 ring[desc_id].hi_dword.dword =
2547 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2548 ring_rxd_display_dword(ring[desc_id].hi_dword);
2549 ring[desc_id].resv1.dword =
2550 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
2551 ring_rxd_display_dword(ring[desc_id].resv1);
2552 ring[desc_id].resv2.dword =
2553 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
2554 ring_rxd_display_dword(ring[desc_id].resv2);
2559 /* 16-byte RX descriptor */
2560 ring[desc_id].lo_dword.dword =
2561 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2562 ring_rxd_display_dword(ring[desc_id].lo_dword);
2563 ring[desc_id].hi_dword.dword =
2564 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2565 ring_rxd_display_dword(ring[desc_id].hi_dword);
2569 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
2571 struct igb_ring_desc_16_bytes *ring;
2572 struct igb_ring_desc_16_bytes txd;
2574 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2575 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2576 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2577 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
2578 (unsigned)txd.lo_dword.words.lo,
2579 (unsigned)txd.lo_dword.words.hi,
2580 (unsigned)txd.hi_dword.words.lo,
2581 (unsigned)txd.hi_dword.words.hi);
2585 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
2587 const struct rte_memzone *rx_mz;
2589 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
2591 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
2594 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
2598 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
2600 const struct rte_memzone *tx_mz;
2602 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
2604 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
2607 ring_tx_descriptor_display(tx_mz, txd_id);
2611 fwd_lcores_config_display(void)
2615 printf("List of forwarding lcores:");
2616 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
2617 printf(" %2u", fwd_lcores_cpuids[lc_id]);
2621 rxtx_config_display(void)
2626 printf(" %s packet forwarding%s packets/burst=%d\n",
2627 cur_fwd_eng->fwd_mode_name,
2628 retry_enabled == 0 ? "" : " with retry",
2631 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
2632 printf(" packet len=%u - nb packet segments=%d\n",
2633 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
2635 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
2636 nb_fwd_lcores, nb_fwd_ports);
2638 RTE_ETH_FOREACH_DEV(pid) {
2639 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
2640 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
2641 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
2642 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
2643 struct rte_eth_rxq_info rx_qinfo;
2644 struct rte_eth_txq_info tx_qinfo;
2645 uint16_t rx_free_thresh_tmp;
2646 uint16_t tx_free_thresh_tmp;
2647 uint16_t tx_rs_thresh_tmp;
2648 uint16_t nb_rx_desc_tmp;
2649 uint16_t nb_tx_desc_tmp;
2650 uint64_t offloads_tmp;
2651 uint8_t pthresh_tmp;
2652 uint8_t hthresh_tmp;
2653 uint8_t wthresh_tmp;
2656 /* per port config */
2657 printf(" port %d: RX queue number: %d Tx queue number: %d\n",
2658 (unsigned int)pid, nb_rxq, nb_txq);
2660 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
2661 ports[pid].dev_conf.rxmode.offloads,
2662 ports[pid].dev_conf.txmode.offloads);
2664 /* per-RX-queue config, shown only for the first queue to be less verbose */
2665 for (qid = 0; qid < 1; qid++) {
2666 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
2668 nb_rx_desc_tmp = nb_rx_desc[qid];
2669 rx_free_thresh_tmp =
2670 rx_conf[qid].rx_free_thresh;
2671 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
2672 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
2673 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
2674 offloads_tmp = rx_conf[qid].offloads;
2676 nb_rx_desc_tmp = rx_qinfo.nb_desc;
2677 rx_free_thresh_tmp =
2678 rx_qinfo.conf.rx_free_thresh;
2679 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
2680 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
2681 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
2682 offloads_tmp = rx_qinfo.conf.offloads;
2685 printf(" RX queue: %d\n", qid);
2686 printf(" RX desc=%d - RX free threshold=%d\n",
2687 nb_rx_desc_tmp, rx_free_thresh_tmp);
2688 printf(" RX threshold registers: pthresh=%d hthresh=%d "
2690 pthresh_tmp, hthresh_tmp, wthresh_tmp);
2691 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp);
2694 /* per-TX-queue config, shown only for the first queue to be less verbose */
2695 for (qid = 0; qid < 1; qid++) {
2696 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
2698 nb_tx_desc_tmp = nb_tx_desc[qid];
2699 tx_free_thresh_tmp =
2700 tx_conf[qid].tx_free_thresh;
2701 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
2702 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
2703 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
2704 offloads_tmp = tx_conf[qid].offloads;
2705 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
2707 nb_tx_desc_tmp = tx_qinfo.nb_desc;
2708 tx_free_thresh_tmp =
2709 tx_qinfo.conf.tx_free_thresh;
2710 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
2711 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
2712 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
2713 offloads_tmp = tx_qinfo.conf.offloads;
2714 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
2717 printf(" TX queue: %d\n", qid);
2718 printf(" TX desc=%d - TX free threshold=%d\n",
2719 nb_tx_desc_tmp, tx_free_thresh_tmp);
2720 printf(" TX threshold registers: pthresh=%d hthresh=%d "
2722 pthresh_tmp, hthresh_tmp, wthresh_tmp);
2723 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
2724 offloads_tmp, tx_rs_thresh_tmp);
2730 port_rss_reta_info(portid_t port_id,
2731 struct rte_eth_rss_reta_entry64 *reta_conf,
2732 uint16_t nb_entries)
2734 uint16_t i, idx, shift;
2737 if (port_id_is_invalid(port_id, ENABLED_WARN))
2740 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
2742 printf("Failed to get RSS RETA info, return code = %d\n", ret);
2746 for (i = 0; i < nb_entries; i++) {
2747 idx = i / RTE_RETA_GROUP_SIZE;
2748 shift = i % RTE_RETA_GROUP_SIZE;
2749 if (!(reta_conf[idx].mask & (1ULL << shift)))
2751 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
2752 i, reta_conf[idx].reta[shift]);
2757 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
2761 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
2763 struct rte_eth_rss_conf rss_conf = {0};
2764 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
2768 struct rte_eth_dev_info dev_info;
2769 uint8_t hash_key_size;
2772 if (port_id_is_invalid(port_id, ENABLED_WARN))
2775 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2779 if (dev_info.hash_key_size > 0 &&
2780 dev_info.hash_key_size <= sizeof(rss_key))
2781 hash_key_size = dev_info.hash_key_size;
2783 printf("dev_info did not provide a valid hash key size\n");
2787 /* Get RSS hash key if asked to display it */
2788 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
2789 rss_conf.rss_key_len = hash_key_size;
2790 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2794 printf("port index %d invalid\n", port_id);
2797 printf("operation not supported by device\n");
2800 printf("operation failed - diag=%d\n", diag);
2805 rss_hf = rss_conf.rss_hf;
2807 printf("RSS disabled\n");
2810 printf("RSS functions:\n ");
2811 for (i = 0; rss_type_table[i].str; i++) {
2812 if (rss_hf & rss_type_table[i].rss_type)
2813 printf("%s ", rss_type_table[i].str);
2818 printf("RSS key:\n");
2819 for (i = 0; i < hash_key_size; i++)
2820 printf("%02X", rss_key[i]);
2825 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
2826 uint8_t hash_key_len)
2828 struct rte_eth_rss_conf rss_conf;
2832 rss_conf.rss_key = NULL;
2833 rss_conf.rss_key_len = hash_key_len;
2834 rss_conf.rss_hf = 0;
2835 for (i = 0; rss_type_table[i].str; i++) {
2836 if (!strcmp(rss_type_table[i].str, rss_type))
2837 rss_conf.rss_hf = rss_type_table[i].rss_type;
2839 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2841 rss_conf.rss_key = hash_key;
2842 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
2849 printf("port index %d invalid\n", port_id);
2852 printf("operation not supported by device\n");
2855 printf("operation failed - diag=%d\n", diag);
2861 * Setup forwarding configuration for each logical core.
2864 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
2866 streamid_t nb_fs_per_lcore;
2874 nb_fs = cfg->nb_fwd_streams;
2875 nb_fc = cfg->nb_fwd_lcores;
2876 if (nb_fs <= nb_fc) {
2877 nb_fs_per_lcore = 1;
2880 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
2881 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
2884 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
2886 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
2887 fwd_lcores[lc_id]->stream_idx = sm_id;
2888 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
2889 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2893 * Assign extra remaining streams, if any.
2895 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
2896 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
2897 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
2898 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
2899 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
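/*
 * Worked example for the distribution above: with nb_fs = 10 streams
 * and nb_fc = 4 lcores, nb_fs_per_lcore = 2 and nb_extra = 2, so the
 * first two lcores are assigned 2 streams each and the last two are
 * assigned 3 streams each (2 + 2 + 3 + 3 = 10).
 */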
2904 fwd_topology_tx_port_get(portid_t rxp)
2906 static int warning_once = 1;
2908 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
2910 switch (port_topology) {
2912 case PORT_TOPOLOGY_PAIRED:
2913 if ((rxp & 0x1) == 0) {
2914 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2917 printf("\nWarning! port-topology=paired"
2918 " and odd forward ports number,"
2919 " the last port will pair with"
2926 case PORT_TOPOLOGY_CHAINED:
2927 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2928 case PORT_TOPOLOGY_LOOP:
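/*
 * Example with 4 forwarding ports: "paired" forwards 0<->1 and 2<->3,
 * "chained" forwards 0->1->2->3->0, and "loop" sends traffic back out
 * of the port it was received on.
 */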
2934 simple_fwd_config_setup(void)
2938 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2939 cur_fwd_config.nb_fwd_streams =
2940 (streamid_t) cur_fwd_config.nb_fwd_ports;
2942 /* reinitialize forwarding streams */
2946 * In the simple forwarding test, the number of forwarding cores
2947 * must be lower than or equal to the number of forwarding ports.
2949 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2950 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2951 cur_fwd_config.nb_fwd_lcores =
2952 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
2953 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2955 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2956 fwd_streams[i]->rx_port = fwd_ports_ids[i];
2957 fwd_streams[i]->rx_queue = 0;
2958 fwd_streams[i]->tx_port =
2959 fwd_ports_ids[fwd_topology_tx_port_get(i)];
2960 fwd_streams[i]->tx_queue = 0;
2961 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2962 fwd_streams[i]->retry_enabled = retry_enabled;
2967 * For the RSS forwarding test, all streams are distributed over the lcores. Each
2968 * stream is composed of an RX queue to poll on an RX port for input packets,
2969 * associated with a TX queue of a TX port to which forwarded packets are sent.
2972 rss_fwd_config_setup(void)
2983 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2984 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2985 cur_fwd_config.nb_fwd_streams =
2986 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2988 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2989 cur_fwd_config.nb_fwd_lcores =
2990 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2992 /* reinitialize forwarding streams */
2995 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2997 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2998 struct fwd_stream *fs;
3000 fs = fwd_streams[sm_id];
3001 txp = fwd_topology_tx_port_get(rxp);
3002 fs->rx_port = fwd_ports_ids[rxp];
3004 fs->tx_port = fwd_ports_ids[txp];
3006 fs->peer_addr = fs->tx_port;
3007 fs->retry_enabled = retry_enabled;
3009 if (rxp < nb_fwd_ports)
3017 get_fwd_port_total_tc_num(void)
3019 struct rte_eth_dcb_info dcb_info;
3020 uint16_t total_tc_num = 0;
3023 for (i = 0; i < nb_fwd_ports; i++) {
3024 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info);
3025 total_tc_num += dcb_info.nb_tcs;
3028 return total_tc_num;
3032 * For the DCB forwarding test, each core is assigned to one traffic class.
3034 * Each core is assigned multiple streams, each stream being composed of
3035 * an RX queue to poll on an RX port for input packets, associated with
3036 * a TX queue of a TX port to which forwarded packets are sent. All RX and
3037 * TX queues map to the same traffic class.
3038 * If VMDQ and DCB co-exist, traffic classes on different pools share
3042 dcb_fwd_config_setup(void)
3044 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
3045 portid_t txp, rxp = 0;
3046 queueid_t txq, rxq = 0;
3048 uint16_t nb_rx_queue, nb_tx_queue;
3049 uint16_t i, j, k, sm_id = 0;
3050 uint16_t total_tc_num;
3051 struct rte_port *port;
3057 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED
3058 * or RTE_PORT_STOPPED.
3060 * Re-configure ports to get updated mapping between tc and queue in
3061 * case the queue number of the port is changed. Skip for started ports
3062 * since modifying queue number and calling dev_configure need to stop
3065 for (pid = 0; pid < nb_fwd_ports; pid++) {
3066 if (port_is_started(pid) == 1)
3070 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq,
3073 printf("Failed to re-configure port %d, ret = %d.\n",
3079 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3080 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3081 cur_fwd_config.nb_fwd_streams =
3082 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
3083 total_tc_num = get_fwd_port_total_tc_num();
3084 if (cur_fwd_config.nb_fwd_lcores > total_tc_num)
3085 cur_fwd_config.nb_fwd_lcores = total_tc_num;
3087 /* reinitialize forwarding streams */
3091 /* get the dcb info on the first RX and TX ports */
3092 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
3093 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
3095 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
3096 fwd_lcores[lc_id]->stream_nb = 0;
3097 fwd_lcores[lc_id]->stream_idx = sm_id;
3098 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
3099 /* If nb_queue is zero, this TC is
3100 * not enabled on the pool
3102 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
3104 k = fwd_lcores[lc_id]->stream_nb +
3105 fwd_lcores[lc_id]->stream_idx;
3106 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
3107 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
3108 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
3109 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
3110 for (j = 0; j < nb_rx_queue; j++) {
3111 struct fwd_stream *fs;
3113 fs = fwd_streams[k + j];
3114 fs->rx_port = fwd_ports_ids[rxp];
3115 fs->rx_queue = rxq + j;
3116 fs->tx_port = fwd_ports_ids[txp];
3117 fs->tx_queue = txq + j % nb_tx_queue;
3118 fs->peer_addr = fs->tx_port;
3119 fs->retry_enabled = retry_enabled;
3121 fwd_lcores[lc_id]->stream_nb +=
3122 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
3124 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
3127 if (tc < rxp_dcb_info.nb_tcs)
3129 /* Restart from TC 0 on next RX port */
3131 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
3133 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
3136 if (rxp >= nb_fwd_ports)
3138 /* get the dcb information on next RX and TX ports */
3139 if ((rxp & 0x1) == 0)
3140 txp = (portid_t) (rxp + 1);
3142 txp = (portid_t) (rxp - 1);
3143 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
3144 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
3149 icmp_echo_config_setup(void)
3156 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
3157 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
3158 (nb_txq * nb_fwd_ports);
3160 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3161 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3162 cur_fwd_config.nb_fwd_streams =
3163 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
3164 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
3165 cur_fwd_config.nb_fwd_lcores =
3166 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
3167 if (verbose_level > 0) {
3168 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
3170 cur_fwd_config.nb_fwd_lcores,
3171 cur_fwd_config.nb_fwd_ports,
3172 cur_fwd_config.nb_fwd_streams);
3175 /* reinitialize forwarding streams */
3177 setup_fwd_config_of_each_lcore(&cur_fwd_config);
3179 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
3180 if (verbose_level > 0)
3181 printf(" core=%d: \n", lc_id);
3182 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
3183 struct fwd_stream *fs;
3184 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
3185 fs->rx_port = fwd_ports_ids[rxp];
3187 fs->tx_port = fs->rx_port;
3189 fs->peer_addr = fs->tx_port;
3190 fs->retry_enabled = retry_enabled;
3191 if (verbose_level > 0)
3192 printf(" stream=%d port=%d rxq=%d txq=%d\n",
3193 sm_id, fs->rx_port, fs->rx_queue,
3195 rxq = (queueid_t) (rxq + 1);
3196 if (rxq == nb_rxq) {
3198 rxp = (portid_t) (rxp + 1);
3205 fwd_config_setup(void)
3207 struct rte_port *port;
3211 cur_fwd_config.fwd_eng = cur_fwd_eng;
3212 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
3213 icmp_echo_config_setup();
3217 if ((nb_rxq > 1) && (nb_txq > 1)) {
3219 for (i = 0; i < nb_fwd_ports; i++) {
3220 pt_id = fwd_ports_ids[i];
3221 port = &ports[pt_id];
3222 if (!port->dcb_flag) {
3223 printf("In DCB mode, all forwarding ports must "
3224 "be configured in this mode.\n");
3228 if (nb_fwd_lcores == 1) {
3229 printf("In DCB mode,the nb forwarding cores "
3230 "should be larger than 1.\n");
3234 dcb_fwd_config_setup();
3236 rss_fwd_config_setup();
3239 simple_fwd_config_setup();
3243 mp_alloc_to_str(uint8_t mode)
3246 case MP_ALLOC_NATIVE:
3252 case MP_ALLOC_XMEM_HUGE:
3262 pkt_fwd_config_display(struct fwd_config *cfg)
3264 struct fwd_stream *fs;
3268 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
3269 "NUMA support %s, MP allocation mode: %s\n",
3270 cfg->fwd_eng->fwd_mode_name,
3271 retry_enabled == 0 ? "" : " with retry",
3272 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
3273 numa_support == 1 ? "enabled" : "disabled",
3274 mp_alloc_to_str(mp_alloc_type));
3277 printf("TX retry num: %u, delay between TX retries: %uus\n",
3278 burst_tx_retry_num, burst_tx_delay_time);
3279 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
3280 printf("Logical Core %u (socket %u) forwards packets on "
3282 fwd_lcores_cpuids[lc_id],
3283 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
3284 fwd_lcores[lc_id]->stream_nb);
3285 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
3286 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
3287 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
3288 "P=%d/Q=%d (socket %u) ",
3289 fs->rx_port, fs->rx_queue,
3290 ports[fs->rx_port].socket_id,
3291 fs->tx_port, fs->tx_queue,
3292 ports[fs->tx_port].socket_id);
3293 print_ethaddr("peer=",
3294 &peer_eth_addrs[fs->peer_addr]);
3302 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
3304 struct rte_ether_addr new_peer_addr;
3305 if (!rte_eth_dev_is_valid_port(port_id)) {
3306 printf("Error: Invalid port number %i\n", port_id);
3309 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
3310 printf("Error: Invalid ethernet address: %s\n", peer_addr);
3313 peer_eth_addrs[port_id] = new_peer_addr;
3317 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
3320 unsigned int lcore_cpuid;
3325 for (i = 0; i < nb_lc; i++) {
3326 lcore_cpuid = lcorelist[i];
3327 if (!rte_lcore_is_enabled(lcore_cpuid)) {
3328 printf("lcore %u not enabled\n", lcore_cpuid);
3331 if (lcore_cpuid == rte_get_main_lcore()) {
3332 printf("lcore %u cannot be masked on for running "
3333 "packet forwarding, which is the main lcore "
3334 "and reserved for command line parsing only\n",
3339 fwd_lcores_cpuids[i] = lcore_cpuid;
3341 if (record_now == 0) {
3345 nb_cfg_lcores = (lcoreid_t) nb_lc;
3346 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
3347 printf("previous number of forwarding cores %u - changed to "
3348 "number of configured cores %u\n",
3349 (unsigned int) nb_fwd_lcores, nb_lc);
3350 nb_fwd_lcores = (lcoreid_t) nb_lc;
3357 set_fwd_lcores_mask(uint64_t lcoremask)
3359 unsigned int lcorelist[64];
3363 if (lcoremask == 0) {
3364 printf("Invalid NULL mask of cores\n");
3368 for (i = 0; i < 64; i++) {
3369 if (!((uint64_t)(1ULL << i) & lcoremask))
3371 lcorelist[nb_lc++] = i;
3373 return set_fwd_lcores_list(lcorelist, nb_lc);
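/*
 * Example: a mask of 0xf0 selects lcores 4, 5, 6 and 7; the resulting
 * list is then validated and recorded by set_fwd_lcores_list().
 */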
3377 set_fwd_lcores_number(uint16_t nb_lc)
3379 if (test_done == 0) {
3380 printf("Please stop forwarding first\n");
3383 if (nb_lc > nb_cfg_lcores) {
3384 printf("nb fwd cores %u > %u (max. number of configured "
3385 "lcores) - ignored\n",
3386 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
3389 nb_fwd_lcores = (lcoreid_t) nb_lc;
3390 printf("Number of forwarding cores set to %u\n",
3391 (unsigned int) nb_fwd_lcores);
3395 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
3403 for (i = 0; i < nb_pt; i++) {
3404 port_id = (portid_t) portlist[i];
3405 if (port_id_is_invalid(port_id, ENABLED_WARN))
3408 fwd_ports_ids[i] = port_id;
3410 if (record_now == 0) {
3414 nb_cfg_ports = (portid_t) nb_pt;
3415 if (nb_fwd_ports != (portid_t) nb_pt) {
3416 printf("previous number of forwarding ports %u - changed to "
3417 "number of configured ports %u\n",
3418 (unsigned int) nb_fwd_ports, nb_pt);
3419 nb_fwd_ports = (portid_t) nb_pt;
3424 * Parse the user input and obtain the list of forwarding ports
3427 * String containing the user input. User can specify
3428 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
3429 * For example, if the user wants to use all the available
3430 * 4 ports in the system, then the input can be 0-3 or 0,1,2,3.
3431 * If the user wants to use only the ports 1,2 then the input
3433 * valid characters are '-' and ','
3434 * @param[out] values
3435 * This array will be filled with a list of port IDs
3436 * based on the user input
3437 * Note that duplicate entries are discarded; only the first
3438 * "count" entries in this array are port IDs, while the rest
3439 * keep their default values
3440 * @param[in] maxsize
3441 * This parameter denotes two things:
3442 * 1) Number of elements in the values array
3443 * 2) Maximum value of each element in the values array
3445 * On success, returns total count of parsed port IDs
3446 * On failure, returns 0
3449 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
3451 unsigned int count = 0;
3455 unsigned int marked[maxsize];
3457 if (list == NULL || values == NULL)
3460 for (i = 0; i < (int)maxsize; i++)
3466 /* Remove blank spaces, if any */
3467 while (isblank(*list))
3472 value = strtol(list, &end, 10);
3473 if (errno || end == NULL)
3475 if (value < 0 || value >= (int)maxsize)
3477 while (isblank(*end))
3479 if (*end == '-' && min == INT_MAX) {
3481 } else if ((*end == ',') || (*end == '\0')) {
3485 for (i = min; i <= max; i++) {
3486 if (count < maxsize) {
3498 } while (*end != '\0');
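/*
 * Example: with maxsize = 8, the list "0-2,5" fills values[] with
 * { 0, 1, 2, 5 } and returns 4; a duplicate such as "1,1-2" stores
 * port ID 1 only once.
 */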
3504 parse_fwd_portlist(const char *portlist)
3506 unsigned int portcount;
3507 unsigned int portindex[RTE_MAX_ETHPORTS];
3508 unsigned int i, valid_port_count = 0;
3510 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
3512 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
3515 * Here we verify the validity of the ports
3516 * and thereby calculate the total number of
3519 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
3520 if (rte_eth_dev_is_valid_port(portindex[i])) {
3521 portindex[valid_port_count] = portindex[i];
3526 set_fwd_ports_list(portindex, valid_port_count);
3530 set_fwd_ports_mask(uint64_t portmask)
3532 unsigned int portlist[64];
3536 if (portmask == 0) {
3537 printf("Invalid NULL mask of ports\n");
3541 RTE_ETH_FOREACH_DEV(i) {
3542 if (!((uint64_t)(1ULL << i) & portmask))
3544 portlist[nb_pt++] = i;
3546 set_fwd_ports_list(portlist, nb_pt);
3550 set_fwd_ports_number(uint16_t nb_pt)
3552 if (nb_pt > nb_cfg_ports) {
3553 printf("nb fwd ports %u > %u (number of configured "
3554 "ports) - ignored\n",
3555 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
3558 nb_fwd_ports = (portid_t) nb_pt;
3559 printf("Number of forwarding ports set to %u\n",
3560 (unsigned int) nb_fwd_ports);
3564 port_is_forwarding(portid_t port_id)
3568 if (port_id_is_invalid(port_id, ENABLED_WARN))
3571 for (i = 0; i < nb_fwd_ports; i++) {
3572 if (fwd_ports_ids[i] == port_id)
3580 set_nb_pkt_per_burst(uint16_t nb)
3582 if (nb > MAX_PKT_BURST) {
3583 printf("nb pkt per burst: %u > %u (maximum packet per burst) "
3585 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
3588 nb_pkt_per_burst = nb;
3589 printf("Number of packets per burst set to %u\n",
3590 (unsigned int) nb_pkt_per_burst);
3594 tx_split_get_name(enum tx_pkt_split split)
3598 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3599 if (tx_split_name[i].split == split)
3600 return tx_split_name[i].name;
3606 set_tx_pkt_split(const char *name)
3610 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3611 if (strcmp(tx_split_name[i].name, name) == 0) {
3612 tx_pkt_split = tx_split_name[i].split;
3616 printf("unknown value: \"%s\"\n", name);
3620 parse_fec_mode(const char *name, uint32_t *mode)
3624 for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
3625 if (strcmp(fec_mode_name[i].name, name) == 0) {
3626 *mode = RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
3634 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa)
3638 printf("FEC capabilities:\n");
3640 for (i = 0; i < num; i++) {
3642 rte_eth_link_speed_to_str(speed_fec_capa[i].speed));
3644 for (j = 0; j < RTE_DIM(fec_mode_name); j++) {
3645 if (RTE_ETH_FEC_MODE_TO_CAPA(j) &
3646 speed_fec_capa[i].capa)
3647 printf("%s ", fec_mode_name[j].name);
3654 show_rx_pkt_offsets(void)
3659 printf("Number of offsets: %u\n", n);
3661 printf("Segment offsets: ");
3662 for (i = 0; i != n - 1; i++)
3663 printf("%hu,", rx_pkt_seg_offsets[i]);
3664 printf("%hu\n", rx_pkt_seg_lengths[i]);
3669 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
3673 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
3674 printf("nb segments per RX packets=%u >= "
3675 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
3680 * No extra check here, the segment offsets will be checked by the PMD
3681 * in the extended queue setup.
3683 for (i = 0; i < nb_offs; i++) {
3684 if (seg_offsets[i] >= UINT16_MAX) {
3685 printf("offset[%u]=%u > UINT16_MAX - give up\n",
3691 for (i = 0; i < nb_offs; i++)
3692 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];
3694 rx_pkt_nb_offs = (uint8_t) nb_offs;
3698 show_rx_pkt_segments(void)
3703 printf("Number of segments: %u\n", n);
3705 printf("Segment sizes: ");
3706 for (i = 0; i != n - 1; i++)
3707 printf("%hu,", rx_pkt_seg_lengths[i]);
3708 printf("%hu\n", rx_pkt_seg_lengths[i]);
3713 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
3717 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
3718 printf("nb segments per RX packets=%u >= "
3719 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
3724 * No extra check here, the segment length will be checked by PMD
3725 * in the extended queue setup.
3727 for (i = 0; i < nb_segs; i++) {
3728 if (seg_lengths[i] >= UINT16_MAX) {
3729 printf("length[%u]=%u > UINT16_MAX - give up\n",
3735 for (i = 0; i < nb_segs; i++)
3736 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
3738 rx_pkt_nb_segs = (uint8_t) nb_segs;
3742 show_tx_pkt_segments(void)
3748 split = tx_split_get_name(tx_pkt_split);
3750 printf("Number of segments: %u\n", n);
3751 printf("Segment sizes: ");
3752 for (i = 0; i != n - 1; i++)
3753 printf("%hu,", tx_pkt_seg_lengths[i]);
3754 printf("%hu\n", tx_pkt_seg_lengths[i]);
3755 printf("Split packet: %s\n", split);
3759 nb_segs_is_invalid(unsigned int nb_segs)
3766 RTE_ETH_FOREACH_DEV(port_id) {
3767 for (queue_id = 0; queue_id < nb_txq; queue_id++) {
3768 ret = get_tx_ring_size(port_id, queue_id, &ring_size);
3770 /* Port may not be initialized yet, can't say
3771 * the port is invalid at this stage.
3775 if (ring_size < nb_segs) {
3776 printf("nb segments per TX packets=%u >= TX "
3777 "queue(%u) ring_size=%u - txpkts ignored\n",
3778 nb_segs, queue_id, ring_size);
3788 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
3790 uint16_t tx_pkt_len;
3794 * For single-segment settings a failed check is ignored:
3795 * sending single-segment packets is a very basic capability
3796 * and is assumed to always be supported.
3798 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) {
3799 printf("Tx segment size(%u) is not supported - txpkts ignored\n",
3804 if (nb_segs > RTE_MAX_SEGS_PER_PKT) {
3805 printf("Tx segment size(%u) is bigger than max number of segment(%u)\n",
3806 nb_segs, RTE_MAX_SEGS_PER_PKT);
3811 * Check that each segment length is less than or equal to
3812 * the mbuf data size.
3813 * Check also that the total packet length is greater than or equal to the
3814 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
3818 for (i = 0; i < nb_segs; i++) {
3819 if (seg_lengths[i] > mbuf_data_size[0]) {
3820 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
3821 i, seg_lengths[i], mbuf_data_size[0]);
3824 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
3826 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
3827 printf("total packet length=%u < %d - give up\n",
3828 (unsigned) tx_pkt_len,
3829 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
3833 for (i = 0; i < nb_segs; i++)
3834 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
3836 tx_pkt_length = tx_pkt_len;
3837 tx_pkt_nb_segs = (uint8_t) nb_segs;
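/*
 * Worked example for the length check above: the smallest accepted
 * packet is sizeof(struct rte_ether_hdr) + 20 + 8 = 14 + 20 + 8 = 42
 * bytes, i.e. an Ethernet header followed by empty IPv4 and UDP headers.
 */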
3841 show_tx_pkt_times(void)
3843 printf("Interburst gap: %u\n", tx_pkt_times_inter);
3844 printf("Intraburst gap: %u\n", tx_pkt_times_intra);
3848 set_tx_pkt_times(unsigned int *tx_times)
3850 tx_pkt_times_inter = tx_times[0];
3851 tx_pkt_times_intra = tx_times[1];
3855 setup_gro(const char *onoff, portid_t port_id)
3857 if (!rte_eth_dev_is_valid_port(port_id)) {
3858 printf("invalid port id %u\n", port_id);
3861 if (test_done == 0) {
3862 printf("Before enable/disable GRO,"
3863 " please stop forwarding first\n");
3866 if (strcmp(onoff, "on") == 0) {
3867 if (gro_ports[port_id].enable != 0) {
3868 printf("Port %u has enabled GRO. Please"
3869 " disable GRO first\n", port_id);
3872 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3873 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
3874 gro_ports[port_id].param.max_flow_num =
3875 GRO_DEFAULT_FLOW_NUM;
3876 gro_ports[port_id].param.max_item_per_flow =
3877 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
3879 gro_ports[port_id].enable = 1;
3881 if (gro_ports[port_id].enable == 0) {
3882 printf("Port %u has disabled GRO\n", port_id);
3885 gro_ports[port_id].enable = 0;
3890 setup_gro_flush_cycles(uint8_t cycles)
3892 if (test_done == 0) {
3893 printf("Before change flush interval for GRO,"
3894 " please stop forwarding first.\n");
3898 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
3899 GRO_DEFAULT_FLUSH_CYCLES) {
3900 printf("The flushing cycle be in the range"
3901 " of 1 to %u. Revert to the default"
3903 GRO_MAX_FLUSH_CYCLES,
3904 GRO_DEFAULT_FLUSH_CYCLES);
3905 cycles = GRO_DEFAULT_FLUSH_CYCLES;
3908 gro_flush_cycles = cycles;
3912 show_gro(portid_t port_id)
3914 struct rte_gro_param *param;
3915 uint32_t max_pkts_num;
3917 param = &gro_ports[port_id].param;
3919 if (!rte_eth_dev_is_valid_port(port_id)) {
3920 printf("Invalid port id %u.\n", port_id);
3923 if (gro_ports[port_id].enable) {
3924 printf("GRO type: TCP/IPv4\n");
3925 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3926 max_pkts_num = param->max_flow_num *
3927 param->max_item_per_flow;
3929 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
3930 printf("Max number of packets to perform GRO: %u\n",
3932 printf("Flushing cycles: %u\n", gro_flush_cycles);
3934 printf("Port %u doesn't enable GRO.\n", port_id);
3938 setup_gso(const char *mode, portid_t port_id)
3940 if (!rte_eth_dev_is_valid_port(port_id)) {
3941 printf("invalid port id %u\n", port_id);
3944 if (strcmp(mode, "on") == 0) {
3945 if (test_done == 0) {
3946 printf("before enabling GSO,"
3947 " please stop forwarding first\n");
3950 gso_ports[port_id].enable = 1;
3951 } else if (strcmp(mode, "off") == 0) {
3952 if (test_done == 0) {
3953 printf("before disabling GSO,"
3954 " please stop forwarding first\n");
3957 gso_ports[port_id].enable = 0;
3962 list_pkt_forwarding_modes(void)
3964 static char fwd_modes[128] = "";
3965 const char *separator = "|";
3966 struct fwd_engine *fwd_eng;
3969 if (strlen(fwd_modes) == 0) {
3970 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3971 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3972 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3973 strncat(fwd_modes, separator,
3974 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3976 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3983 list_pkt_forwarding_retry_modes(void)
3985 static char fwd_modes[128] = "";
3986 const char *separator = "|";
3987 struct fwd_engine *fwd_eng;
3990 if (strlen(fwd_modes) == 0) {
3991 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3992 if (fwd_eng == &rx_only_engine)
3994 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3996 strlen(fwd_modes) - 1);
3997 strncat(fwd_modes, separator,
3999 strlen(fwd_modes) - 1);
4001 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
4008 set_pkt_forwarding_mode(const char *fwd_mode_name)
4010 struct fwd_engine *fwd_eng;
4014 while ((fwd_eng = fwd_engines[i]) != NULL) {
4015 if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
4016 printf("Set %s packet forwarding mode%s\n",
4018 retry_enabled == 0 ? "" : " with retry");
4019 cur_fwd_eng = fwd_eng;
4024 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
4028 add_rx_dump_callbacks(portid_t portid)
4030 struct rte_eth_dev_info dev_info;
4034 if (port_id_is_invalid(portid, ENABLED_WARN))
4037 ret = eth_dev_info_get_print_err(portid, &dev_info);
4041 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
4042 if (!ports[portid].rx_dump_cb[queue])
4043 ports[portid].rx_dump_cb[queue] =
4044 rte_eth_add_rx_callback(portid, queue,
4045 dump_rx_pkts, NULL);
4049 add_tx_dump_callbacks(portid_t portid)
4051 struct rte_eth_dev_info dev_info;
4055 if (port_id_is_invalid(portid, ENABLED_WARN))
4058 ret = eth_dev_info_get_print_err(portid, &dev_info);
4062 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
4063 if (!ports[portid].tx_dump_cb[queue])
4064 ports[portid].tx_dump_cb[queue] =
4065 rte_eth_add_tx_callback(portid, queue,
4066 dump_tx_pkts, NULL);
4070 remove_rx_dump_callbacks(portid_t portid)
4072 struct rte_eth_dev_info dev_info;
4076 if (port_id_is_invalid(portid, ENABLED_WARN))
4079 ret = eth_dev_info_get_print_err(portid, &dev_info);
4083 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
4084 if (ports[portid].rx_dump_cb[queue]) {
4085 rte_eth_remove_rx_callback(portid, queue,
4086 ports[portid].rx_dump_cb[queue]);
4087 ports[portid].rx_dump_cb[queue] = NULL;
4092 remove_tx_dump_callbacks(portid_t portid)
4094 struct rte_eth_dev_info dev_info;
4098 if (port_id_is_invalid(portid, ENABLED_WARN))
4101 ret = eth_dev_info_get_print_err(portid, &dev_info);
4105 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
4106 if (ports[portid].tx_dump_cb[queue]) {
4107 rte_eth_remove_tx_callback(portid, queue,
4108 ports[portid].tx_dump_cb[queue]);
4109 ports[portid].tx_dump_cb[queue] = NULL;
4114 configure_rxtx_dump_callbacks(uint16_t verbose)
4118 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4119 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
4123 RTE_ETH_FOREACH_DEV(portid)
4125 if (verbose == 1 || verbose > 2)
4126 add_rx_dump_callbacks(portid);
4128 remove_rx_dump_callbacks(portid);
4130 add_tx_dump_callbacks(portid);
4132 remove_tx_dump_callbacks(portid);
4137 set_verbose_level(uint16_t vb_level)
4139 printf("Change verbose level from %u to %u\n",
4140 (unsigned int) verbose_level, (unsigned int) vb_level);
4141 verbose_level = vb_level;
4142 configure_rxtx_dump_callbacks(verbose_level);
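/*
 * Illustrative summary of the callback wiring above: a verbose level
 * of 1, or any level above 2, installs the RX dump callbacks, while
 * other levels remove them; the TX dump callbacks are toggled by an
 * analogous check on the same level.
 */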
4146 vlan_extend_set(portid_t port_id, int on)
4150 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4152 if (port_id_is_invalid(port_id, ENABLED_WARN))
4155 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4158 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
4159 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
4161 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
4162 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
4165 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4167 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
4168 "diag=%d\n", port_id, on, diag);
4171 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4175 rx_vlan_strip_set(portid_t port_id, int on)
4179 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4181 if (port_id_is_invalid(port_id, ENABLED_WARN))
4184 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4187 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
4188 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
4190 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
4191 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4194 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4196 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
4197 "diag=%d\n", port_id, on, diag);
4200 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4204 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
4208 if (port_id_is_invalid(port_id, ENABLED_WARN))
4211 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
4213 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
4214 "diag=%d\n", port_id, queue_id, on, diag);
4218 rx_vlan_filter_set(portid_t port_id, int on)
4222 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4224 if (port_id_is_invalid(port_id, ENABLED_WARN))
4227 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4230 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
4231 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
4233 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
4234 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
4237 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4239 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
4240 "diag=%d\n", port_id, on, diag);
4243 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4247 rx_vlan_qinq_strip_set(portid_t port_id, int on)
4251 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4253 if (port_id_is_invalid(port_id, ENABLED_WARN))
4256 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4259 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
4260 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
4262 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
4263 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
4266 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4268 printf("%s(port_pi=%d, on=%d) failed "
4269 "diag=%d\n", __func__, port_id, on, diag);
4272 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4276 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
4280 if (port_id_is_invalid(port_id, ENABLED_WARN))
4282 if (vlan_id_is_invalid(vlan_id))
4284 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
4287 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
4289 port_id, vlan_id, on, diag);
4294 rx_vlan_all_filter_set(portid_t port_id, int on)
4298 if (port_id_is_invalid(port_id, ENABLED_WARN))
4300 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
4301 if (rx_vft_set(port_id, vlan_id, on))
4307 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
4311 if (port_id_is_invalid(port_id, ENABLED_WARN))
4314 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
4318 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed "
4320 port_id, vlan_type, tp_id, diag);
4324 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
4326 struct rte_eth_dev_info dev_info;
4329 if (vlan_id_is_invalid(vlan_id))
4332 if (ports[port_id].dev_conf.txmode.offloads &
4333 DEV_TX_OFFLOAD_QINQ_INSERT) {
4334 printf("Error, as QinQ has been enabled.\n");
4338 ret = eth_dev_info_get_print_err(port_id, &dev_info);
4342 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
4343 printf("Error: vlan insert is not supported by port %d\n",
4348 tx_vlan_reset(port_id);
4349 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
4350 ports[port_id].tx_vlan_id = vlan_id;
4354 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
4356 struct rte_eth_dev_info dev_info;
4359 if (vlan_id_is_invalid(vlan_id))
4361 if (vlan_id_is_invalid(vlan_id_outer))
4364 ret = eth_dev_info_get_print_err(port_id, &dev_info);
4368 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
4369 printf("Error: qinq insert not supported by port %d\n",
4374 tx_vlan_reset(port_id);
4375 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
4376 DEV_TX_OFFLOAD_QINQ_INSERT);
4377 ports[port_id].tx_vlan_id = vlan_id;
4378 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
4382 tx_vlan_reset(portid_t port_id)
4384 ports[port_id].dev_conf.txmode.offloads &=
4385 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
4386 DEV_TX_OFFLOAD_QINQ_INSERT);
4387 ports[port_id].tx_vlan_id = 0;
4388 ports[port_id].tx_vlan_id_outer = 0;
4392 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
4394 if (port_id_is_invalid(port_id, ENABLED_WARN))
4397 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
4401 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
4405 if (port_id_is_invalid(port_id, ENABLED_WARN))
4408 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
4411 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
4412 printf("map_value not in required range 0..%d\n",
4413 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
4417 if (!is_rx) { /* tx */
4418 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
4421 printf("failed to set tx queue stats mapping.\n");
4425 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
4428 printf("failed to set rx queue stats mapping.\n");
4435 set_xstats_hide_zero(uint8_t on_off)
4437 xstats_hide_zero = on_off;
4441 set_record_core_cycles(uint8_t on_off)
4443 record_core_cycles = on_off;
4447 set_record_burst_stats(uint8_t on_off)
4449 record_burst_stats = on_off;
4453 print_fdir_mask(struct rte_eth_fdir_masks *mask)
4455 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
4457 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4458 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
4459 " tunnel_id: 0x%08x",
4460 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
4461 rte_be_to_cpu_32(mask->tunnel_id_mask));
4462 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
4463 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
4464 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
4465 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
4467 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
4468 rte_be_to_cpu_16(mask->src_port_mask),
4469 rte_be_to_cpu_16(mask->dst_port_mask));
4471 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4472 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
4473 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
4474 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
4475 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
4477 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4478 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
4479 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
4480 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
4481 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
4488 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4490 struct rte_eth_flex_payload_cfg *cfg;
4493 for (i = 0; i < flex_conf->nb_payloads; i++) {
4494 cfg = &flex_conf->flex_set[i];
4495 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
4497 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
4498 printf("\n L2_PAYLOAD: ");
4499 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
4500 printf("\n L3_PAYLOAD: ");
4501 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
4502 printf("\n L4_PAYLOAD: ");
4504 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
4505 for (j = 0; j < num; j++)
4506 printf(" %-5u", cfg->src_offset[j]);
4512 flowtype_to_str(uint16_t flow_type)
4514 struct flow_type_info {
4520 static struct flow_type_info flowtype_str_table[] = {
4521 {"raw", RTE_ETH_FLOW_RAW},
4522 {"ipv4", RTE_ETH_FLOW_IPV4},
4523 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
4524 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
4525 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
4526 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
4527 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
4528 {"ipv6", RTE_ETH_FLOW_IPV6},
4529 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
4530 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
4531 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
4532 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
4533 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
4534 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
4535 {"port", RTE_ETH_FLOW_PORT},
4536 {"vxlan", RTE_ETH_FLOW_VXLAN},
4537 {"geneve", RTE_ETH_FLOW_GENEVE},
4538 {"nvgre", RTE_ETH_FLOW_NVGRE},
4539 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
4542 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
4543 if (flowtype_str_table[i].ftype == flow_type)
4544 return flowtype_str_table[i].str;
4550 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
4553 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4555 struct rte_eth_fdir_flex_mask *mask;
4559 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
4560 mask = &flex_conf->flex_mask[i];
4561 p = flowtype_to_str(mask->flow_type);
4562 printf("\n %s:\t", p ? p : "unknown");
4563 for (j = 0; j < num; j++)
4564 printf(" %02x", mask->mask[j]);
4570 print_fdir_flow_type(uint32_t flow_types_mask)
4575 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
4576 if (!(flow_types_mask & (1 << i)))
4578 p = flowtype_to_str(i);
4588 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
4589 struct rte_eth_fdir_stats *fdir_stat)
4594 if (ret == -ENOTSUP) {
4595 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
4597 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
4600 #ifdef RTE_NET_IXGBE
4601 if (ret == -ENOTSUP) {
4602 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
4604 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
4611 printf("\n FDIR is not supported on port %-2d\n",
4615 printf("programming error: (%s)\n", strerror(-ret));
4622 fdir_get_infos(portid_t port_id)
4624 struct rte_eth_fdir_stats fdir_stat;
4625 struct rte_eth_fdir_info fdir_info;
4627 static const char *fdir_stats_border = "########################";
4629 if (port_id_is_invalid(port_id, ENABLED_WARN))
4632 memset(&fdir_info, 0, sizeof(fdir_info));
4633 memset(&fdir_stat, 0, sizeof(fdir_stat));
4634 if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
4637 printf("\n %s FDIR infos for port %-2d %s\n",
4638 fdir_stats_border, port_id, fdir_stats_border);
4640 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
4641 printf(" PERFECT\n");
4642 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
4643 printf(" PERFECT-MAC-VLAN\n");
4644 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4645 printf(" PERFECT-TUNNEL\n");
4646 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
4647 printf(" SIGNATURE\n");
4649 printf(" DISABLE\n");
4650 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
4651 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
4652 printf(" SUPPORTED FLOW TYPE: ");
4653 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
4655 printf(" FLEX PAYLOAD INFO:\n");
4656 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
4657 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
4658 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
4659 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
4660 fdir_info.flex_payload_unit,
4661 fdir_info.max_flex_payload_segment_num,
4662 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
4664 print_fdir_mask(&fdir_info.mask);
4665 if (fdir_info.flex_conf.nb_payloads > 0) {
4666 printf(" FLEX PAYLOAD SRC OFFSET:");
4667 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4669 if (fdir_info.flex_conf.nb_flexmasks > 0) {
4670 printf(" FLEX MASK CFG:");
4671 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4673 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
4674 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
4675 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
4676 fdir_info.guarant_spc, fdir_info.best_spc);
4677 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
4678 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
4679 " add: %-10"PRIu64" remove: %"PRIu64"\n"
4680 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
4681 fdir_stat.collision, fdir_stat.free,
4682 fdir_stat.maxhash, fdir_stat.maxlen,
4683 fdir_stat.add, fdir_stat.remove,
4684 fdir_stat.f_add, fdir_stat.f_remove);
4685 printf(" %s############################%s\n",
4686 fdir_stats_border, fdir_stats_border);
4689 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */
4692 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
4694 struct rte_port *port;
4695 struct rte_eth_fdir_flex_conf *flex_conf;
4698 port = &ports[port_id];
4699 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4700 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
4701 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
4706 if (i >= RTE_ETH_FLOW_MAX) {
4707 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
4708 idx = flex_conf->nb_flexmasks;
4709 flex_conf->nb_flexmasks++;
4711 printf("The flex mask table is full. Can not set flex"
4712 " mask for flow_type(%u).", cfg->flow_type);
4716 rte_memcpy(&flex_conf->flex_mask[idx],
4718 sizeof(struct rte_eth_fdir_flex_mask));
4722 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
4724 struct rte_port *port;
4725 struct rte_eth_fdir_flex_conf *flex_conf;
4728 port = &ports[port_id];
4729 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4730 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
4731 if (cfg->type == flex_conf->flex_set[i].type) {
4736 if (i >= RTE_ETH_PAYLOAD_MAX) {
4737 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
4738 idx = flex_conf->nb_payloads;
4739 flex_conf->nb_payloads++;
4741 printf("The flex payload table is full. Can not set"
4742 " flex payload for type(%u).", cfg->type);
4746 rte_memcpy(&flex_conf->flex_set[idx],
4748 sizeof(struct rte_eth_flex_payload_cfg));
4753 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
4755 #ifdef RTE_NET_IXGBE
4759 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
4761 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
4765 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
4766 is_rx ? "rx" : "tx", port_id, diag);
4769 printf("VF %s setting not supported for port %d\n",
4770 is_rx ? "Rx" : "Tx", port_id);
4776 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
4779 struct rte_eth_link link;
4782 if (port_id_is_invalid(port_id, ENABLED_WARN))
4784 ret = eth_link_get_nowait_print_err(port_id, &link);
4787 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
4788 rate > link.link_speed) {
4789 printf("Invalid rate value:%u bigger than link speed: %u\n",
4790 rate, link.link_speed);
4793 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
4796 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
4802 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
4804 int diag = -ENOTSUP;
4808 RTE_SET_USED(q_msk);
4810 #ifdef RTE_NET_IXGBE
4811 if (diag == -ENOTSUP)
4812 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
4816 if (diag == -ENOTSUP)
4817 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
4822 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
4828 * Functions to manage the set of filtered Multicast MAC addresses.
4830 * A pool of filtered multicast MAC addresses is associated with each port.
4831 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
4832 * The address of the pool and the number of valid multicast MAC addresses
4833 * recorded in the pool are stored in the fields "mc_addr_pool" and
4834 * "mc_addr_nb" of the "rte_port" data structure.
4836 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
4837 * a contiguous array of multicast MAC addresses to be supplied.
4838 * To comply with this constraint, the set of multicast addresses recorded
4839 * into the pool are systematically compacted at the beginning of the pool.
4840 * Hence, when a multicast address is removed from the pool, all following
4841 * addresses, if any, are copied back to keep the set contiguous.
4843 #define MCAST_POOL_INC 32
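/*
 * Worked example: the pool grows in chunks of MCAST_POOL_INC entries,
 * so recording the first address allocates 32 slots and recording a
 * 33rd address reallocates the pool to 64 slots.
 */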
4846 mcast_addr_pool_extend(struct rte_port *port)
4848 struct rte_ether_addr *mc_pool;
4849 size_t mc_pool_size;
4852 * If a free entry is available at the end of the pool, just
4853 * increment the number of recorded multicast addresses.
4855 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
4861 * [re]allocate a pool with MCAST_POOL_INC more entries.
4862 * The previous test guarantees that port->mc_addr_nb is a multiple
4863 * of MCAST_POOL_INC.
4865 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
4867 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
4869 if (mc_pool == NULL) {
4870 printf("allocation of pool of %u multicast addresses failed\n",
4871 port->mc_addr_nb + MCAST_POOL_INC);
4875 port->mc_addr_pool = mc_pool;
4882 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
4884 if (mcast_addr_pool_extend(port) != 0)
4886 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
4890 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
4893 if (addr_idx == port->mc_addr_nb) {
4894 /* No need to recompact the set of multicast addresses. */
4895 if (port->mc_addr_nb == 0) {
4896 /* free the pool of multicast addresses. */
4897 free(port->mc_addr_pool);
4898 port->mc_addr_pool = NULL;
4902 memmove(&port->mc_addr_pool[addr_idx],
4903 &port->mc_addr_pool[addr_idx + 1],
4904 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
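/*
 * Editor's note (worked example, not in the original source): with three
 * recorded addresses {A, B, C} and addr_idx == 1, mc_addr_nb drops to 2;
 * since 1 != 2, the memmove copies one entry (C) from index 2 down to
 * index 1, leaving the contiguous set {A, C}.
 */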
static int
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag < 0)
		printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
		       port_id, port->mc_addr_nb, diag);
	return diag;
}
void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	mcast_addr_pool_append(port, mc_addr);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, remove the address from the pool */
		mcast_addr_pool_remove(port, i);
}
void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, add the address back into the pool */
		mcast_addr_pool_append(port, mc_addr);
}
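/*
 * Editor's sketch (not in the original source): the typical driving
 * sequence, as issued by the testpmd "mcast_addr add|remove" commands.
 * The address value below is hypothetical.
 */
#if 0
	struct rte_ether_addr mc = {
		.addr_bytes = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01}
	};

	mcast_addr_add(0, &mc);		/* program the filter on port 0 */
	mcast_addr_remove(0, &mc);	/* and undo it */
#endif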
void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get DCB info on port %-2d\n",
		       port_id);
		return;
	}
	printf("\n %s DCB info for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf(" TC :        ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}
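/*
 * Editor's note (illustrative output, not from the original source): with
 * two traffic classes configured, the display above prints rows roughly
 * like
 *
 *   TC NUMBER: 2
 *   TC :           0     1
 *   Priority :     0     1
 *   BW percent :  50%   50%
 */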
static void *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	uint8_t *buf = NULL;
	struct stat st_buf;
	off_t pkg_size;
	int ret;

	if (fd == -1) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return NULL;
	}
	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return NULL;
	}
	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return NULL;
	}
	buf = (uint8_t *)malloc(pkg_size);
	if (buf == NULL) {
		close(fd);
		printf("%s: Failed to allocate memory\n", __func__);
		return NULL;
	}
	ret = read(fd, buf, pkg_size);
	if (ret < 0) {
		close(fd);
		free(buf);
		printf("%s: File read operation failed\n", __func__);
		return NULL;
	}
	close(fd);
	if (size != NULL)
		*size = pkg_size;
	return buf;
}
static int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}
	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		printf("%s: File write operation failed\n", __func__);
		return -1;
	}
	fclose(fh);
	return 0;
}
static int
close_file(uint8_t *buf)
{
	if (buf) {
		free(buf);
		return 0;
	}
	return -1;
}
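/*
 * Editor's sketch (not in the original source): how the three file helpers
 * above compose. The file path is hypothetical.
 */
#if 0
	uint32_t size;
	uint8_t *pkg = open_file("/tmp/profile.pkg", &size);

	if (pkg != NULL) {
		save_file("/tmp/profile.bak", pkg, size);
		close_file(pkg);
	}
#endif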
void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_NET_I40E
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("No queue region has been set yet\n");

	printf("\n %s All queue region info for port=%2d %s",
	       queue_region_info_stats_border, port_id,
	       queue_region_info_stats_border);
	printf("\n queue_region_number: %-14u\n",
	       info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n region_id: %-14u queue_number: %-14u "
		       "queue_start_index: %-14u\n",
		       info->region[i].region_id,
		       info->region[i].queue_num,
		       info->region[i].queue_start_index);

		printf(" user_priority_num is %-14u :",
		       info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n flowtype_num is %-14u :",
		       info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif
}
void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	struct rte_ether_addr *addr;
	uint32_t i, num_macs = 0;
	struct rte_eth_dev *dev;

	dev = &rte_eth_devices[port_id];

	if (eth_dev_info_get_print_err(port_id, &dev_info))
		return;

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* Skip the zero (unused) entries. */
		if (rte_is_zero_ether_addr(addr))
			continue;

		num_macs++;
	}

	printf("Number of MAC addresses added: %u\n", num_macs);

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* Skip the zero (unused) entries. */
		if (rte_is_zero_ether_addr(addr))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);
	}
}
void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	port = &ports[port_id];

	printf("Number of multicast MAC addresses added: %u\n",
	       port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);
	}
}