1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2013-2014 6WIND S.A.
13 #include <sys/queue.h>
14 #include <sys/types.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
34 #include <rte_interrupts.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
41 #include <rte_errno.h>
42 #ifdef RTE_LIBRTE_IXGBE_PMD
43 #include <rte_pmd_ixgbe.h>
45 #ifdef RTE_LIBRTE_I40E_PMD
46 #include <rte_pmd_i40e.h>
48 #ifdef RTE_LIBRTE_BNXT_PMD
49 #include <rte_pmd_bnxt.h>
52 #include <rte_hexdump.h>
56 #define ETHDEV_FWVERS_LEN 32
58 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
59 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
61 #define CLOCK_TYPE_ID CLOCK_MONOTONIC
64 #define NS_PER_SEC 1E9
66 static char *flowtype_to_str(uint16_t flow_type);
69 enum tx_pkt_split split;
73 .split = TX_PKT_SPLIT_OFF,
77 .split = TX_PKT_SPLIT_ON,
81 .split = TX_PKT_SPLIT_RND,
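/*
 * Lookup table mapping RSS offload type names to their ETH_RSS_* flag
 * values; used e.g. by rss_config_display() below to print the RSS
 * types attached to a flow rule.
 */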
86 const struct rss_type_info rss_type_table[] = {
87 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
88 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
89 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
92 { "eth", ETH_RSS_ETH },
93 { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
94 { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
95 { "vlan", ETH_RSS_VLAN },
96 { "s-vlan", ETH_RSS_S_VLAN },
97 { "c-vlan", ETH_RSS_C_VLAN },
98 { "ipv4", ETH_RSS_IPV4 },
99 { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
100 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
101 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
102 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
103 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
104 { "ipv6", ETH_RSS_IPV6 },
105 { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
106 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
107 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
108 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
109 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
110 { "l2-payload", ETH_RSS_L2_PAYLOAD },
111 { "ipv6-ex", ETH_RSS_IPV6_EX },
112 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
113 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
114 { "port", ETH_RSS_PORT },
115 { "vxlan", ETH_RSS_VXLAN },
116 { "geneve", ETH_RSS_GENEVE },
117 { "nvgre", ETH_RSS_NVGRE },
118 { "ip", ETH_RSS_IP },
119 { "udp", ETH_RSS_UDP },
120 { "tcp", ETH_RSS_TCP },
121 { "sctp", ETH_RSS_SCTP },
122 { "tunnel", ETH_RSS_TUNNEL },
123 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
124 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
125 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
126 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
127 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
128 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
129 { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
130 { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
131 { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
132 { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
133 { "esp", ETH_RSS_ESP },
134 { "ah", ETH_RSS_AH },
135 { "l2tpv3", ETH_RSS_L2TPV3 },
136 { "pfcp", ETH_RSS_PFCP },
137 { "pppoe", ETH_RSS_PPPOE },
138 { "gtpu", ETH_RSS_GTPU },
142 static const struct {
143 enum rte_eth_fec_mode mode;
145 } fec_mode_name[] = {
147 .mode = RTE_ETH_FEC_NOFEC,
151 .mode = RTE_ETH_FEC_AUTO,
155 .mode = RTE_ETH_FEC_BASER,
159 .mode = RTE_ETH_FEC_RS,
165 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
167 char buf[RTE_ETHER_ADDR_FMT_SIZE];
168 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
169 printf("%s%s", name, buf);
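/*
 * Display basic statistics for a port (packets, bytes, errors) and,
 * using clock_gettime() deltas against the per-port prev_* snapshots,
 * the RX/TX throughput since the previous call in packets/s and bits/s.
 */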
173 nic_stats_display(portid_t port_id)
175 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
176 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
177 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
178 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
179 static uint64_t prev_ns[RTE_MAX_ETHPORTS];
180 struct timespec cur_time;
181 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
183 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
184 struct rte_eth_stats stats;
185 struct rte_port *port = &ports[port_id];
188 static const char *nic_stats_border = "########################";
190 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
194 rte_eth_stats_get(port_id, &stats);
195 printf("\n %s NIC statistics for port %-2d %s\n",
196 nic_stats_border, port_id, nic_stats_border);
198 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
199 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
201 stats.ipackets, stats.imissed, stats.ibytes);
202 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
203 printf(" RX-nombuf: %-10"PRIu64"\n",
205 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
207 stats.opackets, stats.oerrors, stats.obytes);
210 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
211 " RX-bytes: %10"PRIu64"\n",
212 stats.ipackets, stats.ierrors, stats.ibytes);
213 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
214 printf(" RX-nombuf: %10"PRIu64"\n",
216 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
217 " TX-bytes: %10"PRIu64"\n",
218 stats.opackets, stats.oerrors, stats.obytes);
221 if (port->rx_queue_stats_mapping_enabled) {
223 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
224 printf(" Stats reg %2d RX-packets: %10"PRIu64
225 " RX-errors: %10"PRIu64
226 " RX-bytes: %10"PRIu64"\n",
227 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
230 if (port->tx_queue_stats_mapping_enabled) {
232 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
233 printf(" Stats reg %2d TX-packets: %10"PRIu64
234 " TX-bytes: %10"PRIu64"\n",
235 i, stats.q_opackets[i], stats.q_obytes[i]);
240 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
243 ns = cur_time.tv_sec * NS_PER_SEC;
244 ns += cur_time.tv_nsec;
246 if (prev_ns[port_id] != 0)
247 diff_ns = ns - prev_ns[port_id];
248 prev_ns[port_id] = ns;
251 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
252 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
253 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
254 (stats.opackets - prev_pkts_tx[port_id]) : 0;
255 prev_pkts_rx[port_id] = stats.ipackets;
256 prev_pkts_tx[port_id] = stats.opackets;
257 mpps_rx = diff_ns > 0 ?
258 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
259 mpps_tx = diff_ns > 0 ?
260 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;
262 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
263 (stats.ibytes - prev_bytes_rx[port_id]) : 0;
264 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
265 (stats.obytes - prev_bytes_tx[port_id]) : 0;
266 prev_bytes_rx[port_id] = stats.ibytes;
267 prev_bytes_tx[port_id] = stats.obytes;
268 mbps_rx = diff_ns > 0 ?
269 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
270 mbps_tx = diff_ns > 0 ?
271 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;
273 printf("\n Throughput (since last show)\n");
274 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
275 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
276 mpps_tx, mbps_tx * 8);
278 printf(" %s############################%s\n",
279 nic_stats_border, nic_stats_border);
283 nic_stats_clear(portid_t port_id)
287 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
292 ret = rte_eth_stats_reset(port_id);
294 printf("%s: Error: failed to reset stats (port %u): %s",
295 __func__, port_id, strerror(-ret));
299 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
303 printf("%s: Error: failed to get stats (port %u): %s",
304 __func__, port_id, strerror(ret));
307 printf("\n NIC statistics for port %d cleared\n", port_id);
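/*
 * Display extended statistics for a port: query the number of xstats,
 * fetch the id-to-name table and the values, then print every counter
 * (skipping zero values when xstats_hide_zero is set).
 */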
311 nic_xstats_display(portid_t port_id)
313 struct rte_eth_xstat *xstats;
314 int cnt_xstats, idx_xstat;
315 struct rte_eth_xstat_name *xstats_names;
317 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
321 printf("###### NIC extended statistics for port %-2d\n", port_id);
322 if (!rte_eth_dev_is_valid_port(port_id)) {
323 printf("Error: Invalid port number %i\n", port_id);
328 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
329 if (cnt_xstats < 0) {
330 printf("Error: Cannot get count of xstats\n");
334 /* Get id-name lookup table */
335 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
336 if (xstats_names == NULL) {
337 printf("Cannot allocate memory for xstats lookup\n");
340 if (cnt_xstats != rte_eth_xstats_get_names(
341 port_id, xstats_names, cnt_xstats)) {
342 printf("Error: Cannot get xstats lookup\n");
347 /* Get stats themselves */
348 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
349 if (xstats == NULL) {
350 printf("Cannot allocate memory for xstats\n");
354 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
355 printf("Error: Unable to get xstats\n");
362 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
363 if (xstats_hide_zero && !xstats[idx_xstat].value)
365 printf("%s: %"PRIu64"\n",
366 xstats_names[idx_xstat].name,
367 xstats[idx_xstat].value);
374 nic_xstats_clear(portid_t port_id)
378 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
383 ret = rte_eth_xstats_reset(port_id);
385 printf("%s: Error: failed to reset xstats (port %u): %s",
386 __func__, port_id, strerror(-ret));
390 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
394 printf("%s: Error: failed to get stats (port %u): %s",
395 __func__, port_id, strerror(ret));
401 nic_stats_mapping_display(portid_t port_id)
403 struct rte_port *port = &ports[port_id];
406 static const char *nic_stats_mapping_border = "########################";
408 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
413 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
414 printf("Port id %d - either does not support queue statistic mapping or"
415 " has no queue statistic mapping set\n", port_id);
419 printf("\n %s NIC statistics mapping for port %-2d %s\n",
420 nic_stats_mapping_border, port_id, nic_stats_mapping_border);
422 if (port->rx_queue_stats_mapping_enabled) {
423 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
424 if (rx_queue_stats_mappings[i].port_id == port_id) {
425 printf(" RX-queue %2d mapped to Stats Reg %2d\n",
426 rx_queue_stats_mappings[i].queue_id,
427 rx_queue_stats_mappings[i].stats_counter_id);
434 if (port->tx_queue_stats_mapping_enabled) {
435 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
436 if (tx_queue_stats_mappings[i].port_id == port_id) {
437 printf(" TX-queue %2d mapped to Stats Reg %2d\n",
438 tx_queue_stats_mappings[i].queue_id,
439 tx_queue_stats_mappings[i].stats_counter_id);
444 printf(" %s####################################%s\n",
445 nic_stats_mapping_border, nic_stats_mapping_border);
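/*
 * Print the rte_eth_rxq_info of one RX queue (mempool, thresholds,
 * descriptor count, buffer size, ...) together with the burst mode
 * reported by rte_eth_rx_burst_mode_get().
 */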
449 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
451 struct rte_eth_burst_mode mode;
452 struct rte_eth_rxq_info qinfo;
454 static const char *info_border = "*********************";
456 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
458 printf("Failed to retrieve information for port: %u, "
459 "RX queue: %hu\nerror desc: %s(%d)\n",
460 port_id, queue_id, strerror(-rc), rc);
464 printf("\n%s Infos for port %-2u, RX queue %-2u %s",
465 info_border, port_id, queue_id, info_border);
467 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
468 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
469 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
470 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
471 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
472 printf("\nRX drop packets: %s",
473 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
474 printf("\nRX deferred start: %s",
475 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
476 printf("\nRX scattered packets: %s",
477 (qinfo.scattered_rx != 0) ? "on" : "off");
478 if (qinfo.rx_buf_size != 0)
479 printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
480 printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
482 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
483 printf("\nBurst mode: %s%s",
485 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
486 " (per queue)" : "");
492 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
494 struct rte_eth_burst_mode mode;
495 struct rte_eth_txq_info qinfo;
497 static const char *info_border = "*********************";
499 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
501 printf("Failed to retrieve information for port: %u, "
502 "TX queue: %hu\nerror desc: %s(%d)\n",
503 port_id, queue_id, strerror(-rc), rc);
507 printf("\n%s Infos for port %-2u, TX queue %-2u %s",
508 info_border, port_id, queue_id, info_border);
510 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
511 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
512 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
513 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
514 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
515 printf("\nTX deferred start: %s",
516 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
517 printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
519 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
520 printf("\nBurst mode: %s%s",
522 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
523 " (per queue)" : "");
528 static int bus_match_all(const struct rte_bus *bus, const void *data)
536 device_infos_display_speeds(uint32_t speed_capa)
538 printf("\n\tDevice speed capability:");
539 if (speed_capa == ETH_LINK_SPEED_AUTONEG)
540 printf(" Autonegotiate (all speeds)");
541 if (speed_capa & ETH_LINK_SPEED_FIXED)
542 printf(" Disable autonegotiate (fixed speed) ");
543 if (speed_capa & ETH_LINK_SPEED_10M_HD)
544 printf(" 10 Mbps half-duplex ");
545 if (speed_capa & ETH_LINK_SPEED_10M)
546 printf(" 10 Mbps full-duplex ");
547 if (speed_capa & ETH_LINK_SPEED_100M_HD)
548 printf(" 100 Mbps half-duplex ");
549 if (speed_capa & ETH_LINK_SPEED_100M)
550 printf(" 100 Mbps full-duplex ");
551 if (speed_capa & ETH_LINK_SPEED_1G)
553 if (speed_capa & ETH_LINK_SPEED_2_5G)
554 printf(" 2.5 Gbps ");
555 if (speed_capa & ETH_LINK_SPEED_5G)
557 if (speed_capa & ETH_LINK_SPEED_10G)
559 if (speed_capa & ETH_LINK_SPEED_20G)
561 if (speed_capa & ETH_LINK_SPEED_25G)
563 if (speed_capa & ETH_LINK_SPEED_40G)
565 if (speed_capa & ETH_LINK_SPEED_50G)
567 if (speed_capa & ETH_LINK_SPEED_56G)
569 if (speed_capa & ETH_LINK_SPEED_100G)
570 printf(" 100 Gbps ");
571 if (speed_capa & ETH_LINK_SPEED_200G)
572 printf(" 200 Gbps ");
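/*
 * Walk every bus that implements dev_iterate, list its devices
 * (optionally filtered by the "identifier" devargs string) and, for
 * each ethdev port attached to a matching device, print the MAC
 * address, device name and speed capabilities.
 */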
576 device_infos_display(const char *identifier)
578 static const char *info_border = "*********************";
579 struct rte_bus *start = NULL, *next;
580 struct rte_dev_iterator dev_iter;
581 char name[RTE_ETH_NAME_MAX_LEN];
582 struct rte_ether_addr mac_addr;
583 struct rte_device *dev;
584 struct rte_devargs da;
586 struct rte_eth_dev_info dev_info;
589 memset(&da, 0, sizeof(da));
593 if (rte_devargs_parsef(&da, "%s", identifier)) {
594 printf("cannot parse identifier\n");
601 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
604 if (identifier && da.bus != next)
607 /* Skip buses that don't have iterate method */
608 if (!next->dev_iterate)
611 snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
612 RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
616 /* Check for matching device if identifier is present */
618 strncmp(da.name, dev->name, strlen(dev->name)))
620 printf("\n%s Infos for device %s %s\n",
621 info_border, dev->name, info_border);
622 printf("Bus name: %s", dev->bus->name);
623 printf("\nDriver name: %s", dev->driver->name);
624 printf("\nDevargs: %s",
625 dev->devargs ? dev->devargs->args : "");
626 printf("\nConnect to socket: %d", dev->numa_node);
629 /* List ports with matching device name */
630 RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
631 printf("\n\tPort id: %-2d", port_id);
632 if (eth_macaddr_get_print_err(port_id,
634 print_ethaddr("\n\tMAC address: ",
636 rte_eth_dev_get_name_by_port(port_id, name);
637 printf("\n\tDevice name: %s", name);
638 if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
639 device_infos_display_speeds(dev_info.speed_capa);
647 port_infos_display(portid_t port_id)
649 struct rte_port *port;
650 struct rte_ether_addr mac_addr;
651 struct rte_eth_link link;
652 struct rte_eth_dev_info dev_info;
654 struct rte_mempool *mp;
655 static const char *info_border = "*********************";
657 char name[RTE_ETH_NAME_MAX_LEN];
659 char fw_version[ETHDEV_FWVERS_LEN];
661 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
665 port = &ports[port_id];
666 ret = eth_link_get_nowait_print_err(port_id, &link);
670 ret = eth_dev_info_get_print_err(port_id, &dev_info);
674 printf("\n%s Infos for port %-2d %s\n",
675 info_border, port_id, info_border);
676 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
677 print_ethaddr("MAC address: ", &mac_addr);
678 rte_eth_dev_get_name_by_port(port_id, name);
679 printf("\nDevice name: %s", name);
680 printf("\nDriver name: %s", dev_info.driver_name);
682 if (rte_eth_dev_fw_version_get(port_id, fw_version,
683 ETHDEV_FWVERS_LEN) == 0)
684 printf("\nFirmware-version: %s", fw_version);
686 printf("\nFirmware-version: %s", "not available");
688 if (dev_info.device->devargs && dev_info.device->devargs->args)
689 printf("\nDevargs: %s", dev_info.device->devargs->args);
690 printf("\nConnect to socket: %u", port->socket_id);
692 if (port_numa[port_id] != NUMA_NO_CONFIG) {
693 mp = mbuf_pool_find(port_numa[port_id], 0);
695 printf("\nmemory allocation on the socket: %d",
698 printf("\nmemory allocation on the socket: %u", port->socket_id);
700 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
701 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
702 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
703 ("full-duplex") : ("half-duplex"));
705 if (!rte_eth_dev_get_mtu(port_id, &mtu))
706 printf("MTU: %u\n", mtu);
708 printf("Promiscuous mode: %s\n",
709 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
710 printf("Allmulticast mode: %s\n",
711 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
712 printf("Maximum number of MAC addresses: %u\n",
713 (unsigned int)(port->dev_info.max_mac_addrs));
714 printf("Maximum number of MAC addresses of hash filtering: %u\n",
715 (unsigned int)(port->dev_info.max_hash_mac_addrs));
717 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
718 if (vlan_offload >= 0) {
719 printf("VLAN offload:\n");
720 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
721 printf(" strip on, ");
723 printf(" strip off, ");
725 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
726 printf("filter on, ");
728 printf("filter off, ");
730 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
731 printf("extend on, ");
733 printf("extend off, ");
735 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
736 printf("qinq strip on\n");
738 printf("qinq strip off\n");
741 if (dev_info.hash_key_size > 0)
742 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
743 if (dev_info.reta_size > 0)
744 printf("Redirection table size: %u\n", dev_info.reta_size);
745 if (!dev_info.flow_type_rss_offloads)
746 printf("No RSS offload flow type is supported.\n");
751 printf("Supported RSS offload flow types:\n");
752 for (i = RTE_ETH_FLOW_UNKNOWN + 1;
753 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
754 if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
756 p = flowtype_to_str(i);
760 printf(" user defined %d\n", i);
764 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
765 printf("Maximum configurable length of RX packet: %u\n",
766 dev_info.max_rx_pktlen);
767 printf("Maximum configurable size of LRO aggregated packet: %u\n",
768 dev_info.max_lro_pkt_size);
769 if (dev_info.max_vfs)
770 printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
771 if (dev_info.max_vmdq_pools)
772 printf("Maximum number of VMDq pools: %u\n",
773 dev_info.max_vmdq_pools);
775 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
776 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
777 printf("Max possible number of RXDs per queue: %hu\n",
778 dev_info.rx_desc_lim.nb_max);
779 printf("Min possible number of RXDs per queue: %hu\n",
780 dev_info.rx_desc_lim.nb_min);
781 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
783 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
784 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
785 printf("Max possible number of TXDs per queue: %hu\n",
786 dev_info.tx_desc_lim.nb_max);
787 printf("Min possible number of TXDs per queue: %hu\n",
788 dev_info.tx_desc_lim.nb_min);
789 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
790 printf("Max segment number per packet: %hu\n",
791 dev_info.tx_desc_lim.nb_seg_max);
792 printf("Max segment number per MTU/TSO: %hu\n",
793 dev_info.tx_desc_lim.nb_mtu_seg_max);
795 /* Show switch info only if valid switch domain and port id are set */
796 if (dev_info.switch_info.domain_id !=
797 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
798 if (dev_info.switch_info.name)
799 printf("Switch name: %s\n", dev_info.switch_info.name);
801 printf("Switch domain Id: %u\n",
802 dev_info.switch_info.domain_id);
803 printf("Switch Port Id: %u\n",
804 dev_info.switch_info.port_id);
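/*
 * One-line-per-port summary: a header row followed by MAC address,
 * device name, driver, link status and link speed for each port.
 */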
809 port_summary_header_display(void)
811 uint16_t port_number;
813 port_number = rte_eth_dev_count_avail();
814 printf("Number of available ports: %i\n", port_number);
815 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
816 "Driver", "Status", "Link");
820 port_summary_display(portid_t port_id)
822 struct rte_ether_addr mac_addr;
823 struct rte_eth_link link;
824 struct rte_eth_dev_info dev_info;
825 char name[RTE_ETH_NAME_MAX_LEN];
828 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
833 ret = eth_link_get_nowait_print_err(port_id, &link);
837 ret = eth_dev_info_get_print_err(port_id, &dev_info);
841 rte_eth_dev_get_name_by_port(port_id, name);
842 ret = eth_macaddr_get_print_err(port_id, &mac_addr);
846 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n",
847 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
848 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
849 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
850 dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
851 rte_eth_link_speed_to_str(link.link_speed));
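/*
 * Dump the device EEPROM: query its length, read the contents into a
 * stack buffer with rte_eth_dev_get_eeprom() and hexdump them.
 */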
855 port_eeprom_display(portid_t port_id)
857 struct rte_dev_eeprom_info einfo;
859 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
864 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
865 if (len_eeprom < 0) {
866 switch (len_eeprom) {
868 printf("port index %d invalid\n", port_id);
871 printf("operation not supported by device\n");
874 printf("device is removed\n");
877 printf("Unable to get EEPROM: %d\n", len_eeprom);
883 char buf[len_eeprom];
885 einfo.length = len_eeprom;
888 ret = rte_eth_dev_get_eeprom(port_id, &einfo);
892 printf("port index %d invalid\n", port_id);
895 printf("operation not supported by device\n");
898 printf("device is removed\n");
901 printf("Unable to get EEPROM: %d\n", ret);
906 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
907 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
911 port_module_eeprom_display(portid_t port_id)
913 struct rte_eth_dev_module_info minfo;
914 struct rte_dev_eeprom_info einfo;
917 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
923 ret = rte_eth_dev_get_module_info(port_id, &minfo);
927 printf("port index %d invalid\n", port_id);
930 printf("operation not supported by device\n");
933 printf("device is removed\n");
936 printf("Unable to get module EEPROM: %d\n", ret);
942 char buf[minfo.eeprom_len];
944 einfo.length = minfo.eeprom_len;
947 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
951 printf("port index %d invalid\n", port_id);
954 printf("operation not supported by device\n");
957 printf("device is removed\n");
960 printf("Unable to get module EEPROM: %d\n", ret);
966 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
967 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
971 port_offload_cap_display(portid_t port_id)
973 struct rte_eth_dev_info dev_info;
974 static const char *info_border = "************";
977 if (port_id_is_invalid(port_id, ENABLED_WARN))
980 ret = eth_dev_info_get_print_err(port_id, &dev_info);
984 printf("\n%s Port %d supported offload features: %s\n",
985 info_border, port_id, info_border);
987 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
988 printf("VLAN stripped: ");
989 if (ports[port_id].dev_conf.rxmode.offloads &
990 DEV_RX_OFFLOAD_VLAN_STRIP)
996 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
997 printf("Double VLANs stripped: ");
998 if (ports[port_id].dev_conf.rxmode.offloads &
999 DEV_RX_OFFLOAD_QINQ_STRIP)
1005 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
1006 printf("RX IPv4 checksum: ");
1007 if (ports[port_id].dev_conf.rxmode.offloads &
1008 DEV_RX_OFFLOAD_IPV4_CKSUM)
1014 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
1015 printf("RX UDP checksum: ");
1016 if (ports[port_id].dev_conf.rxmode.offloads &
1017 DEV_RX_OFFLOAD_UDP_CKSUM)
1023 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
1024 printf("RX TCP checksum: ");
1025 if (ports[port_id].dev_conf.rxmode.offloads &
1026 DEV_RX_OFFLOAD_TCP_CKSUM)
1032 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
1033 printf("RX SCTP checksum: ");
1034 if (ports[port_id].dev_conf.rxmode.offloads &
1035 DEV_RX_OFFLOAD_SCTP_CKSUM)
1041 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
1042 printf("RX Outer IPv4 checksum: ");
1043 if (ports[port_id].dev_conf.rxmode.offloads &
1044 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
1050 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
1051 printf("RX Outer UDP checksum: ");
1052 if (ports[port_id].dev_conf.rxmode.offloads &
1053 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
1059 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
1060 printf("Large receive offload: ");
1061 if (ports[port_id].dev_conf.rxmode.offloads &
1062 DEV_RX_OFFLOAD_TCP_LRO)
1068 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
1069 printf("HW timestamp: ");
1070 if (ports[port_id].dev_conf.rxmode.offloads &
1071 DEV_RX_OFFLOAD_TIMESTAMP)
1077 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
1078 printf("Rx Keep CRC: ");
1079 if (ports[port_id].dev_conf.rxmode.offloads &
1080 DEV_RX_OFFLOAD_KEEP_CRC)
1086 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
1087 printf("RX offload security: ");
1088 if (ports[port_id].dev_conf.rxmode.offloads &
1089 DEV_RX_OFFLOAD_SECURITY)
1095 if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1096 printf("RX offload buffer split: ");
1097 if (ports[port_id].dev_conf.rxmode.offloads &
1098 RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)
1104 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
1105 printf("VLAN insert: ");
1106 if (ports[port_id].dev_conf.txmode.offloads &
1107 DEV_TX_OFFLOAD_VLAN_INSERT)
1113 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
1114 printf("Double VLANs insert: ");
1115 if (ports[port_id].dev_conf.txmode.offloads &
1116 DEV_TX_OFFLOAD_QINQ_INSERT)
1122 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
1123 printf("TX IPv4 checksum: ");
1124 if (ports[port_id].dev_conf.txmode.offloads &
1125 DEV_TX_OFFLOAD_IPV4_CKSUM)
1131 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
1132 printf("TX UDP checksum: ");
1133 if (ports[port_id].dev_conf.txmode.offloads &
1134 DEV_TX_OFFLOAD_UDP_CKSUM)
1140 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
1141 printf("TX TCP checksum: ");
1142 if (ports[port_id].dev_conf.txmode.offloads &
1143 DEV_TX_OFFLOAD_TCP_CKSUM)
1149 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
1150 printf("TX SCTP checksum: ");
1151 if (ports[port_id].dev_conf.txmode.offloads &
1152 DEV_TX_OFFLOAD_SCTP_CKSUM)
1158 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
1159 printf("TX Outer IPv4 checksum: ");
1160 if (ports[port_id].dev_conf.txmode.offloads &
1161 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
1167 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
1168 printf("TX TCP segmentation: ");
1169 if (ports[port_id].dev_conf.txmode.offloads &
1170 DEV_TX_OFFLOAD_TCP_TSO)
1176 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
1177 printf("TX UDP segmentation: ");
1178 if (ports[port_id].dev_conf.txmode.offloads &
1179 DEV_TX_OFFLOAD_UDP_TSO)
1185 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
1186 printf("TSO for VXLAN tunnel packet: ");
1187 if (ports[port_id].dev_conf.txmode.offloads &
1188 DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
1194 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
1195 printf("TSO for GRE tunnel packet: ");
1196 if (ports[port_id].dev_conf.txmode.offloads &
1197 DEV_TX_OFFLOAD_GRE_TNL_TSO)
1203 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
1204 printf("TSO for IPIP tunnel packet: ");
1205 if (ports[port_id].dev_conf.txmode.offloads &
1206 DEV_TX_OFFLOAD_IPIP_TNL_TSO)
1212 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
1213 printf("TSO for GENEVE tunnel packet: ");
1214 if (ports[port_id].dev_conf.txmode.offloads &
1215 DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
1221 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
1222 printf("IP tunnel TSO: ");
1223 if (ports[port_id].dev_conf.txmode.offloads &
1224 DEV_TX_OFFLOAD_IP_TNL_TSO)
1230 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
1231 printf("UDP tunnel TSO: ");
1232 if (ports[port_id].dev_conf.txmode.offloads &
1233 DEV_TX_OFFLOAD_UDP_TNL_TSO)
1239 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
1240 printf("TX Outer UDP checksum: ");
1241 if (ports[port_id].dev_conf.txmode.offloads &
1242 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
1248 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) {
1249 printf("Tx scheduling on timestamp: ");
1250 if (ports[port_id].dev_conf.txmode.offloads &
1251 DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP)
1260 port_id_is_invalid(portid_t port_id, enum print_warning warning)
1264 if (port_id == (portid_t)RTE_PORT_ALL)
1267 RTE_ETH_FOREACH_DEV(pid)
1271 if (warning == ENABLED_WARN)
1272 printf("Invalid port %d\n", port_id);
1277 void print_valid_ports(void)
1281 printf("The valid ports array is [");
1282 RTE_ETH_FOREACH_DEV(pid) {
1289 vlan_id_is_invalid(uint16_t vlan_id)
1293 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
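/*
 * Validate a PCI register offset: it must be 4-byte aligned and must
 * fall within BAR 0 of the port's PCI device; non-PCI devices are
 * rejected.
 */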
1298 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
1300 const struct rte_pci_device *pci_dev;
1301 const struct rte_bus *bus;
1304 if (reg_off & 0x3) {
1305 printf("Port register offset 0x%X not aligned on a 4-byte "
1311 if (!ports[port_id].dev_info.device) {
1312 printf("Invalid device\n");
1316 bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
1317 if (bus && !strcmp(bus->name, "pci")) {
1318 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
1320 printf("Not a PCI device\n");
1324 pci_len = pci_dev->mem_resource[0].len;
1325 if (reg_off >= pci_len) {
1326 printf("Port %d: register offset %u (0x%X) out of port PCI "
1327 "resource (length=%"PRIu64")\n",
1328 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
1335 reg_bit_pos_is_invalid(uint8_t bit_pos)
1339 printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
1343 #define display_port_and_reg_off(port_id, reg_off) \
1344 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
1347 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1349 display_port_and_reg_off(port_id, (unsigned)reg_off);
1350 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
1354 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
1359 if (port_id_is_invalid(port_id, ENABLED_WARN))
1361 if (port_reg_off_is_invalid(port_id, reg_off))
1363 if (reg_bit_pos_is_invalid(bit_x))
1365 reg_v = port_id_pci_reg_read(port_id, reg_off);
1366 display_port_and_reg_off(port_id, (unsigned)reg_off);
1367 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
1371 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
1372 uint8_t bit1_pos, uint8_t bit2_pos)
1378 if (port_id_is_invalid(port_id, ENABLED_WARN))
1380 if (port_reg_off_is_invalid(port_id, reg_off))
1382 if (reg_bit_pos_is_invalid(bit1_pos))
1384 if (reg_bit_pos_is_invalid(bit2_pos))
1386 if (bit1_pos > bit2_pos)
1387 l_bit = bit2_pos, h_bit = bit1_pos;
1389 l_bit = bit1_pos, h_bit = bit2_pos;
1391 reg_v = port_id_pci_reg_read(port_id, reg_off);
1394 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
1395 display_port_and_reg_off(port_id, (unsigned)reg_off);
1396 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
1397 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
1401 port_reg_display(portid_t port_id, uint32_t reg_off)
1405 if (port_id_is_invalid(port_id, ENABLED_WARN))
1407 if (port_reg_off_is_invalid(port_id, reg_off))
1409 reg_v = port_id_pci_reg_read(port_id, reg_off);
1410 display_port_reg_value(port_id, reg_off, reg_v);
1414 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
1419 if (port_id_is_invalid(port_id, ENABLED_WARN))
1421 if (port_reg_off_is_invalid(port_id, reg_off))
1423 if (reg_bit_pos_is_invalid(bit_pos))
1426 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
1429 reg_v = port_id_pci_reg_read(port_id, reg_off);
1431 reg_v &= ~(1 << bit_pos);
1433 reg_v |= (1 << bit_pos);
1434 port_id_pci_reg_write(port_id, reg_off, reg_v);
1435 display_port_reg_value(port_id, reg_off, reg_v);
1439 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
1440 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
1447 if (port_id_is_invalid(port_id, ENABLED_WARN))
1449 if (port_reg_off_is_invalid(port_id, reg_off))
1451 if (reg_bit_pos_is_invalid(bit1_pos))
1453 if (reg_bit_pos_is_invalid(bit2_pos))
1455 if (bit1_pos > bit2_pos)
1456 l_bit = bit2_pos, h_bit = bit1_pos;
1458 l_bit = bit1_pos, h_bit = bit2_pos;
1460 if ((h_bit - l_bit) < 31)
1461 max_v = (1 << (h_bit - l_bit + 1)) - 1;
1465 if (value > max_v) {
1466 printf("Invalid value %u (0x%x) must be <= %u (0x%x)\n",
1467 (unsigned)value, (unsigned)value,
1468 (unsigned)max_v, (unsigned)max_v);
1471 reg_v = port_id_pci_reg_read(port_id, reg_off);
1472 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1473 reg_v |= (value << l_bit); /* Set changed bits */
1474 port_id_pci_reg_write(port_id, reg_off, reg_v);
1475 display_port_reg_value(port_id, reg_off, reg_v);
1479 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1481 if (port_id_is_invalid(port_id, ENABLED_WARN))
1483 if (port_reg_off_is_invalid(port_id, reg_off))
1485 port_id_pci_reg_write(port_id, reg_off, reg_v);
1486 display_port_reg_value(port_id, reg_off, reg_v);
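/*
 * Set the MTU of a port: check it against the min/max MTU advertised
 * in rte_eth_dev_info, apply it with rte_eth_dev_set_mtu() and, for
 * drivers supporting jumbo frames, toggle DEV_RX_OFFLOAD_JUMBO_FRAME
 * and max_rx_pkt_len according to the driver's Ethernet overhead.
 */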
1490 port_mtu_set(portid_t port_id, uint16_t mtu)
1493 struct rte_port *rte_port = &ports[port_id];
1494 struct rte_eth_dev_info dev_info;
1495 uint16_t eth_overhead;
1498 if (port_id_is_invalid(port_id, ENABLED_WARN))
1501 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1505 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
1506 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
1507 mtu, dev_info.min_mtu, dev_info.max_mtu);
1510 diag = rte_eth_dev_set_mtu(port_id, mtu);
1512 printf("Set MTU failed. diag=%d\n", diag);
1513 else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1515 * The Ethernet overhead used by the driver equals the difference
1516 * between max_rx_pktlen and max_mtu in rte_eth_dev_info when the
1517 * device supports jumbo frames.
1519 eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
1520 if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) {
1521 rte_port->dev_conf.rxmode.offloads |=
1522 DEV_RX_OFFLOAD_JUMBO_FRAME;
1523 rte_port->dev_conf.rxmode.max_rx_pkt_len =
1526 rte_port->dev_conf.rxmode.offloads &=
1527 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1531 /* Generic flow management functions. */
1533 static struct port_flow_tunnel *
1534 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
1536 struct port_flow_tunnel *flow_tunnel;
1538 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1539 if (flow_tunnel->id == port_tunnel_id)
1549 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
1552 switch (tunnel->type) {
1556 case RTE_FLOW_ITEM_TYPE_VXLAN:
1564 struct port_flow_tunnel *
1565 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
1567 struct rte_port *port = &ports[port_id];
1568 struct port_flow_tunnel *flow_tunnel;
1570 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1571 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
1580 void port_flow_tunnel_list(portid_t port_id)
1582 struct rte_port *port = &ports[port_id];
1583 struct port_flow_tunnel *flt;
1585 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1586 printf("port %u tunnel #%u type=%s",
1587 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
1588 if (flt->tunnel.tun_id)
1589 printf(" id=%" PRIu64, flt->tunnel.tun_id);
1594 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
1596 struct rte_port *port = &ports[port_id];
1597 struct port_flow_tunnel *flt;
1599 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1600 if (flt->id == tunnel_id)
1604 LIST_REMOVE(flt, chain);
1606 printf("port %u: flow tunnel #%u destroyed\n",
1607 port_id, tunnel_id);
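/*
 * Register a flow tunnel on the port. Only the "vxlan" type is
 * accepted; a new entry gets an ID one above the current list head
 * (or 1 for an empty list) and is inserted at the head of
 * flow_tunnel_list.
 */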
1611 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
1613 struct rte_port *port = &ports[port_id];
1614 enum rte_flow_item_type type;
1615 struct port_flow_tunnel *flt;
1617 if (!strcmp(ops->type, "vxlan"))
1618 type = RTE_FLOW_ITEM_TYPE_VXLAN;
1620 printf("cannot offload \"%s\" tunnel type\n", ops->type);
1623 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1624 if (flt->tunnel.type == type)
1628 flt = calloc(1, sizeof(*flt));
1630 printf("failed to allocate port flt object\n");
1633 flt->tunnel.type = type;
1634 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
1635 LIST_FIRST(&port->flow_tunnel_list)->id + 1;
1636 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
1638 printf("port %d: flow tunnel #%u type %s\n",
1639 port_id, flt->id, ops->type);
1642 /** Generate a port_flow entry from attributes/pattern/actions. */
1643 static struct port_flow *
1644 port_flow_new(const struct rte_flow_attr *attr,
1645 const struct rte_flow_item *pattern,
1646 const struct rte_flow_action *actions,
1647 struct rte_flow_error *error)
1649 const struct rte_flow_conv_rule rule = {
1651 .pattern_ro = pattern,
1652 .actions_ro = actions,
1654 struct port_flow *pf;
1657 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1660 pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1663 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1667 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1674 /** Print a message out of a flow error. */
1676 port_flow_complain(struct rte_flow_error *error)
1678 static const char *const errstrlist[] = {
1679 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1680 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1681 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1682 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1683 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1684 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1685 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1686 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1687 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1688 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1689 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1690 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1691 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1692 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1693 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1694 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1695 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1699 int err = rte_errno;
1701 if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1702 !errstrlist[error->type])
1703 errstr = "unknown type";
1705 errstr = errstrlist[error->type];
1706 printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
1707 error->type, errstr,
1708 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1709 error->cause), buf) : "",
1710 error->message ? error->message : "(no stated reason)",
1716 rss_config_display(struct rte_flow_action_rss *rss_conf)
1720 if (rss_conf == NULL) {
1721 printf("Invalid rule\n");
1727 if (rss_conf->queue_num == 0)
1729 for (i = 0; i < rss_conf->queue_num; i++)
1730 printf(" %d", rss_conf->queue[i]);
1733 printf(" function: ");
1734 switch (rss_conf->func) {
1735 case RTE_ETH_HASH_FUNCTION_DEFAULT:
1736 printf("default\n");
1738 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1739 printf("toeplitz\n");
1741 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1742 printf("simple_xor\n");
1744 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1745 printf("symmetric_toeplitz\n");
1748 printf("Unknown function\n");
1752 printf(" types:\n");
1753 if (rss_conf->types == 0) {
1757 for (i = 0; rss_type_table[i].str; i++) {
1758 if ((rss_conf->types &
1759 rss_type_table[i].rss_type) ==
1760 rss_type_table[i].rss_type &&
1761 rss_type_table[i].rss_type != 0)
1762 printf(" %s\n", rss_type_table[i].str);
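/*
 * Look up a shared action by ID in the port's actions_list; print an
 * error and return NULL when it is not found.
 */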
1766 static struct port_shared_action *
1767 action_get_by_id(portid_t port_id, uint32_t id)
1769 struct rte_port *port;
1770 struct port_shared_action **ppsa;
1771 struct port_shared_action *psa = NULL;
1773 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1774 port_id == (portid_t)RTE_PORT_ALL)
1776 port = &ports[port_id];
1777 ppsa = &port->actions_list;
1779 if ((*ppsa)->id == id) {
1783 ppsa = &(*ppsa)->next;
1786 printf("Failed to find shared action #%u on port %u\n",
1792 action_alloc(portid_t port_id, uint32_t id,
1793 struct port_shared_action **action)
1795 struct rte_port *port;
1796 struct port_shared_action **ppsa;
1797 struct port_shared_action *psa = NULL;
1800 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1801 port_id == (portid_t)RTE_PORT_ALL)
1803 port = &ports[port_id];
1804 if (id == UINT32_MAX) {
1805 /* taking first available ID */
1806 if (port->actions_list) {
1807 if (port->actions_list->id == UINT32_MAX - 1) {
1808 printf("Highest shared action ID is already"
1809 " assigned, delete it first\n");
1812 id = port->actions_list->id + 1;
1817 psa = calloc(1, sizeof(*psa));
1819 printf("Allocation of port %u shared action failed\n",
1823 ppsa = &port->actions_list;
1824 while (*ppsa && (*ppsa)->id > id)
1825 ppsa = &(*ppsa)->next;
1826 if (*ppsa && (*ppsa)->id == id) {
1827 printf("Shared action #%u is already assigned,"
1828 " delete it first\n", id);
1839 /** Create shared action */
1841 port_shared_action_create(portid_t port_id, uint32_t id,
1842 const struct rte_flow_shared_action_conf *conf,
1843 const struct rte_flow_action *action)
1845 struct port_shared_action *psa;
1847 struct rte_flow_error error;
1849 ret = action_alloc(port_id, id, &psa);
1852 /* Poisoning to make sure PMDs update it in case of error. */
1853 memset(&error, 0x22, sizeof(error));
1854 psa->action = rte_flow_shared_action_create(port_id, conf, action,
1857 uint32_t destroy_id = psa->id;
1858 port_shared_action_destroy(port_id, 1, &destroy_id);
1859 return port_flow_complain(&error);
1861 psa->type = action->type;
1862 printf("Shared action #%u created\n", psa->id);
1866 /** Destroy shared action */
1868 port_shared_action_destroy(portid_t port_id,
1870 const uint32_t *actions)
1872 struct rte_port *port;
1873 struct port_shared_action **tmp;
1877 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1878 port_id == (portid_t)RTE_PORT_ALL)
1880 port = &ports[port_id];
1881 tmp = &port->actions_list;
1885 for (i = 0; i != n; ++i) {
1886 struct rte_flow_error error;
1887 struct port_shared_action *psa = *tmp;
1889 if (actions[i] != psa->id)
1892 * Poisoning to make sure PMDs update it in case
1895 memset(&error, 0x33, sizeof(error));
1897 if (psa->action && rte_flow_shared_action_destroy(
1898 port_id, psa->action, &error)) {
1899 ret = port_flow_complain(&error);
1904 printf("Shared action #%u destroyed\n", psa->id);
1908 tmp = &(*tmp)->next;
1915 /** Get shared action by port + id */
1916 struct rte_flow_shared_action *
1917 port_shared_action_get_by_id(portid_t port_id, uint32_t id)
1920 struct port_shared_action *psa = action_get_by_id(port_id, id);
1922 return (psa) ? psa->action : NULL;
1925 /** Update shared action */
1927 port_shared_action_update(portid_t port_id, uint32_t id,
1928 const struct rte_flow_action *action)
1930 struct rte_flow_error error;
1931 struct rte_flow_shared_action *shared_action;
1933 shared_action = port_shared_action_get_by_id(port_id, id);
1936 if (rte_flow_shared_action_update(port_id, shared_action, action,
1938 return port_flow_complain(&error);
1940 printf("Shared action #%u updated\n", id);
1945 port_shared_action_query(portid_t port_id, uint32_t id)
1947 struct rte_flow_error error;
1948 struct port_shared_action *psa;
1949 uint64_t default_data;
1953 psa = action_get_by_id(port_id, id);
1956 switch (psa->type) {
1957 case RTE_FLOW_ACTION_TYPE_RSS:
1958 data = &default_data;
1961 printf("Shared action %u (type: %d) on port %u doesn't support"
1962 " query\n", id, psa->type, port_id);
1965 if (rte_flow_shared_action_query(port_id, psa->action, data, &error))
1966 ret = port_flow_complain(&error);
1967 switch (psa->type) {
1968 case RTE_FLOW_ACTION_TYPE_RSS:
1970 printf("Shared RSS action:\n\trefs:%u\n",
1971 *((uint32_t *)data));
1975 printf("Shared action %u (type: %d) on port %u doesn't support"
1976 " query\n", id, psa->type, port_id);
1981 static struct port_flow_tunnel *
1982 port_flow_tunnel_offload_cmd_prep(portid_t port_id,
1983 const struct rte_flow_item *pattern,
1984 const struct rte_flow_action *actions,
1985 const struct tunnel_ops *tunnel_ops)
1988 struct rte_port *port;
1989 struct port_flow_tunnel *pft;
1990 struct rte_flow_error error;
1992 port = &ports[port_id];
1993 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id);
1995 printf("failed to locate port flow tunnel #%u\n",
1999 if (tunnel_ops->actions) {
2000 uint32_t num_actions;
2001 const struct rte_flow_action *aptr;
2003 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel,
2005 &pft->num_pmd_actions,
2008 port_flow_complain(&error);
2011 for (aptr = actions, num_actions = 1;
2012 aptr->type != RTE_FLOW_ACTION_TYPE_END;
2013 aptr++, num_actions++);
2014 pft->actions = malloc(
2015 (num_actions + pft->num_pmd_actions) *
2016 sizeof(actions[0]));
2017 if (!pft->actions) {
2018 rte_flow_tunnel_action_decap_release(
2019 port_id, pft->actions,
2020 pft->num_pmd_actions, &error);
2023 rte_memcpy(pft->actions, pft->pmd_actions,
2024 pft->num_pmd_actions * sizeof(actions[0]));
2025 rte_memcpy(pft->actions + pft->num_pmd_actions, actions,
2026 num_actions * sizeof(actions[0]));
2028 if (tunnel_ops->items) {
2030 const struct rte_flow_item *iptr;
2032 ret = rte_flow_tunnel_match(port_id, &pft->tunnel,
2034 &pft->num_pmd_items,
2037 port_flow_complain(&error);
2040 for (iptr = pattern, num_items = 1;
2041 iptr->type != RTE_FLOW_ITEM_TYPE_END;
2042 iptr++, num_items++);
2043 pft->items = malloc((num_items + pft->num_pmd_items) *
2044 sizeof(pattern[0]));
2046 rte_flow_tunnel_item_release(
2047 port_id, pft->pmd_items,
2048 pft->num_pmd_items, &error);
2051 rte_memcpy(pft->items, pft->pmd_items,
2052 pft->num_pmd_items * sizeof(pattern[0]));
2053 rte_memcpy(pft->items + pft->num_pmd_items, pattern,
2054 num_items * sizeof(pattern[0]));
2061 port_flow_tunnel_offload_cmd_release(portid_t port_id,
2062 const struct tunnel_ops *tunnel_ops,
2063 struct port_flow_tunnel *pft)
2065 struct rte_flow_error error;
2067 if (tunnel_ops->actions) {
2069 rte_flow_tunnel_action_decap_release(
2070 port_id, pft->pmd_actions,
2071 pft->num_pmd_actions, &error);
2072 pft->actions = NULL;
2073 pft->pmd_actions = NULL;
2075 if (tunnel_ops->items) {
2077 rte_flow_tunnel_item_release(port_id, pft->pmd_items,
2081 pft->pmd_items = NULL;
2085 /** Validate flow rule. */
2087 port_flow_validate(portid_t port_id,
2088 const struct rte_flow_attr *attr,
2089 const struct rte_flow_item *pattern,
2090 const struct rte_flow_action *actions,
2091 const struct tunnel_ops *tunnel_ops)
2093 struct rte_flow_error error;
2094 struct port_flow_tunnel *pft = NULL;
2096 /* Poisoning to make sure PMDs update it in case of error. */
2097 memset(&error, 0x11, sizeof(error));
2098 if (tunnel_ops->enabled) {
2099 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
2100 actions, tunnel_ops);
2104 pattern = pft->items;
2106 actions = pft->actions;
2108 if (rte_flow_validate(port_id, attr, pattern, actions, &error))
2109 return port_flow_complain(&error);
2110 if (tunnel_ops->enabled)
2111 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
2112 printf("Flow rule validated\n");
2116 /** Update age action context by port_flow pointer. */
2118 update_age_action_context(const struct rte_flow_action *actions,
2119 struct port_flow *pf)
2121 struct rte_flow_action_age *age = NULL;
2123 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2124 switch (actions->type) {
2125 case RTE_FLOW_ACTION_TYPE_AGE:
2126 age = (struct rte_flow_action_age *)
2127 (uintptr_t)actions->conf;
2136 /** Create flow rule. */
2138 port_flow_create(portid_t port_id,
2139 const struct rte_flow_attr *attr,
2140 const struct rte_flow_item *pattern,
2141 const struct rte_flow_action *actions,
2142 const struct tunnel_ops *tunnel_ops)
2144 struct rte_flow *flow;
2145 struct rte_port *port;
2146 struct port_flow *pf;
2148 struct rte_flow_error error;
2149 struct port_flow_tunnel *pft = NULL;
2151 port = &ports[port_id];
2152 if (port->flow_list) {
2153 if (port->flow_list->id == UINT32_MAX) {
2154 printf("Highest rule ID is already assigned, delete"
2158 id = port->flow_list->id + 1;
2160 if (tunnel_ops->enabled) {
2161 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
2162 actions, tunnel_ops);
2166 pattern = pft->items;
2168 actions = pft->actions;
2170 pf = port_flow_new(attr, pattern, actions, &error);
2172 return port_flow_complain(&error);
2173 update_age_action_context(actions, pf);
2174 /* Poisoning to make sure PMDs update it in case of error. */
2175 memset(&error, 0x22, sizeof(error));
2176 flow = rte_flow_create(port_id, attr, pattern, actions, &error);
2179 return port_flow_complain(&error);
2181 pf->next = port->flow_list;
2184 port->flow_list = pf;
2185 if (tunnel_ops->enabled)
2186 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
2187 printf("Flow rule #%u created\n", pf->id);
2191 /** Destroy a number of flow rules. */
2193 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
2195 struct rte_port *port;
2196 struct port_flow **tmp;
2200 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2201 port_id == (portid_t)RTE_PORT_ALL)
2203 port = &ports[port_id];
2204 tmp = &port->flow_list;
2208 for (i = 0; i != n; ++i) {
2209 struct rte_flow_error error;
2210 struct port_flow *pf = *tmp;
2212 if (rule[i] != pf->id)
2215 * Poisoning to make sure PMDs update it in case
2218 memset(&error, 0x33, sizeof(error));
2219 if (rte_flow_destroy(port_id, pf->flow, &error)) {
2220 ret = port_flow_complain(&error);
2223 printf("Flow rule #%u destroyed\n", pf->id);
2229 tmp = &(*tmp)->next;
2235 /** Remove all flow rules. */
2237 port_flow_flush(portid_t port_id)
2239 struct rte_flow_error error;
2240 struct rte_port *port;
2243 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2244 port_id == (portid_t)RTE_PORT_ALL)
2247 port = &ports[port_id];
2249 if (port->flow_list == NULL)
2252 /* Poisoning to make sure PMDs update it in case of error. */
2253 memset(&error, 0x44, sizeof(error));
2254 if (rte_flow_flush(port_id, &error)) {
2255 port_flow_complain(&error);
2258 while (port->flow_list) {
2259 struct port_flow *pf = port->flow_list->next;
2261 free(port->flow_list);
2262 port->flow_list = pf;
2267 /** Dump all flow rules. */
2269 port_flow_dump(portid_t port_id, const char *file_name)
2272 FILE *file = stdout;
2273 struct rte_flow_error error;
2275 if (file_name && strlen(file_name)) {
2276 file = fopen(file_name, "w");
2278 printf("Failed to create file %s: %s\n", file_name,
2283 ret = rte_flow_dev_dump(port_id, file, &error);
2285 port_flow_complain(&error);
2286 printf("Failed to dump flow: %s\n", strerror(-ret));
2288 printf("Flow dump finished\n");
2289 if (file_name && strlen(file_name))
2294 /** Query a flow rule. */
2296 port_flow_query(portid_t port_id, uint32_t rule,
2297 const struct rte_flow_action *action)
2299 struct rte_flow_error error;
2300 struct rte_port *port;
2301 struct port_flow *pf;
2304 struct rte_flow_query_count count;
2305 struct rte_flow_action_rss rss_conf;
2306 struct rte_flow_query_age age;
2310 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2311 port_id == (portid_t)RTE_PORT_ALL)
2313 port = &ports[port_id];
2314 for (pf = port->flow_list; pf; pf = pf->next)
2318 printf("Flow rule #%u not found\n", rule);
2321 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
2322 &name, sizeof(name),
2323 (void *)(uintptr_t)action->type, &error);
2325 return port_flow_complain(&error);
2326 switch (action->type) {
2327 case RTE_FLOW_ACTION_TYPE_COUNT:
2328 case RTE_FLOW_ACTION_TYPE_RSS:
2329 case RTE_FLOW_ACTION_TYPE_AGE:
2332 printf("Cannot query action type %d (%s)\n",
2333 action->type, name);
2336 /* Poisoning to make sure PMDs update it in case of error. */
2337 memset(&error, 0x55, sizeof(error));
2338 memset(&query, 0, sizeof(query));
2339 if (rte_flow_query(port_id, pf->flow, action, &query, &error))
2340 return port_flow_complain(&error);
2341 switch (action->type) {
2342 case RTE_FLOW_ACTION_TYPE_COUNT:
2346 " hits: %" PRIu64 "\n"
2347 " bytes: %" PRIu64 "\n",
2349 query.count.hits_set,
2350 query.count.bytes_set,
2354 case RTE_FLOW_ACTION_TYPE_RSS:
2355 rss_config_display(&query.rss_conf);
2357 case RTE_FLOW_ACTION_TYPE_AGE:
2360 " sec_since_last_hit_valid: %u\n"
2361 " sec_since_last_hit: %" PRIu32 "\n",
2364 query.age.sec_since_last_hit_valid,
2365 query.age.sec_since_last_hit);
2368 printf("Cannot display result for action type %d (%s)\n",
2369 action->type, name);
2375 /** List and optionally destroy all aged flows. */
2377 port_flow_aged(portid_t port_id, uint8_t destroy)
2380 int nb_context, total = 0, idx;
2381 struct rte_flow_error error;
2382 struct port_flow *pf;
2384 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2385 port_id == (portid_t)RTE_PORT_ALL)
2387 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
2388 printf("Port %u total aged flows: %d\n", port_id, total);
2390 port_flow_complain(&error);
2395 contexts = malloc(sizeof(void *) * total);
2396 if (contexts == NULL) {
2397 printf("Cannot allocate contexts for aged flow\n");
2400 printf("ID\tGroup\tPrio\tAttr\n");
2401 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
2402 if (nb_context != total) {
2403 printf("Port:%d get aged flows count(%d) != total(%d)\n",
2404 port_id, nb_context, total);
2408 for (idx = 0; idx < nb_context; idx++) {
2409 pf = (struct port_flow *)contexts[idx];
2411 printf("Error: got NULL context on port %u\n", port_id);
2414 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t\n",
2416 pf->rule.attr->group,
2417 pf->rule.attr->priority,
2418 pf->rule.attr->ingress ? 'i' : '-',
2419 pf->rule.attr->egress ? 'e' : '-',
2420 pf->rule.attr->transfer ? 't' : '-');
2428 for (idx = 0; idx < nb_context; idx++) {
2429 pf = (struct port_flow *)contexts[idx];
2433 ret = port_flow_destroy(port_id, 1, &flow_id);
2437 printf("%d flows destroyed\n", total);
2442 /** List flow rules. */
2444 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
2446 struct rte_port *port;
2447 struct port_flow *pf;
2448 struct port_flow *list = NULL;
2451 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2452 port_id == (portid_t)RTE_PORT_ALL)
2454 port = &ports[port_id];
2455 if (!port->flow_list)
2457 /* Sort flows by group, priority and ID. */
2458 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2459 struct port_flow **tmp;
2460 const struct rte_flow_attr *curr = pf->rule.attr;
2463 /* Filter out unwanted groups. */
2464 for (i = 0; i != n; ++i)
2465 if (curr->group == group[i])
2470 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
2471 const struct rte_flow_attr *comp = (*tmp)->rule.attr;
2473 if (curr->group > comp->group ||
2474 (curr->group == comp->group &&
2475 curr->priority > comp->priority) ||
2476 (curr->group == comp->group &&
2477 curr->priority == comp->priority &&
2478 pf->id > (*tmp)->id))
2485 printf("ID\tGroup\tPrio\tAttr\tRule\n");
2486 for (pf = list; pf != NULL; pf = pf->tmp) {
2487 const struct rte_flow_item *item = pf->rule.pattern;
2488 const struct rte_flow_action *action = pf->rule.actions;
2491 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
2493 pf->rule.attr->group,
2494 pf->rule.attr->priority,
2495 pf->rule.attr->ingress ? 'i' : '-',
2496 pf->rule.attr->egress ? 'e' : '-',
2497 pf->rule.attr->transfer ? 't' : '-');
2498 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
2499 if ((uint32_t)item->type > INT_MAX)
2500 name = "PMD_INTERNAL";
2501 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
2502 &name, sizeof(name),
2503 (void *)(uintptr_t)item->type,
2506 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
2507 printf("%s ", name);
2511 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
2512 if ((uint32_t)action->type > INT_MAX)
2513 name = "PMD_INTERNAL";
2514 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
2515 &name, sizeof(name),
2516 (void *)(uintptr_t)action->type,
2519 if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
2520 printf(" %s", name);
2527 /** Restrict ingress traffic to the defined flow rules. */
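/*
 * Illustrative testpmd usage (CLI syntax assumed): "flow isolate 0 1"
 * restricts ingress traffic on port 0 to its defined flow rules, while
 * "flow isolate 0 0" lifts the restriction again.
 */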
2529 port_flow_isolate(portid_t port_id, int set)
2531 struct rte_flow_error error;
2533 /* Poisoning to make sure PMDs update it in case of error. */
2534 memset(&error, 0x66, sizeof(error));
2535 if (rte_flow_isolate(port_id, set, &error))
2536 return port_flow_complain(&error);
2537 printf("Ingress traffic on port %u is %s to the defined flow rules\n",
2539 set ? "now restricted" : "not restricted anymore");
2544 * RX/TX ring descriptors display functions.
2547 rx_queue_id_is_invalid(queueid_t rxq_id)
2549 if (rxq_id < nb_rxq)
2551 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
2556 tx_queue_id_is_invalid(queueid_t txq_id)
2558 if (txq_id < nb_txq)
2560 printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
2565 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
2567 struct rte_port *port = &ports[port_id];
2568 struct rte_eth_rxq_info rx_qinfo;
2571 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
2573 *ring_size = rx_qinfo.nb_desc;
2577 if (ret != -ENOTSUP)
2580 * If rte_eth_rx_queue_info_get() is not supported by this PMD,
2581 * the ring size stored in testpmd is used for the validity check instead.
2582 * When the rxq is configured by rte_eth_rx_queue_setup() with nb_rx_desc
2583 * set to 0, the PMD uses its own default value to set up the rxq.
2584 * If that default value is also 0, RTE_ETH_DEV_FALLBACK_RX_RINGSIZE
2585 * is used to set up the rxq.
2587 if (port->nb_rx_desc[rxq_id])
2588 *ring_size = port->nb_rx_desc[rxq_id];
2589 else if (port->dev_info.default_rxportconf.ring_size)
2590 *ring_size = port->dev_info.default_rxportconf.ring_size;
2592 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2597 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
2599 struct rte_port *port = &ports[port_id];
2600 struct rte_eth_txq_info tx_qinfo;
2603 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
2605 *ring_size = tx_qinfo.nb_desc;
2609 if (ret != -ENOTSUP)
2612 * If rte_eth_tx_queue_info_get() is not supported by this PMD,
2613 * the ring size stored in testpmd is used for the validity check instead.
2614 * When the txq is configured by rte_eth_tx_queue_setup() with nb_tx_desc
2615 * set to 0, the PMD uses its own default value to set up the txq.
2616 * If that default value is also 0, RTE_ETH_DEV_FALLBACK_TX_RINGSIZE
2617 * is used to set up the txq.
2619 if (port->nb_tx_desc[txq_id])
2620 *ring_size = port->nb_tx_desc[txq_id];
2621 else if (port->dev_info.default_txportconf.ring_size)
2622 *ring_size = port->dev_info.default_txportconf.ring_size;
2624 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2629 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
2634 ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
2638 if (rxdesc_id < ring_size)
2641 printf("Invalid RX descriptor %u (must be < ring_size=%u)\n",
2642 rxdesc_id, ring_size);
2647 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
2652 ret = get_tx_ring_size(port_id, txq_id, &ring_size);
2656 if (txdesc_id < ring_size)
2659 printf("Invalid TX descriptor %u (must be < ring_size=%u)\n",
2660 txdesc_id, ring_size);
2664 static const struct rte_memzone *
2665 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
2667 char mz_name[RTE_MEMZONE_NAMESIZE];
2668 const struct rte_memzone *mz;
2670 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
2671 port_id, q_id, ring_name);
2672 mz = rte_memzone_lookup(mz_name);
2674 printf("%s ring memory zone of port %d, queue %d not found"
2675 " (zone name = %s)\n",
2676 ring_name, port_id, q_id, mz_name);
2680 union igb_ring_dword {
2683 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2693 struct igb_ring_desc_32_bytes {
2694 union igb_ring_dword lo_dword;
2695 union igb_ring_dword hi_dword;
2696 union igb_ring_dword resv1;
2697 union igb_ring_dword resv2;
2700 struct igb_ring_desc_16_bytes {
2701 union igb_ring_dword lo_dword;
2702 union igb_ring_dword hi_dword;
2706 ring_rxd_display_dword(union igb_ring_dword dword)
2708 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
2709 (unsigned)dword.words.hi);
2713 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
2714 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2717 __rte_unused portid_t port_id,
2721 struct igb_ring_desc_16_bytes *ring =
2722 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2723 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2725 struct rte_eth_dev_info dev_info;
2727 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2731 if (strstr(dev_info.driver_name, "i40e") != NULL) {
2732 /* 32 bytes RX descriptor, i40e only */
2733 struct igb_ring_desc_32_bytes *ring =
2734 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
2735 ring[desc_id].lo_dword.dword =
2736 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2737 ring_rxd_display_dword(ring[desc_id].lo_dword);
2738 ring[desc_id].hi_dword.dword =
2739 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2740 ring_rxd_display_dword(ring[desc_id].hi_dword);
2741 ring[desc_id].resv1.dword =
2742 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
2743 ring_rxd_display_dword(ring[desc_id].resv1);
2744 ring[desc_id].resv2.dword =
2745 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
2746 ring_rxd_display_dword(ring[desc_id].resv2);
2751 /* 16 bytes RX descriptor */
2752 ring[desc_id].lo_dword.dword =
2753 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2754 ring_rxd_display_dword(ring[desc_id].lo_dword);
2755 ring[desc_id].hi_dword.dword =
2756 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2757 ring_rxd_display_dword(ring[desc_id].hi_dword);
2761 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
2763 struct igb_ring_desc_16_bytes *ring;
2764 struct igb_ring_desc_16_bytes txd;
2766 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2767 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2768 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2769 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
2770 (unsigned)txd.lo_dword.words.lo,
2771 (unsigned)txd.lo_dword.words.hi,
2772 (unsigned)txd.hi_dword.words.lo,
2773 (unsigned)txd.hi_dword.words.hi);
2777 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
2779 const struct rte_memzone *rx_mz;
2781 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
2783 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
2786 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
2790 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
2792 const struct rte_memzone *tx_mz;
2794 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
2796 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
2799 ring_tx_descriptor_display(tx_mz, txd_id);
2803 fwd_lcores_config_display(void)
2807 printf("List of forwarding lcores:");
2808 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
2809 printf(" %2u", fwd_lcores_cpuids[lc_id]);
2813 rxtx_config_display(void)
2818 printf(" %s packet forwarding%s packets/burst=%d\n",
2819 cur_fwd_eng->fwd_mode_name,
2820 retry_enabled == 0 ? "" : " with retry",
2823 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
2824 printf(" packet len=%u - nb packet segments=%d\n",
2825 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
2827 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
2828 nb_fwd_lcores, nb_fwd_ports);
2830 RTE_ETH_FOREACH_DEV(pid) {
2831 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
2832 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
2833 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
2834 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
2835 struct rte_eth_rxq_info rx_qinfo;
2836 struct rte_eth_txq_info tx_qinfo;
2837 uint16_t rx_free_thresh_tmp;
2838 uint16_t tx_free_thresh_tmp;
2839 uint16_t tx_rs_thresh_tmp;
2840 uint16_t nb_rx_desc_tmp;
2841 uint16_t nb_tx_desc_tmp;
2842 uint64_t offloads_tmp;
2843 uint8_t pthresh_tmp;
2844 uint8_t hthresh_tmp;
2845 uint8_t wthresh_tmp;
2848 /* per port config */
2849 printf(" port %d: RX queue number: %d Tx queue number: %d\n",
2850 (unsigned int)pid, nb_rxq, nb_txq);
2852 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
2853 ports[pid].dev_conf.rxmode.offloads,
2854 ports[pid].dev_conf.txmode.offloads);
2856 /* per rx queue config only for first queue to be less verbose */
2857 for (qid = 0; qid < 1; qid++) {
2858 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
2860 nb_rx_desc_tmp = nb_rx_desc[qid];
2861 rx_free_thresh_tmp =
2862 rx_conf[qid].rx_free_thresh;
2863 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
2864 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
2865 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
2866 offloads_tmp = rx_conf[qid].offloads;
2868 nb_rx_desc_tmp = rx_qinfo.nb_desc;
2869 rx_free_thresh_tmp =
2870 rx_qinfo.conf.rx_free_thresh;
2871 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
2872 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
2873 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
2874 offloads_tmp = rx_qinfo.conf.offloads;
2877 printf(" RX queue: %d\n", qid);
2878 printf(" RX desc=%d - RX free threshold=%d\n",
2879 nb_rx_desc_tmp, rx_free_thresh_tmp);
2880 printf(" RX threshold registers: pthresh=%d hthresh=%d "
2882 pthresh_tmp, hthresh_tmp, wthresh_tmp);
2883 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp);
2886 /* per tx queue config only for first queue to be less verbose */
2887 for (qid = 0; qid < 1; qid++) {
2888 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
2890 nb_tx_desc_tmp = nb_tx_desc[qid];
2891 tx_free_thresh_tmp =
2892 tx_conf[qid].tx_free_thresh;
2893 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
2894 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
2895 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
2896 offloads_tmp = tx_conf[qid].offloads;
2897 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
2899 nb_tx_desc_tmp = tx_qinfo.nb_desc;
2900 tx_free_thresh_tmp =
2901 tx_qinfo.conf.tx_free_thresh;
2902 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
2903 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
2904 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
2905 offloads_tmp = tx_qinfo.conf.offloads;
2906 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
2909 printf(" TX queue: %d\n", qid);
2910 printf(" TX desc=%d - TX free threshold=%d\n",
2911 nb_tx_desc_tmp, tx_free_thresh_tmp);
2912 printf(" TX threshold registers: pthresh=%d hthresh=%d "
2914 pthresh_tmp, hthresh_tmp, wthresh_tmp);
2915 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
2916 offloads_tmp, tx_rs_thresh_tmp);
2922 port_rss_reta_info(portid_t port_id,
2923 struct rte_eth_rss_reta_entry64 *reta_conf,
2924 uint16_t nb_entries)
2926 uint16_t i, idx, shift;
2929 if (port_id_is_invalid(port_id, ENABLED_WARN))
2932 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
2934 printf("Failed to get RSS RETA info, return code = %d\n", ret);
2938 for (i = 0; i < nb_entries; i++) {
2939 idx = i / RTE_RETA_GROUP_SIZE;
2940 shift = i % RTE_RETA_GROUP_SIZE;
2941 if (!(reta_conf[idx].mask & (1ULL << shift)))
2943 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
2944 i, reta_conf[idx].reta[shift]);
2949 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
2953 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
2955 struct rte_eth_rss_conf rss_conf = {0};
2956 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
2960 struct rte_eth_dev_info dev_info;
2961 uint8_t hash_key_size;
2964 if (port_id_is_invalid(port_id, ENABLED_WARN))
2967 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2971 if (dev_info.hash_key_size > 0 &&
2972 dev_info.hash_key_size <= sizeof(rss_key))
2973 hash_key_size = dev_info.hash_key_size;
2975 printf("dev_info did not provide a valid hash key size\n");
2979 /* Get RSS hash key if asked to display it */
2980 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
2981 rss_conf.rss_key_len = hash_key_size;
2982 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2986 printf("port index %d invalid\n", port_id);
2989 printf("operation not supported by device\n");
2992 printf("operation failed - diag=%d\n", diag);
2997 rss_hf = rss_conf.rss_hf;
2999 printf("RSS disabled\n");
3002 printf("RSS functions:\n ");
3003 for (i = 0; rss_type_table[i].str; i++) {
3004 if (rss_hf & rss_type_table[i].rss_type)
3005 printf("%s ", rss_type_table[i].str);
3010 printf("RSS key:\n");
3011 for (i = 0; i < hash_key_size; i++)
3012 printf("%02X", rss_key[i]);
3017 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
3020 struct rte_eth_rss_conf rss_conf;
3024 rss_conf.rss_key = NULL;
3025 rss_conf.rss_key_len = hash_key_len;
3026 rss_conf.rss_hf = 0;
3027 for (i = 0; rss_type_table[i].str; i++) {
3028 if (!strcmp(rss_type_table[i].str, rss_type))
3029 rss_conf.rss_hf = rss_type_table[i].rss_type;
3031 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
3033 rss_conf.rss_key = hash_key;
3034 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
3041 printf("port index %d invalid\n", port_id);
3044 printf("operation not supported by device\n");
3047 printf("operation failed - diag=%d\n", diag);
3053 * Setup forwarding configuration for each logical core.
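 * Streams are spread as evenly as possible. For example, 10 streams over
 * 4 lcores gives the first two lcores 2 streams each and the last two
 * lcores 3 streams each (the remainder always goes to the trailing lcores).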
3056 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
3058 streamid_t nb_fs_per_lcore;
3066 nb_fs = cfg->nb_fwd_streams;
3067 nb_fc = cfg->nb_fwd_lcores;
3068 if (nb_fs <= nb_fc) {
3069 nb_fs_per_lcore = 1;
3072 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
3073 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
3076 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
3078 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
3079 fwd_lcores[lc_id]->stream_idx = sm_id;
3080 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
3081 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
3085 * Assign extra remaining streams, if any.
3087 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
3088 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
3089 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
3090 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
3091 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
3096 fwd_topology_tx_port_get(portid_t rxp)
3098 static int warning_once = 1;
3100 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
3102 switch (port_topology) {
3104 case PORT_TOPOLOGY_PAIRED:
3105 if ((rxp & 0x1) == 0) {
3106 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
3109 printf("\nWarning! port-topology=paired"
3110 " and odd number of forwarding ports,"
3111 " the last port will pair with"
3118 case PORT_TOPOLOGY_CHAINED:
3119 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
3120 case PORT_TOPOLOGY_LOOP:
3126 simple_fwd_config_setup(void)
3130 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
3131 cur_fwd_config.nb_fwd_streams =
3132 (streamid_t) cur_fwd_config.nb_fwd_ports;
3134 /* reinitialize forwarding streams */
3138 * In the simple forwarding test, the number of forwarding cores
3139 * must be lower than or equal to the number of forwarding ports.
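 * For example, with 8 forwarding cores and 4 forwarding ports, only 4 cores
 * are used, each handling the single stream of one port.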
3141 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3142 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
3143 cur_fwd_config.nb_fwd_lcores =
3144 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
3145 setup_fwd_config_of_each_lcore(&cur_fwd_config);
3147 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
3148 fwd_streams[i]->rx_port = fwd_ports_ids[i];
3149 fwd_streams[i]->rx_queue = 0;
3150 fwd_streams[i]->tx_port =
3151 fwd_ports_ids[fwd_topology_tx_port_get(i)];
3152 fwd_streams[i]->tx_queue = 0;
3153 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
3154 fwd_streams[i]->retry_enabled = retry_enabled;
3159 * For the RSS forwarding test, all streams are distributed over the lcores.
3160 * Each stream is composed of an RX queue to poll on an RX port for input
3161 * packets, and of a TX queue of a TX port to which forwarded packets are sent.
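 * For example, with 2 forwarding ports and 4 queues per port, 8 streams are
 * created (one per port/queue pair) and spread over the forwarding lcores by
 * setup_fwd_config_of_each_lcore().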
3164 rss_fwd_config_setup(void)
3175 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3176 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3177 cur_fwd_config.nb_fwd_streams =
3178 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
3180 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
3181 cur_fwd_config.nb_fwd_lcores =
3182 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
3184 /* reinitialize forwarding streams */
3187 setup_fwd_config_of_each_lcore(&cur_fwd_config);
3189 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
3190 struct fwd_stream *fs;
3192 fs = fwd_streams[sm_id];
3193 txp = fwd_topology_tx_port_get(rxp);
3194 fs->rx_port = fwd_ports_ids[rxp];
3196 fs->tx_port = fwd_ports_ids[txp];
3198 fs->peer_addr = fs->tx_port;
3199 fs->retry_enabled = retry_enabled;
3201 if (rxp < nb_fwd_ports)
3209 * For the DCB forwarding test, each core is assigned to one traffic class.
3211 * Each core is assigned multiple streams, each stream being composed of
3212 * an RX queue to poll on an RX port for input packets, associated with
3213 * a TX queue of a TX port to which forwarded packets are sent. All RX and
3214 * TX queues are mapped to the same traffic class.
3215 * If VMDQ and DCB co-exist, each traffic class on the different pools shares the same core.
3219 dcb_fwd_config_setup(void)
3221 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
3222 portid_t txp, rxp = 0;
3223 queueid_t txq, rxq = 0;
3225 uint16_t nb_rx_queue, nb_tx_queue;
3226 uint16_t i, j, k, sm_id = 0;
3229 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3230 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3231 cur_fwd_config.nb_fwd_streams =
3232 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
3234 /* reinitialize forwarding streams */
3238 /* get the dcb info on the first RX and TX ports */
3239 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
3240 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
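/*
 * Each forwarding lcore takes the streams of one traffic class across all
 * VMDQ pools; the next lcore handles the next traffic class, and the RX/TX
 * port pair advances once every traffic class has been covered.
 */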
3242 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
3243 fwd_lcores[lc_id]->stream_nb = 0;
3244 fwd_lcores[lc_id]->stream_idx = sm_id;
3245 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
3246 /* if nb_queue is zero, this TC is
3247 * not enabled on the pool
3249 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
3251 k = fwd_lcores[lc_id]->stream_nb +
3252 fwd_lcores[lc_id]->stream_idx;
3253 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
3254 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
3255 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
3256 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
3257 for (j = 0; j < nb_rx_queue; j++) {
3258 struct fwd_stream *fs;
3260 fs = fwd_streams[k + j];
3261 fs->rx_port = fwd_ports_ids[rxp];
3262 fs->rx_queue = rxq + j;
3263 fs->tx_port = fwd_ports_ids[txp];
3264 fs->tx_queue = txq + j % nb_tx_queue;
3265 fs->peer_addr = fs->tx_port;
3266 fs->retry_enabled = retry_enabled;
3268 fwd_lcores[lc_id]->stream_nb +=
3269 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
3271 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
3274 if (tc < rxp_dcb_info.nb_tcs)
3276 /* Restart from TC 0 on next RX port */
3278 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
3280 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
3283 if (rxp >= nb_fwd_ports)
3285 /* get the dcb information on next RX and TX ports */
3286 if ((rxp & 0x1) == 0)
3287 txp = (portid_t) (rxp + 1);
3289 txp = (portid_t) (rxp - 1);
3290 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
3291 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
3296 icmp_echo_config_setup(void)
3303 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
3304 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
3305 (nb_txq * nb_fwd_ports);
3307 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3308 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3309 cur_fwd_config.nb_fwd_streams =
3310 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
3311 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
3312 cur_fwd_config.nb_fwd_lcores =
3313 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
3314 if (verbose_level > 0) {
3315 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
3317 cur_fwd_config.nb_fwd_lcores,
3318 cur_fwd_config.nb_fwd_ports,
3319 cur_fwd_config.nb_fwd_streams);
3322 /* reinitialize forwarding streams */
3324 setup_fwd_config_of_each_lcore(&cur_fwd_config);
3326 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
3327 if (verbose_level > 0)
3328 printf(" core=%d: \n", lc_id);
3329 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
3330 struct fwd_stream *fs;
3331 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
3332 fs->rx_port = fwd_ports_ids[rxp];
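/* In ICMP echo mode, replies go back out the port they arrived on. */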
3334 fs->tx_port = fs->rx_port;
3336 fs->peer_addr = fs->tx_port;
3337 fs->retry_enabled = retry_enabled;
3338 if (verbose_level > 0)
3339 printf(" stream=%d port=%d rxq=%d txq=%d\n",
3340 sm_id, fs->rx_port, fs->rx_queue,
3342 rxq = (queueid_t) (rxq + 1);
3343 if (rxq == nb_rxq) {
3345 rxp = (portid_t) (rxp + 1);
3352 fwd_config_setup(void)
3354 cur_fwd_config.fwd_eng = cur_fwd_eng;
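/*
 * Pick the stream mapping strategy: ICMP echo uses its own setup,
 * multi-queue configurations use the DCB or RSS mapping, and anything else
 * falls back to one stream per forwarding port.
 */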
3355 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
3356 icmp_echo_config_setup();
3360 if ((nb_rxq > 1) && (nb_txq > 1)) {
3362 dcb_fwd_config_setup();
3364 rss_fwd_config_setup();
3367 simple_fwd_config_setup();
3371 mp_alloc_to_str(uint8_t mode)
3374 case MP_ALLOC_NATIVE:
3380 case MP_ALLOC_XMEM_HUGE:
3390 pkt_fwd_config_display(struct fwd_config *cfg)
3392 struct fwd_stream *fs;
3396 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
3397 "NUMA support %s, MP allocation mode: %s\n",
3398 cfg->fwd_eng->fwd_mode_name,
3399 retry_enabled == 0 ? "" : " with retry",
3400 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
3401 numa_support == 1 ? "enabled" : "disabled",
3402 mp_alloc_to_str(mp_alloc_type));
3405 printf("TX retry num: %u, delay between TX retries: %uus\n",
3406 burst_tx_retry_num, burst_tx_delay_time);
3407 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
3408 printf("Logical Core %u (socket %u) forwards packets on "
3410 fwd_lcores_cpuids[lc_id],
3411 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
3412 fwd_lcores[lc_id]->stream_nb);
3413 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
3414 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
3415 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
3416 "P=%d/Q=%d (socket %u) ",
3417 fs->rx_port, fs->rx_queue,
3418 ports[fs->rx_port].socket_id,
3419 fs->tx_port, fs->tx_queue,
3420 ports[fs->tx_port].socket_id);
3421 print_ethaddr("peer=",
3422 &peer_eth_addrs[fs->peer_addr]);
3430 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
3432 struct rte_ether_addr new_peer_addr;
3433 if (!rte_eth_dev_is_valid_port(port_id)) {
3434 printf("Error: Invalid port number %i\n", port_id);
3437 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
3438 printf("Error: Invalid ethernet address: %s\n", peer_addr);
3441 peer_eth_addrs[port_id] = new_peer_addr;
3445 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
3448 unsigned int lcore_cpuid;
3453 for (i = 0; i < nb_lc; i++) {
3454 lcore_cpuid = lcorelist[i];
3455 if (! rte_lcore_is_enabled(lcore_cpuid)) {
3456 printf("lcore %u not enabled\n", lcore_cpuid);
3459 if (lcore_cpuid == rte_get_master_lcore()) {
3460 printf("lcore %u cannot be used for packet forwarding: "
3461 "it is the master lcore, reserved for command "
3462 "line parsing only\n",
3467 fwd_lcores_cpuids[i] = lcore_cpuid;
3469 if (record_now == 0) {
3473 nb_cfg_lcores = (lcoreid_t) nb_lc;
3474 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
3475 printf("previous number of forwarding cores %u - changed to "
3476 "number of configured cores %u\n",
3477 (unsigned int) nb_fwd_lcores, nb_lc);
3478 nb_fwd_lcores = (lcoreid_t) nb_lc;
3485 set_fwd_lcores_mask(uint64_t lcoremask)
3487 unsigned int lcorelist[64];
3491 if (lcoremask == 0) {
3492 printf("Invalid zero mask of cores\n");
3496 for (i = 0; i < 64; i++) {
3497 if (! ((uint64_t)(1ULL << i) & lcoremask))
3499 lcorelist[nb_lc++] = i;
3501 return set_fwd_lcores_list(lcorelist, nb_lc);
3505 set_fwd_lcores_number(uint16_t nb_lc)
3507 if (nb_lc > nb_cfg_lcores) {
3508 printf("nb fwd cores %u > %u (max. number of configured "
3509 "lcores) - ignored\n",
3510 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
3513 nb_fwd_lcores = (lcoreid_t) nb_lc;
3514 printf("Number of forwarding cores set to %u\n",
3515 (unsigned int) nb_fwd_lcores);
3519 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
3527 for (i = 0; i < nb_pt; i++) {
3528 port_id = (portid_t) portlist[i];
3529 if (port_id_is_invalid(port_id, ENABLED_WARN))
3532 fwd_ports_ids[i] = port_id;
3534 if (record_now == 0) {
3538 nb_cfg_ports = (portid_t) nb_pt;
3539 if (nb_fwd_ports != (portid_t) nb_pt) {
3540 printf("previous number of forwarding ports %u - changed to "
3541 "number of configured ports %u\n",
3542 (unsigned int) nb_fwd_ports, nb_pt);
3543 nb_fwd_ports = (portid_t) nb_pt;
3548 * Parse the user input and obtain the list of forwarding ports
3551 * String containing the user input. User can specify
3552 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
3553 * For example, if the user wants to use all the available
3554 * 4 ports in the system, then the input can be 0-3 or 0,1,2,3.
3555 * If the user wants to use only the ports 1,2 then the input
3557 * valid characters are '-' and ','
3558 * @param[out] values
3559 * This array will be filled with a list of port IDs
3560 * based on the user input
3561 * Note that duplicate entries are discarded and only the first
3562 * count entries in this array are port IDs and all the rest
3563 * will contain default values
3564 * @param[in] maxsize
3565 * This parameter denotes 2 things
3566 * 1) Number of elements in the values array
3567 * 2) Maximum value of each element in the values array
3569 * On success, returns total count of parsed port IDs
3570 * On failure, returns 0
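 * Example (illustrative): parsing "0-2,5" with maxsize 8 fills values[]
 * with {0, 1, 2, 5} and returns 4. Duplicate entries are skipped, while a
 * value outside the range [0, maxsize) makes the whole parse fail and
 * return 0.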
3573 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
3575 unsigned int count = 0;
3579 unsigned int marked[maxsize];
3581 if (list == NULL || values == NULL)
3584 for (i = 0; i < (int)maxsize; i++)
3590 /* Remove blank spaces, if any */
3591 while (isblank(*list))
3596 value = strtol(list, &end, 10);
3597 if (errno || end == NULL)
3599 if (value < 0 || value >= (int)maxsize)
3601 while (isblank(*end))
3603 if (*end == '-' && min == INT_MAX) {
3605 } else if ((*end == ',') || (*end == '\0')) {
3609 for (i = min; i <= max; i++) {
3610 if (count < maxsize) {
3622 } while (*end != '\0');
3628 parse_fwd_portlist(const char *portlist)
3630 unsigned int portcount;
3631 unsigned int portindex[RTE_MAX_ETHPORTS];
3632 unsigned int i, valid_port_count = 0;
3634 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
3636 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
3639 * Here we verify the validity of the ports
3640 * and thereby calculate the total number of
3643 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
3644 if (rte_eth_dev_is_valid_port(portindex[i])) {
3645 portindex[valid_port_count] = portindex[i];
3650 set_fwd_ports_list(portindex, valid_port_count);
3654 set_fwd_ports_mask(uint64_t portmask)
3656 unsigned int portlist[64];
3660 if (portmask == 0) {
3661 printf("Invalid zero mask of ports\n");
3665 RTE_ETH_FOREACH_DEV(i) {
3666 if (! ((uint64_t)(1ULL << i) & portmask))
3668 portlist[nb_pt++] = i;
3670 set_fwd_ports_list(portlist, nb_pt);
3674 set_fwd_ports_number(uint16_t nb_pt)
3676 if (nb_pt > nb_cfg_ports) {
3677 printf("nb fwd ports %u > %u (number of configured "
3678 "ports) - ignored\n",
3679 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
3682 nb_fwd_ports = (portid_t) nb_pt;
3683 printf("Number of forwarding ports set to %u\n",
3684 (unsigned int) nb_fwd_ports);
3688 port_is_forwarding(portid_t port_id)
3692 if (port_id_is_invalid(port_id, ENABLED_WARN))
3695 for (i = 0; i < nb_fwd_ports; i++) {
3696 if (fwd_ports_ids[i] == port_id)
3704 set_nb_pkt_per_burst(uint16_t nb)
3706 if (nb > MAX_PKT_BURST) {
3707 printf("nb pkt per burst: %u > %u (maximum packets per burst) "
3709 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
3712 nb_pkt_per_burst = nb;
3713 printf("Number of packets per burst set to %u\n",
3714 (unsigned int) nb_pkt_per_burst);
3718 tx_split_get_name(enum tx_pkt_split split)
3722 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3723 if (tx_split_name[i].split == split)
3724 return tx_split_name[i].name;
3730 set_tx_pkt_split(const char *name)
3734 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3735 if (strcmp(tx_split_name[i].name, name) == 0) {
3736 tx_pkt_split = tx_split_name[i].split;
3740 printf("unknown value: \"%s\"\n", name);
3744 parse_fec_mode(const char *name, uint32_t *mode)
3748 for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
3749 if (strcmp(fec_mode_name[i].name, name) == 0) {
3750 *mode = RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
3758 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa)
3762 printf("FEC capabilities:\n");
3764 for (i = 0; i < num; i++) {
3766 rte_eth_link_speed_to_str(speed_fec_capa[i].speed));
3768 for (j = RTE_ETH_FEC_AUTO; j < RTE_DIM(fec_mode_name); j++) {
3769 if (RTE_ETH_FEC_MODE_TO_CAPA(j) &
3770 speed_fec_capa[i].capa)
3771 printf("%s ", fec_mode_name[j].name);
3778 show_tx_pkt_segments(void)
3784 split = tx_split_get_name(tx_pkt_split);
3786 printf("Number of segments: %u\n", n);
3787 printf("Segment sizes: ");
3788 for (i = 0; i != n - 1; i++)
3789 printf("%hu,", tx_pkt_seg_lengths[i]);
3790 printf("%hu\n", tx_pkt_seg_lengths[i]);
3791 printf("Split packet: %s\n", split);
3795 nb_segs_is_invalid(unsigned int nb_segs)
3802 RTE_ETH_FOREACH_DEV(port_id) {
3803 for (queue_id = 0; queue_id < nb_txq; queue_id++) {
3804 ret = get_tx_ring_size(port_id, queue_id, &ring_size);
3809 if (ring_size < nb_segs) {
3810 printf("nb segments per TX packet=%u >= "
3811 "TX queue(%u) ring_size=%u - ignored\n",
3812 nb_segs, queue_id, ring_size);
3822 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
3824 uint16_t tx_pkt_len;
3827 if (nb_segs_is_invalid(nb_segs))
3831 * Check that each segment length is greater than or equal to
3832 * the mbuf data size.
3833 * Check also that the total packet length is greater than or equal to the
3834 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
3838 for (i = 0; i < nb_segs; i++) {
3839 if (seg_lengths[i] > mbuf_data_size[0]) {
3840 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
3841 i, seg_lengths[i], mbuf_data_size[0]);
3844 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
3846 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
3847 printf("total packet length=%u < %d - give up\n",
3848 (unsigned) tx_pkt_len,
3849 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
3853 for (i = 0; i < nb_segs; i++)
3854 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
3856 tx_pkt_length = tx_pkt_len;
3857 tx_pkt_nb_segs = (uint8_t) nb_segs;
3861 show_tx_pkt_times(void)
3863 printf("Interburst gap: %u\n", tx_pkt_times_inter);
3864 printf("Intraburst gap: %u\n", tx_pkt_times_intra);
3868 set_tx_pkt_times(unsigned int *tx_times)
3871 int offload_found = 0;
3875 static const struct rte_mbuf_dynfield desc_offs = {
3876 .name = RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
3877 .size = sizeof(uint64_t),
3878 .align = __alignof__(uint64_t),
3880 static const struct rte_mbuf_dynflag desc_flag = {
3881 .name = RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME,
3884 RTE_ETH_FOREACH_DEV(port_id) {
3885 struct rte_eth_dev_info dev_info = { 0 };
3888 ret = rte_eth_dev_info_get(port_id, &dev_info);
3889 if (ret == 0 && dev_info.tx_offload_capa &
3890 DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) {
3895 if (!offload_found) {
3896 printf("No device supporting Tx timestamp scheduling found, "
3897 "dynamic flag and field not registered\n");
3900 offset = rte_mbuf_dynfield_register(&desc_offs);
3901 if (offset < 0 && rte_errno != EEXIST)
3902 printf("Dynamic timestamp field registration error: %d\n",
3904 flag = rte_mbuf_dynflag_register(&desc_flag);
3905 if (flag < 0 && rte_errno != EEXIST)
3906 printf("Dynamic timestamp flag registration error: %d\n",
3908 tx_pkt_times_inter = tx_times[0];
3909 tx_pkt_times_intra = tx_times[1];
3913 setup_gro(const char *onoff, portid_t port_id)
3915 if (!rte_eth_dev_is_valid_port(port_id)) {
3916 printf("invalid port id %u\n", port_id);
3919 if (test_done == 0) {
3920 printf("Before enabling or disabling GRO,"
3921 " please stop forwarding first\n");
3924 if (strcmp(onoff, "on") == 0) {
3925 if (gro_ports[port_id].enable != 0) {
3926 printf("GRO is already enabled on port %u. Please"
3927 " disable it first\n", port_id);
3930 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3931 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
3932 gro_ports[port_id].param.max_flow_num =
3933 GRO_DEFAULT_FLOW_NUM;
3934 gro_ports[port_id].param.max_item_per_flow =
3935 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
3937 gro_ports[port_id].enable = 1;
3939 if (gro_ports[port_id].enable == 0) {
3940 printf("GRO is already disabled on port %u\n", port_id);
3943 gro_ports[port_id].enable = 0;
3948 setup_gro_flush_cycles(uint8_t cycles)
3950 if (test_done == 0) {
3951 printf("Before changing the flush interval for GRO,"
3952 " please stop forwarding first.\n");
3956 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
3957 GRO_DEFAULT_FLUSH_CYCLES) {
3958 printf("The flushing cycle must be in the range"
3959 " of 1 to %u. Reverting to the default"
3961 GRO_MAX_FLUSH_CYCLES,
3962 GRO_DEFAULT_FLUSH_CYCLES);
3963 cycles = GRO_DEFAULT_FLUSH_CYCLES;
3966 gro_flush_cycles = cycles;
3970 show_gro(portid_t port_id)
3972 struct rte_gro_param *param;
3973 uint32_t max_pkts_num;
3975 param = &gro_ports[port_id].param;
3977 if (!rte_eth_dev_is_valid_port(port_id)) {
3978 printf("Invalid port id %u.\n", port_id);
3981 if (gro_ports[port_id].enable) {
3982 printf("GRO type: TCP/IPv4\n");
3983 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3984 max_pkts_num = param->max_flow_num *
3985 param->max_item_per_flow;
3987 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
3988 printf("Max number of packets to perform GRO: %u\n",
3990 printf("Flushing cycles: %u\n", gro_flush_cycles);
3992 printf("GRO is not enabled on port %u.\n", port_id);
3996 setup_gso(const char *mode, portid_t port_id)
3998 if (!rte_eth_dev_is_valid_port(port_id)) {
3999 printf("invalid port id %u\n", port_id);
4002 if (strcmp(mode, "on") == 0) {
4003 if (test_done == 0) {
4004 printf("before enabling GSO,"
4005 " please stop forwarding first\n");
4008 gso_ports[port_id].enable = 1;
4009 } else if (strcmp(mode, "off") == 0) {
4010 if (test_done == 0) {
4011 printf("before disabling GSO,"
4012 " please stop forwarding first\n");
4015 gso_ports[port_id].enable = 0;
4020 list_pkt_forwarding_modes(void)
4022 static char fwd_modes[128] = "";
4023 const char *separator = "|";
4024 struct fwd_engine *fwd_eng;
4027 if (strlen (fwd_modes) == 0) {
4028 while ((fwd_eng = fwd_engines[i++]) != NULL) {
4029 strncat(fwd_modes, fwd_eng->fwd_mode_name,
4030 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
4031 strncat(fwd_modes, separator,
4032 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
4034 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
4041 list_pkt_forwarding_retry_modes(void)
4043 static char fwd_modes[128] = "";
4044 const char *separator = "|";
4045 struct fwd_engine *fwd_eng;
4048 if (strlen(fwd_modes) == 0) {
4049 while ((fwd_eng = fwd_engines[i++]) != NULL) {
4050 if (fwd_eng == &rx_only_engine)
4052 strncat(fwd_modes, fwd_eng->fwd_mode_name,
4054 strlen(fwd_modes) - 1);
4055 strncat(fwd_modes, separator,
4057 strlen(fwd_modes) - 1);
4059 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
4066 set_pkt_forwarding_mode(const char *fwd_mode_name)
4068 struct fwd_engine *fwd_eng;
4072 while ((fwd_eng = fwd_engines[i]) != NULL) {
4073 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
4074 printf("Set %s packet forwarding mode%s\n",
4076 retry_enabled == 0 ? "" : " with retry");
4077 cur_fwd_eng = fwd_eng;
4082 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
4086 add_rx_dump_callbacks(portid_t portid)
4088 struct rte_eth_dev_info dev_info;
4092 if (port_id_is_invalid(portid, ENABLED_WARN))
4095 ret = eth_dev_info_get_print_err(portid, &dev_info);
4099 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
4100 if (!ports[portid].rx_dump_cb[queue])
4101 ports[portid].rx_dump_cb[queue] =
4102 rte_eth_add_rx_callback(portid, queue,
4103 dump_rx_pkts, NULL);
4107 add_tx_dump_callbacks(portid_t portid)
4109 struct rte_eth_dev_info dev_info;
4113 if (port_id_is_invalid(portid, ENABLED_WARN))
4116 ret = eth_dev_info_get_print_err(portid, &dev_info);
4120 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
4121 if (!ports[portid].tx_dump_cb[queue])
4122 ports[portid].tx_dump_cb[queue] =
4123 rte_eth_add_tx_callback(portid, queue,
4124 dump_tx_pkts, NULL);
4128 remove_rx_dump_callbacks(portid_t portid)
4130 struct rte_eth_dev_info dev_info;
4134 if (port_id_is_invalid(portid, ENABLED_WARN))
4137 ret = eth_dev_info_get_print_err(portid, &dev_info);
4141 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
4142 if (ports[portid].rx_dump_cb[queue]) {
4143 rte_eth_remove_rx_callback(portid, queue,
4144 ports[portid].rx_dump_cb[queue]);
4145 ports[portid].rx_dump_cb[queue] = NULL;
4150 remove_tx_dump_callbacks(portid_t portid)
4152 struct rte_eth_dev_info dev_info;
4156 if (port_id_is_invalid(portid, ENABLED_WARN))
4159 ret = eth_dev_info_get_print_err(portid, &dev_info);
4163 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
4164 if (ports[portid].tx_dump_cb[queue]) {
4165 rte_eth_remove_tx_callback(portid, queue,
4166 ports[portid].tx_dump_cb[queue]);
4167 ports[portid].tx_dump_cb[queue] = NULL;
4172 configure_rxtx_dump_callbacks(uint16_t verbose)
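/*
 * Assumed verbose-level mapping, inferred from the add/remove calls below:
 * 1 dumps received packets only, 2 dumps transmitted packets only, values
 * above 2 dump both directions, and 0 removes all dump callbacks.
 */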
4176 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4177 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
4181 RTE_ETH_FOREACH_DEV(portid)
4183 if (verbose == 1 || verbose > 2)
4184 add_rx_dump_callbacks(portid);
4186 remove_rx_dump_callbacks(portid);
4188 add_tx_dump_callbacks(portid);
4190 remove_tx_dump_callbacks(portid);
4195 set_verbose_level(uint16_t vb_level)
4197 printf("Change verbose level from %u to %u\n",
4198 (unsigned int) verbose_level, (unsigned int) vb_level);
4199 verbose_level = vb_level;
4200 configure_rxtx_dump_callbacks(verbose_level);
4204 vlan_extend_set(portid_t port_id, int on)
4208 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4210 if (port_id_is_invalid(port_id, ENABLED_WARN))
4213 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4216 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
4217 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
4219 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
4220 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
4223 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4225 printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
4226 "diag=%d\n", port_id, on, diag);
4229 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4233 rx_vlan_strip_set(portid_t port_id, int on)
4237 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4239 if (port_id_is_invalid(port_id, ENABLED_WARN))
4242 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4245 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
4246 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
4248 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
4249 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4252 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4254 printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
4255 "diag=%d\n", port_id, on, diag);
4258 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4262 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
4266 if (port_id_is_invalid(port_id, ENABLED_WARN))
4269 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
4271 printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
4272 "diag=%d\n", port_id, queue_id, on, diag);
4276 rx_vlan_filter_set(portid_t port_id, int on)
4280 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4282 if (port_id_is_invalid(port_id, ENABLED_WARN))
4285 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4288 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
4289 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
4291 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
4292 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
4295 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4297 printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
4298 "diag=%d\n", port_id, on, diag);
4301 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4305 rx_vlan_qinq_strip_set(portid_t port_id, int on)
4309 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4311 if (port_id_is_invalid(port_id, ENABLED_WARN))
4314 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4317 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
4318 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
4320 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
4321 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
4324 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4326 printf("%s(port_id=%d, on=%d) failed "
4327 "diag=%d\n", __func__, port_id, on, diag);
4330 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4334 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
4338 if (port_id_is_invalid(port_id, ENABLED_WARN))
4340 if (vlan_id_is_invalid(vlan_id))
4342 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
4345 printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
4347 port_id, vlan_id, on, diag);
4352 rx_vlan_all_filter_set(portid_t port_id, int on)
4356 if (port_id_is_invalid(port_id, ENABLED_WARN))
4358 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
4359 if (rx_vft_set(port_id, vlan_id, on))
4365 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
4369 if (port_id_is_invalid(port_id, ENABLED_WARN))
4372 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
4376 printf("tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
4378 port_id, vlan_type, tp_id, diag);
4382 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
4384 struct rte_eth_dev_info dev_info;
4387 if (vlan_id_is_invalid(vlan_id))
4390 if (ports[port_id].dev_conf.txmode.offloads &
4391 DEV_TX_OFFLOAD_QINQ_INSERT) {
4392 printf("Error: cannot set a single VLAN while QinQ insertion is enabled\n");
4396 ret = eth_dev_info_get_print_err(port_id, &dev_info);
4400 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
4401 printf("Error: vlan insert is not supported by port %d\n",
4406 tx_vlan_reset(port_id);
4407 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
4408 ports[port_id].tx_vlan_id = vlan_id;
4412 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
4414 struct rte_eth_dev_info dev_info;
4417 if (vlan_id_is_invalid(vlan_id))
4419 if (vlan_id_is_invalid(vlan_id_outer))
4422 ret = eth_dev_info_get_print_err(port_id, &dev_info);
4426 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
4427 printf("Error: qinq insert not supported by port %d\n",
4432 tx_vlan_reset(port_id);
4433 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
4434 DEV_TX_OFFLOAD_QINQ_INSERT);
4435 ports[port_id].tx_vlan_id = vlan_id;
4436 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
4440 tx_vlan_reset(portid_t port_id)
4442 ports[port_id].dev_conf.txmode.offloads &=
4443 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
4444 DEV_TX_OFFLOAD_QINQ_INSERT);
4445 ports[port_id].tx_vlan_id = 0;
4446 ports[port_id].tx_vlan_id_outer = 0;
4450 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
4452 if (port_id_is_invalid(port_id, ENABLED_WARN))
4455 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
4459 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
4462 uint8_t existing_mapping_found = 0;
4464 if (port_id_is_invalid(port_id, ENABLED_WARN))
4467 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
4470 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
4471 printf("map_value not in required range 0..%d\n",
4472 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
4476 if (!is_rx) { /*then tx*/
4477 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
4478 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
4479 (tx_queue_stats_mappings[i].queue_id == queue_id)) {
4480 tx_queue_stats_mappings[i].stats_counter_id = map_value;
4481 existing_mapping_found = 1;
4485 if (!existing_mapping_found) { /* A new additional mapping... */
4486 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
4487 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
4488 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
4489 nb_tx_queue_stats_mappings++;
4493 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
4494 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
4495 (rx_queue_stats_mappings[i].queue_id == queue_id)) {
4496 rx_queue_stats_mappings[i].stats_counter_id = map_value;
4497 existing_mapping_found = 1;
4501 if (!existing_mapping_found) { /* A new additional mapping... */
4502 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
4503 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
4504 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
4505 nb_rx_queue_stats_mappings++;
4511 set_xstats_hide_zero(uint8_t on_off)
4513 xstats_hide_zero = on_off;
4517 set_record_core_cycles(uint8_t on_off)
4519 record_core_cycles = on_off;
4523 set_record_burst_stats(uint8_t on_off)
4525 record_burst_stats = on_off;
4529 print_fdir_mask(struct rte_eth_fdir_masks *mask)
4531 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
4533 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4534 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
4535 " tunnel_id: 0x%08x",
4536 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
4537 rte_be_to_cpu_32(mask->tunnel_id_mask));
4538 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
4539 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
4540 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
4541 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
4543 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
4544 rte_be_to_cpu_16(mask->src_port_mask),
4545 rte_be_to_cpu_16(mask->dst_port_mask));
4547 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4548 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
4549 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
4550 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
4551 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
4553 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4554 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
4555 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
4556 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
4557 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
4564 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4566 struct rte_eth_flex_payload_cfg *cfg;
4569 for (i = 0; i < flex_conf->nb_payloads; i++) {
4570 cfg = &flex_conf->flex_set[i];
4571 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
4573 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
4574 printf("\n L2_PAYLOAD: ");
4575 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
4576 printf("\n L3_PAYLOAD: ");
4577 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
4578 printf("\n L4_PAYLOAD: ");
4580 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
4581 for (j = 0; j < num; j++)
4582 printf(" %-5u", cfg->src_offset[j]);
4588 flowtype_to_str(uint16_t flow_type)
4590 struct flow_type_info {
4596 static struct flow_type_info flowtype_str_table[] = {
4597 {"raw", RTE_ETH_FLOW_RAW},
4598 {"ipv4", RTE_ETH_FLOW_IPV4},
4599 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
4600 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
4601 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
4602 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
4603 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
4604 {"ipv6", RTE_ETH_FLOW_IPV6},
4605 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
4606 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
4607 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
4608 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
4609 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
4610 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
4611 {"port", RTE_ETH_FLOW_PORT},
4612 {"vxlan", RTE_ETH_FLOW_VXLAN},
4613 {"geneve", RTE_ETH_FLOW_GENEVE},
4614 {"nvgre", RTE_ETH_FLOW_NVGRE},
4615 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
4618 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
4619 if (flowtype_str_table[i].ftype == flow_type)
4620 return flowtype_str_table[i].str;
4627 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4629 struct rte_eth_fdir_flex_mask *mask;
4633 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
4634 mask = &flex_conf->flex_mask[i];
4635 p = flowtype_to_str(mask->flow_type);
4636 printf("\n %s:\t", p ? p : "unknown");
4637 for (j = 0; j < num; j++)
4638 printf(" %02x", mask->mask[j]);
4644 print_fdir_flow_type(uint32_t flow_types_mask)
4649 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
4650 if (!(flow_types_mask & (1 << i)))
4652 p = flowtype_to_str(i);
4662 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
4663 struct rte_eth_fdir_stats *fdir_stat)
4667 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
4669 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
4670 RTE_ETH_FILTER_INFO, fdir_info);
4671 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
4672 RTE_ETH_FILTER_STATS, fdir_stat);
4676 #ifdef RTE_LIBRTE_I40E_PMD
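/* Generic filter API not supported: fall back to the i40e-specific FDIR helpers. */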
4677 if (ret == -ENOTSUP) {
4678 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
4680 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
4683 #ifdef RTE_LIBRTE_IXGBE_PMD
4684 if (ret == -ENOTSUP) {
4685 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
4687 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
4694 printf("\n FDIR is not supported on port %-2d\n",
4698 printf("programming error: (%s)\n", strerror(-ret));
4705 fdir_get_infos(portid_t port_id)
4707 struct rte_eth_fdir_stats fdir_stat;
4708 struct rte_eth_fdir_info fdir_info;
4710 static const char *fdir_stats_border = "########################";
4712 if (port_id_is_invalid(port_id, ENABLED_WARN))
4715 memset(&fdir_info, 0, sizeof(fdir_info));
4716 memset(&fdir_stat, 0, sizeof(fdir_stat));
4717 if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
4720 printf("\n %s FDIR infos for port %-2d %s\n",
4721 fdir_stats_border, port_id, fdir_stats_border);
4723 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
4724 printf(" PERFECT\n");
4725 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
4726 printf(" PERFECT-MAC-VLAN\n");
4727 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4728 printf(" PERFECT-TUNNEL\n");
4729 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
4730 printf(" SIGNATURE\n");
4732 printf(" DISABLED\n");
4733 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
4734 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
4735 printf(" SUPPORTED FLOW TYPE: ");
4736 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
4738 printf(" FLEX PAYLOAD INFO:\n");
4739 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
4740 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
4741 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
4742 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
4743 fdir_info.flex_payload_unit,
4744 fdir_info.max_flex_payload_segment_num,
4745 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
4747 print_fdir_mask(&fdir_info.mask);
4748 if (fdir_info.flex_conf.nb_payloads > 0) {
4749 printf(" FLEX PAYLOAD SRC OFFSET:");
4750 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4752 if (fdir_info.flex_conf.nb_flexmasks > 0) {
4753 printf(" FLEX MASK CFG:");
4754 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4756 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
4757 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
4758 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
4759 fdir_info.guarant_spc, fdir_info.best_spc);
4760 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
4761 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
4762 " add: %-10"PRIu64" remove: %"PRIu64"\n"
4763 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
4764 fdir_stat.collision, fdir_stat.free,
4765 fdir_stat.maxhash, fdir_stat.maxlen,
4766 fdir_stat.add, fdir_stat.remove,
4767 fdir_stat.f_add, fdir_stat.f_remove);
4768 printf(" %s############################%s\n",
4769 fdir_stats_border, fdir_stats_border);
4773 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
4775 struct rte_port *port;
4776 struct rte_eth_fdir_flex_conf *flex_conf;
4779 port = &ports[port_id];
4780 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4781 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
4782 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
4787 if (i >= RTE_ETH_FLOW_MAX) {
4788 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
4789 idx = flex_conf->nb_flexmasks;
4790 flex_conf->nb_flexmasks++;
4792 printf("The flex mask table is full. Cannot set flex"
4793 " mask for flow_type(%u).", cfg->flow_type);
4797 rte_memcpy(&flex_conf->flex_mask[idx],
4799 sizeof(struct rte_eth_fdir_flex_mask));
4803 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
4805 struct rte_port *port;
4806 struct rte_eth_fdir_flex_conf *flex_conf;
4809 port = &ports[port_id];
4810 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4811 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
4812 if (cfg->type == flex_conf->flex_set[i].type) {
4817 if (i >= RTE_ETH_PAYLOAD_MAX) {
4818 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
4819 idx = flex_conf->nb_payloads;
4820 flex_conf->nb_payloads++;
4822 printf("The flex payload table is full. Cannot set"
4823 " flex payload for type(%u).", cfg->type);
4827 rte_memcpy(&flex_conf->flex_set[idx],
4829 sizeof(struct rte_eth_flex_payload_cfg));
4834 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
4836 #ifdef RTE_LIBRTE_IXGBE_PMD
4840 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
4842 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
4846 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
4847 is_rx ? "rx" : "tx", port_id, diag);
4850 printf("VF %s setting not supported for port %d\n",
4851 is_rx ? "Rx" : "Tx", port_id);
4857 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
4860 struct rte_eth_link link;
4863 if (port_id_is_invalid(port_id, ENABLED_WARN))
4865 ret = eth_link_get_nowait_print_err(port_id, &link);
4868 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
4869 rate > link.link_speed) {
4870 printf("Invalid rate value: %u, bigger than link speed: %u\n",
4871 rate, link.link_speed);
4874 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
4877 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
4883 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
4885 int diag = -ENOTSUP;
4889 RTE_SET_USED(q_msk);
4891 #ifdef RTE_LIBRTE_IXGBE_PMD
4892 if (diag == -ENOTSUP)
4893 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
4896 #ifdef RTE_LIBRTE_BNXT_PMD
4897 if (diag == -ENOTSUP)
4898 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
4903 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
/*
 * Functions to manage the set of filtered Multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
 * The address of the pool and the number of valid multicast MAC addresses
 * recorded in the pool are stored in the fields "mc_addr_pool" and
 * "mc_addr_nb" of the "rte_port" data structure.
 *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
 * that it be supplied a contiguous array of multicast MAC addresses.
 * To comply with this constraint, the multicast addresses recorded in
 * the pool are kept compacted at the beginning of the pool, so when an
 * address is removed, all following addresses, if any, are moved back
 * to keep the set contiguous.
 */
#define MCAST_POOL_INC 32
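
/*
 * Worked example (illustration, not in the original source): with
 * MCAST_POOL_INC == 32, the first appended address triggers a realloc()
 * to room for 32 entries, the 33rd grows the pool to 64 entries, and so
 * on; removing the last remaining address frees the pool entirely (see
 * the helpers below).
 */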
static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct rte_ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}
	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct rte_ether_addr) *
		(port->mc_addr_nb + MCAST_POOL_INC);
	mc_pool = (struct rte_ether_addr *)realloc(port->mc_addr_pool,
		mc_pool_size);
	if (mc_pool == NULL) {
		printf("allocation of pool of %u multicast addresses failed\n",
			port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}
	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}
static void
mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
{
	if (mcast_addr_pool_extend(port) != 0)
		return;
	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
}
static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* Free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
}
static int
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	int diag;

	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
			port->mc_addr_nb);
	if (diag < 0)
		printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
			port_id, port->mc_addr_nb, diag);
	return diag;
}
void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];
	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}
	mcast_addr_pool_append(port, mc_addr);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, remove the address from the pool */
		mcast_addr_pool_remove(port, i);
}
void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];
	/* Search the pool of multicast MAC addresses for the removed address. */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}
	mcast_addr_pool_remove(port, i);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, add the address back into the pool */
		mcast_addr_pool_append(port, mc_addr);
}
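
/*
 * Minimal usage sketch (illustration only, kept compiled out): how a
 * command handler might drive the two helpers above. Port 0 and the
 * address below are example values, not taken from the original source;
 * the real callers are testpmd's command-line handlers.
 */
#if 0
static void
mcast_filter_example(void)
{
	/* 01:00:5e:00:00:01 is the MAC mapping of the all-hosts group 224.0.0.1 */
	struct rte_ether_addr addr = {
		.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }
	};

	mcast_addr_add(0, &addr);	/* append to the pool and program the port */
	mcast_addr_remove(0, &addr);	/* compact the pool and reprogram the port */
}
#endif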
void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get dcb infos on port %-2d\n",
			port_id);
		return;
	}
	printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf(" TC :       ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}
open_file(const char *file_path, uint32_t *size)
	int fd = open(file_path, O_RDONLY);
	uint8_t *buf = NULL;
		printf("%s: Failed to open %s\n", __func__, file_path);
	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		printf("%s: File operations failed\n", __func__);
	pkg_size = st_buf.st_size;
		printf("%s: File operations failed\n", __func__);
	buf = (uint8_t *)malloc(pkg_size);
		printf("%s: Failed to malloc memory\n", __func__);
	ret = read(fd, buf, pkg_size);
		printf("%s: File read operation failed\n", __func__);
save_file(const char *file_path, uint8_t *buf, uint32_t size)
	FILE *fh = fopen(file_path, "wb");
		printf("%s: Failed to open %s\n", __func__, file_path);
	if (fwrite(buf, 1, size, fh) != size) {
		printf("%s: File write operation failed\n", __func__);
close_file(uint8_t *buf)
void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_LIBRTE_I40E_PMD
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("no queue region has been set before");

	printf("\n %s All queue region info for port=%2d %s",
		queue_region_info_stats_border, port_id,
		queue_region_info_stats_border);
	printf("\n queue_region_number: %-14u \n",
		info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n region_id: %-14u queue_number: %-14u "
			"queue_start_index: %-14u \n",
			info->region[i].region_id,
			info->region[i].queue_num,
			info->region[i].queue_start_index);

		printf(" user_priority_num is %-14u :",
			info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n flowtype_num is %-14u :",
			info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}
void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	struct rte_ether_addr *addr;
	uint32_t i, num_macs = 0;
	struct rte_eth_dev *dev;

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);

	/* Count the non-zero entries of the port's MAC address array. */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		num_macs++;
	}

	printf("Number of MAC addresses added: %u\n", num_macs);

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);
	}
}
void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	port = &ports[port_id];

	printf("Number of Multicast MAC addresses added: %u\n", port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);
	}
}