/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <sys/queue.h>
#include <sys/types.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#include <rte_hexdump.h>

#include "testpmd.h"
#include "cmdline_mtr.h"

#define ETHDEV_FWVERS_LEN 32

#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9
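
/*
 * Illustrative sketch (not part of testpmd): the rate computations in the
 * stats display helpers below turn two CLOCK_TYPE_ID readings into a delta
 * in nanoseconds:
 *
 *	struct timespec t;
 *	uint64_t ns;
 *
 *	clock_gettime(CLOCK_TYPE_ID, &t);
 *	ns = t.tv_sec * NS_PER_SEC + t.tv_nsec;
 */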

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{ .split = TX_PKT_SPLIT_OFF, .name = "off" },
	{ .split = TX_PKT_SPLIT_ON, .name = "on" },
	{ .split = TX_PKT_SPLIT_RND, .name = "rand" },
};

const struct rss_type_info rss_type_table[] = {
	/* Group types */
	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2},
	{ "none", 0 },
	{ "ip", RTE_ETH_RSS_IP },
	{ "udp", RTE_ETH_RSS_UDP },
	{ "tcp", RTE_ETH_RSS_TCP },
	{ "sctp", RTE_ETH_RSS_SCTP },
	{ "tunnel", RTE_ETH_RSS_TUNNEL },
	{ "vlan", RTE_ETH_RSS_VLAN },

	/* Individual type */
	{ "ipv4", RTE_ETH_RSS_IPV4 },
	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", RTE_ETH_RSS_IPV6 },
	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
	{ "port", RTE_ETH_RSS_PORT },
	{ "vxlan", RTE_ETH_RSS_VXLAN },
	{ "geneve", RTE_ETH_RSS_GENEVE },
	{ "nvgre", RTE_ETH_RSS_NVGRE },
	{ "gtpu", RTE_ETH_RSS_GTPU },
	{ "eth", RTE_ETH_RSS_ETH },
	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
	{ "esp", RTE_ETH_RSS_ESP },
	{ "ah", RTE_ETH_RSS_AH },
	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
	{ "pfcp", RTE_ETH_RSS_PFCP },
	{ "pppoe", RTE_ETH_RSS_PPPOE },
	{ "ecpri", RTE_ETH_RSS_ECPRI },
	{ "mpls", RTE_ETH_RSS_MPLS },
	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
	{ "l2tpv2", RTE_ETH_RSS_L2TPV2 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
	{ NULL, 0 },
};

static const struct {
	enum rte_eth_fec_mode mode;
	const char *name;
} fec_mode_name[] = {
	{ .mode = RTE_ETH_FEC_NOFEC, .name = "off" },
	{ .mode = RTE_ETH_FEC_AUTO, .name = "auto" },
	{ .mode = RTE_ETH_FEC_BASER, .name = "baser" },
	{ .mode = RTE_ETH_FEC_RS, .name = "rs" },
};

static const struct {
	char str[32];
	uint64_t ftype;
} flowtype_str_table[] = {
	{"raw", RTE_ETH_FLOW_RAW},
	{"ipv4", RTE_ETH_FLOW_IPV4},
	{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
	{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
	{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
	{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
	{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
	{"ipv6", RTE_ETH_FLOW_IPV6},
	{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
	{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
	{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
	{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
	{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
	{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
	{"ipv6-ex", RTE_ETH_FLOW_IPV6_EX},
	{"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX},
	{"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX},
	{"port", RTE_ETH_FLOW_PORT},
	{"vxlan", RTE_ETH_FLOW_VXLAN},
	{"geneve", RTE_ETH_FLOW_GENEVE},
	{"nvgre", RTE_ETH_FLOW_NVGRE},
	{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
	{"gtpu", RTE_ETH_FLOW_GTPU},
};

void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

static void
nic_xstats_display_periodic(portid_t port_id)
{
	struct xstat_display_info *xstats_info;
	uint64_t *prev_values, *curr_values;
	uint64_t diff_value, value_rate;
	struct timespec cur_time;
	uint64_t *ids_supp;
	size_t ids_supp_sz;
	uint64_t diff_ns;
	unsigned int i;
	int rc;

	xstats_info = &ports[port_id].xstats_info;

	ids_supp_sz = xstats_info->ids_supp_sz;
	if (ids_supp_sz == 0)
		return;

	printf("\n");

	ids_supp = xstats_info->ids_supp;
	prev_values = xstats_info->prev_values;
	curr_values = xstats_info->curr_values;

	rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values,
				      ids_supp_sz);
	if (rc != (int)ids_supp_sz) {
		fprintf(stderr,
			"Failed to get values of %zu xstats for port %u - return code %d\n",
			ids_supp_sz, port_id, rc);
		return;
	}

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (xstats_info->prev_ns != 0)
			diff_ns = ns - xstats_info->prev_ns;
		xstats_info->prev_ns = ns;
	}

	printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)");
	for (i = 0; i < ids_supp_sz; i++) {
		diff_value = (curr_values[i] > prev_values[i]) ?
			     (curr_values[i] - prev_values[i]) : 0;
		prev_values[i] = curr_values[i];
		value_rate = diff_ns > 0 ?
				(double)diff_value / diff_ns * NS_PER_SEC : 0;

		printf(" %-25s%12"PRIu64" %15"PRIu64"\n",
		       xstats_display[i].name, curr_values[i], value_rate);
	}
}
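
/*
 * Note on the clamp above: if a counter went backwards (e.g. it was reset
 * or wrapped between two polls), the difference is forced to 0 rather than
 * letting the unsigned subtraction produce a huge bogus rate.
 */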

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
	struct timespec cur_time;
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
								diff_ns;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;
	static const char *nic_stats_border = "########################";
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	ret = rte_eth_stats_get(port_id, &stats);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to get stats (port %u): %d",
			__func__, port_id, ret);
		return;
	}
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
	printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
	printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf);
	printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (prev_ns[port_id] != 0)
			diff_ns = ns - prev_ns[port_id];
		prev_ns[port_id] = ns;
	}

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_ns > 0 ?
		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
	mpps_tx = diff_ns > 0 ?
		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_ns > 0 ?
		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
	mbps_tx = diff_ns > 0 ?
		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
	       PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	if (xstats_display_num > 0)
		nic_xstats_display_periodic(port_id);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}
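
/*
 * Worked example of the rate math above (illustrative numbers): with
 * diff_pkts_rx = 1000000 packets over diff_ns = 500000000 ns,
 * Rx-pps = 1000000 / 5e8 * 1e9 = 2000000 packets/s, and Rx-bps is the
 * byte rate computed the same way multiplied by 8.
 */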

void
nic_stats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_stats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset stats (port %u): %s",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr,
			"%s: Error: failed to get stats (port %u): %s",
			__func__, port_id, strerror(ret));
		return;
	}
	printf("\n NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		fprintf(stderr, "Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		fprintf(stderr, "Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		fprintf(stderr, "Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
			xstats_names[idx_xstat].name,
			xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}
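
/*
 * Illustrative sketch of the size-query pattern used above (not testpmd
 * code): a first call with a NULL buffer returns the number of entries,
 * and the second call fills a buffer of exactly that size.
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = malloc(sizeof(*names) * n);
 *	rte_eth_xstats_get_names(port_id, names, n);
 */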

void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset xstats (port %u): %s\n",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr, "%s: Error: failed to get stats (port %u): %s",
			__func__, port_id, strerror(ret));
	}
}

static const char *
get_queue_state_name(uint8_t queue_state)
{
	if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
		return "stopped";
	else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
		return "started";
	else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return "hairpin";
	else
		return "unknown";
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		fprintf(stderr,
			"Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
	if (qinfo.rx_buf_size != 0)
		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		fprintf(stderr,
			"Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
	       (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state));

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}

static int bus_match_all(const struct rte_bus *bus, const void *data)
{
	RTE_SET_USED(bus);
	RTE_SET_USED(data);
	return 0;
}

static void
device_infos_display_speeds(uint32_t speed_capa)
{
	printf("\n\tDevice speed capability:");
	if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
		printf(" Autonegotiate (all speeds)");
	if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
		printf(" Disable autonegotiate (fixed speed) ");
	if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
		printf(" 10 Mbps half-duplex ");
	if (speed_capa & RTE_ETH_LINK_SPEED_10M)
		printf(" 10 Mbps full-duplex ");
	if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
		printf(" 100 Mbps half-duplex ");
	if (speed_capa & RTE_ETH_LINK_SPEED_100M)
		printf(" 100 Mbps full-duplex ");
	if (speed_capa & RTE_ETH_LINK_SPEED_1G)
		printf(" 1 Gbps ");
	if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
		printf(" 2.5 Gbps ");
	if (speed_capa & RTE_ETH_LINK_SPEED_5G)
		printf(" 5 Gbps ");
	if (speed_capa & RTE_ETH_LINK_SPEED_10G)
		printf(" 10 Gbps ");
	if (speed_capa & RTE_ETH_LINK_SPEED_20G)
		printf(" 20 Gbps ");
	if (speed_capa & RTE_ETH_LINK_SPEED_25G)
		printf(" 25 Gbps ");
	if (speed_capa & RTE_ETH_LINK_SPEED_40G)
		printf(" 40 Gbps ");
	if (speed_capa & RTE_ETH_LINK_SPEED_50G)
		printf(" 50 Gbps ");
	if (speed_capa & RTE_ETH_LINK_SPEED_56G)
		printf(" 56 Gbps ");
	if (speed_capa & RTE_ETH_LINK_SPEED_100G)
		printf(" 100 Gbps ");
	if (speed_capa & RTE_ETH_LINK_SPEED_200G)
		printf(" 200 Gbps ");
}

void
device_infos_display(const char *identifier)
{
	static const char *info_border = "*********************";
	struct rte_bus *start = NULL, *next;
	struct rte_dev_iterator dev_iter;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_ether_addr mac_addr;
	struct rte_device *dev;
	struct rte_devargs da;
	portid_t port_id;
	struct rte_eth_dev_info dev_info;
	char devstr[128];

	memset(&da, 0, sizeof(da));
	if (!identifier)
		goto skip_parse;

	if (rte_devargs_parsef(&da, "%s", identifier)) {
		fprintf(stderr, "cannot parse identifier\n");
		return;
	}

skip_parse:
	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {

		start = next;
		if (identifier && da.bus != next)
			continue;

		/* Skip buses that don't have iterate method */
		if (!next->dev_iterate)
			continue;

		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
		RTE_DEV_FOREACH(dev, devstr, &dev_iter) {

			if (!dev->driver)
				continue;
			/* Check for matching device if identifier is present */
			if (identifier &&
			    strncmp(da.name, dev->name, strlen(dev->name)))
				continue;
			printf("\n%s Infos for device %s %s\n",
			       info_border, dev->name, info_border);
			printf("Bus name: %s", dev->bus->name);
			printf("\nDriver name: %s", dev->driver->name);
			printf("\nDevargs: %s",
			       dev->devargs ? dev->devargs->args : "");
			printf("\nConnect to socket: %d", dev->numa_node);
			printf("\n");

			/* List ports with matching device name */
			RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
				printf("\n\tPort id: %-2d", port_id);
				if (eth_macaddr_get_print_err(port_id,
							      &mac_addr) == 0)
					print_ethaddr("\n\tMAC address: ",
						      &mac_addr);
				rte_eth_dev_get_name_by_port(port_id, name);
				printf("\n\tDevice name: %s", name);
				if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
					device_infos_display_speeds(dev_info.speed_capa);
				printf("\n");
			}
		}
	}
	rte_devargs_reset(&da);
}

static void
print_dev_capabilities(uint64_t capabilities)
{
	uint64_t single_capa;
	int begin;
	int end;
	int bit;

	if (capabilities == 0)
		return;

	begin = __builtin_ctzll(capabilities);
	end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities);

	single_capa = 1ULL << begin;
	for (bit = begin; bit < end; bit++) {
		if (capabilities & single_capa)
			printf(" %s",
			       rte_eth_dev_capability_name(single_capa));
		single_capa <<= 1;
	}
}
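
/*
 * Worked example (illustrative): for capabilities = 0x12 (bits 1 and 4
 * set), begin = __builtin_ctzll(0x12) = 1 and
 * end = 64 - __builtin_clzll(0x12) = 5, so the loop only scans bits 1..4
 * instead of all 64 positions.
 */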

uint64_t
str_to_rsstypes(const char *str)
{
	uint16_t i;

	for (i = 0; rss_type_table[i].str != NULL; i++) {
		if (strcmp(rss_type_table[i].str, str) == 0)
			return rss_type_table[i].rss_type;
	}

	return 0;
}

const char *
rsstypes_to_str(uint64_t rss_type)
{
	uint16_t i;

	for (i = 0; rss_type_table[i].str != NULL; i++) {
		if (rss_type_table[i].rss_type == rss_type)
			return rss_type_table[i].str;
	}

	return NULL;
}

static void
rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line)
{
	uint16_t user_defined_str_len;
	uint16_t total_len = 0;
	uint16_t str_len = 0;
	uint64_t rss_offload;
	uint16_t i;

	for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) {
		rss_offload = RTE_BIT64(i);
		if ((offload_types & rss_offload) != 0) {
			const char *p = rsstypes_to_str(rss_offload);

			user_defined_str_len =
				strlen("user-defined-") + (i / 10 + 1);
			str_len = p ? strlen(p) : user_defined_str_len;
			str_len += 2; /* add two spaces */
			if (total_len + str_len >= char_num_per_line) {
				total_len = 0;
				printf("\n");
			}

			if (p)
				printf("  %s", p);
			else
				printf("  user-defined-%u", i);
			total_len += str_len;
		}
	}
	printf("\n");
}

void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	uint16_t mtu;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;
	char fw_version[ETHDEV_FWVERS_LEN];

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	port = &ports[port_id];
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
		print_ethaddr("MAC address: ", &mac_addr);
	rte_eth_dev_get_name_by_port(port_id, name);
	printf("\nDevice name: %s", name);
	printf("\nDriver name: %s", dev_info.driver_name);

	if (rte_eth_dev_fw_version_get(port_id, fw_version,
				       ETHDEV_FWVERS_LEN) == 0)
		printf("\nFirmware-version: %s", fw_version);
	else
		printf("\nFirmware-version: %s", "not available");

	if (dev_info.device->devargs && dev_info.device->devargs->args)
		printf("\nDevargs: %s", dev_info.device->devargs->args);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id], 0);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
	       ("On") : ("Off"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload: \n");
		if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on, ");
		else
			printf("  strip off, ");

		if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
			printf("filter on, ");
		else
			printf("filter off, ");

		if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
			printf("extend on, ");
		else
			printf("extend off, ");

		if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
			printf("qinq strip on\n");
		else
			printf("qinq strip off\n");
	}

	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No RSS offload flow type is supported.\n");
	else {
		printf("Supported RSS offload flow types:\n");
		rss_offload_types_display(dev_info.flow_type_rss_offloads,
				TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
	}

	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
	printf("Maximum configurable length of RX packet: %u\n",
	       dev_info.max_rx_pktlen);
	printf("Maximum configurable size of LRO aggregated packet: %u\n",
	       dev_info.max_lro_pkt_size);
	if (dev_info.max_vfs)
		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
	if (dev_info.max_vmdq_pools)
		printf("Maximum number of VMDq pools: %u\n",
		       dev_info.max_vmdq_pools);

	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
	printf("Max segment number per packet: %hu\n",
	       dev_info.tx_desc_lim.nb_seg_max);
	printf("Max segment number per MTU/TSO: %hu\n",
	       dev_info.tx_desc_lim.nb_mtu_seg_max);

	printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa);
	print_dev_capabilities(dev_info.dev_capa);
	printf(" )\n");

	/* Show switch info only if valid switch domain and port id is set */
	if (dev_info.switch_info.domain_id !=
		RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		if (dev_info.switch_info.name)
			printf("Switch name: %s\n", dev_info.switch_info.name);

		printf("Switch domain Id: %u\n",
			dev_info.switch_info.domain_id);
		printf("Switch Port Id: %u\n",
			dev_info.switch_info.port_id);
		if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0)
			printf("Switch Rx domain: %u\n",
			       dev_info.switch_info.rx_domain);
	}
}

void
port_summary_header_display(void)
{
	uint16_t port_number;

	port_number = rte_eth_dev_count_avail();
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
	       "Driver", "Status", "Link");
}

void
port_summary_display(portid_t port_id)
{
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	rte_eth_dev_get_name_by_port(port_id, name);
	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
	if (ret != 0)
		return;

	printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n",
	       port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name,
	       dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
	       rte_eth_link_speed_to_str(link.link_speed));
}

void
port_eeprom_display(portid_t port_id)
{
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
	if (len_eeprom < 0) {
		switch (len_eeprom) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get EEPROM: %d\n",
				len_eeprom);
			break;
		}
		return;
	}

	einfo.offset = 0;
	einfo.length = len_eeprom;
	einfo.data = calloc(1, len_eeprom);
	if (!einfo.data) {
		fprintf(stderr,
			"Allocation of port %u eeprom data failed\n",
			port_id);
		return;
	}

	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get EEPROM: %d\n", ret);
			break;
		}
		free(einfo.data);
		return;
	}
	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
	free(einfo.data);
}

void
port_module_eeprom_display(portid_t port_id)
{
	struct rte_eth_dev_module_info minfo;
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_dev_get_module_info(port_id, &minfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get module EEPROM: %d\n",
				ret);
			break;
		}
		return;
	}

	einfo.offset = 0;
	einfo.length = minfo.eeprom_len;
	einfo.data = calloc(1, minfo.eeprom_len);
	if (!einfo.data) {
		fprintf(stderr,
			"Allocation of port %u eeprom data failed\n",
			port_id);
		return;
	}

	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get module EEPROM: %d\n",
				ret);
			break;
		}
		free(einfo.data);
		return;
	}

	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
	free(einfo.data);
}

int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		fprintf(stderr, "Invalid port %d\n", port_id);

	return 1;
}

void print_valid_ports(void)
{
	portid_t pid;

	printf("The valid ports array is [");
	RTE_ETH_FOREACH_DEV(pid) {
		printf(" %d", pid);
	}
	printf(" ]\n");
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	const struct rte_pci_device *pci_dev;
	const struct rte_bus *bus;
	uint64_t pci_len;

	if (reg_off & 0x3) {
		fprintf(stderr,
			"Port register offset 0x%X not aligned on a 4-byte boundary\n",
			(unsigned int)reg_off);
		return 1;
	}

	if (!ports[port_id].dev_info.device) {
		fprintf(stderr, "Invalid device\n");
		return 0;
	}

	bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
	if (bus && !strcmp(bus->name, "pci")) {
		pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
	} else {
		fprintf(stderr, "Not a PCI device\n");
		return 1;
	}

	pci_len = pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		fprintf(stderr,
			"Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n",
			port_id, (unsigned int)reg_off, (unsigned int)reg_off,
			pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}
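
/*
 * Worked example (illustrative): displaying bits [4, 7] of a register
 * holding 0xAB shifts right by l_bit = 4 (giving 0xA) and masks with
 * (1 << 4) - 1 = 0xF, so the printed field value is 0xA.
 */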

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n",
			(int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n",
			(unsigned)value, (unsigned)value,
			(unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}
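
/*
 * Worked example (illustrative): a device reporting max_rx_pktlen = 1518
 * and max_mtu = 1500 yields an overhead of 18 bytes, which matches the
 * RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) fallback.
 */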

static int
eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	uint32_t overhead_len;
	uint32_t frame_size;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < dev_info.min_mtu) {
		fprintf(stderr,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info.min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info.max_mtu) {
		fprintf(stderr,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info.max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
			dev_info.max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size > dev_info.max_rx_pktlen) {
		fprintf(stderr,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info.max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	struct rte_port *port = &ports[port_id];
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = eth_dev_validate_mtu(port_id, mtu);
	if (diag != 0)
		return;

	if (port->need_reconfig == 0) {
		diag = rte_eth_dev_set_mtu(port_id, mtu);
		if (diag != 0) {
			fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
			return;
		}
	}

	port->dev_conf.rxmode.mtu = mtu;
}
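
/*
 * Typical interactive usage (testpmd CLI; exact syntax may differ between
 * releases):
 *
 *	testpmd> port config mtu 0 1500
 */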

/* Generic flow management functions. */

static struct port_flow_tunnel *
port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
{
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (flow_tunnel->id == port_tunnel_id)
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

const char *
port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
{
	const char *type = NULL;

	switch (tunnel->type) {
	default:
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		type = "vxlan";
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		type = "gre";
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		type = "nvgre";
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		type = "geneve";
		break;
	}

	return type;
}

struct port_flow_tunnel *
port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

void port_flow_tunnel_list(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		printf("port %u tunnel #%u type=%s",
			port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
		if (flt->tunnel.tun_id)
			printf(" id=%" PRIu64, flt->tunnel.tun_id);
		printf("\n");
	}
}

void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->id == tunnel_id)
			break;
	}
	if (flt) {
		LIST_REMOVE(flt, chain);
		free(flt);
		printf("port %u: flow tunnel #%u destroyed\n",
			port_id, tunnel_id);
	}
}

void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
{
	struct rte_port *port = &ports[port_id];
	enum rte_flow_item_type type;
	struct port_flow_tunnel *flt;

	if (!strcmp(ops->type, "vxlan"))
		type = RTE_FLOW_ITEM_TYPE_VXLAN;
	else if (!strcmp(ops->type, "gre"))
		type = RTE_FLOW_ITEM_TYPE_GRE;
	else if (!strcmp(ops->type, "nvgre"))
		type = RTE_FLOW_ITEM_TYPE_NVGRE;
	else if (!strcmp(ops->type, "geneve"))
		type = RTE_FLOW_ITEM_TYPE_GENEVE;
	else {
		fprintf(stderr, "cannot offload \"%s\" tunnel type\n",
			ops->type);
		return;
	}
	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->tunnel.type == type)
			break;
	}
	if (!flt) {
		flt = calloc(1, sizeof(*flt));
		if (!flt) {
			fprintf(stderr, "failed to allocate port flt object\n");
			return;
		}
		flt->tunnel.type = type;
		flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
			  LIST_FIRST(&port->flow_tunnel_list)->id + 1;
		LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
	}
	printf("port %d: flow tunnel #%u type %s\n",
		port_id, flt->id, ops->type);
}
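
/*
 * Typical interactive usage of the tunnel helpers above (testpmd CLI;
 * exact syntax may differ between releases):
 *
 *	testpmd> flow tunnel create 0 type vxlan
 *	testpmd> flow tunnel list 0
 */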

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions,
	      struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct port_flow *pf;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (ret < 0)
		return NULL;
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
	if (!pf) {
		rte_flow_error_set
			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "calloc() failed");
		return NULL;
	}
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
			  error) >= 0)
		return pf;
	free(pf);
	return NULL;
}
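
/*
 * The rte_flow_conv() calls above follow a two-pass pattern: the first
 * call with a NULL destination only returns the number of bytes the
 * converted rule needs, and the second call copies the rule into the
 * flexible storage allocated right after the port_flow header.
 */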

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n",
		__func__, error->type, errstr,
		error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					 error->cause), buf) : "",
		error->message ? error->message : "(no stated reason)",
		rte_strerror(err));

	switch (error->type) {
	case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER:
		fprintf(stderr, "The status suggests the use of \"transfer\" "
				"as the possible cause of the failure. Make "
				"sure that the flow in question and its "
				"indirect components (if any) are managed "
				"via \"transfer\" proxy port. Use command "
				"\"show port (port_id) flow transfer proxy\" "
				"to figure out the proxy port ID\n");
		break;
	default:
		break;
	}

	return -err;
}

static void
rss_types_display(uint64_t rss_types, uint16_t char_num_per_line)
{
	uint16_t total_len = 0;
	uint16_t str_len;
	uint16_t i;

	if (rss_types == 0)
		return;

	for (i = 0; rss_type_table[i].str; i++) {
		if (rss_type_table[i].rss_type == 0)
			continue;

		if ((rss_types & rss_type_table[i].rss_type) ==
					rss_type_table[i].rss_type) {
			/* Contain two spaces */
			str_len = strlen(rss_type_table[i].str) + 2;
			if (total_len + str_len > char_num_per_line) {
				printf("\n");
				total_len = 0;
			}
			printf("  %s", rss_type_table[i].str);
			total_len += str_len;
		}
	}
	printf("\n");
}

static void
rss_config_display(struct rte_flow_action_rss *rss_conf)
{
	uint8_t i;

	if (rss_conf == NULL) {
		fprintf(stderr, "Invalid rule\n");
		return;
	}

	printf("RSS:\n"
	       " queues:");
	if (rss_conf->queue_num == 0)
		printf(" none");
	for (i = 0; i < rss_conf->queue_num; i++)
		printf(" %d", rss_conf->queue[i]);
	printf("\n");

	printf(" function: ");
	switch (rss_conf->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
		printf("default\n");
		break;
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		printf("toeplitz\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
		printf("simple_xor\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
		printf("symmetric_toeplitz\n");
		break;
	default:
		printf("Unknown function\n");
		return;
	}

	printf(" types:\n");
	if (rss_conf->types == 0) {
		printf("  none\n");
		return;
	}
	rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
}

static struct port_indirect_action *
action_get_by_id(portid_t port_id, uint32_t id)
{
	struct rte_port *port;
	struct port_indirect_action **ppia;
	struct port_indirect_action *pia = NULL;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return NULL;
	port = &ports[port_id];
	ppia = &port->actions_list;
	while (*ppia) {
		if ((*ppia)->id == id) {
			pia = *ppia;
			break;
		}
		ppia = &(*ppia)->next;
	}
	if (!pia)
		fprintf(stderr,
			"Failed to find indirect action #%u on port %u\n",
			id, port_id);
	return pia;
}

static int
action_alloc(portid_t port_id, uint32_t id,
	     struct port_indirect_action **action)
{
	struct rte_port *port;
	struct port_indirect_action **ppia;
	struct port_indirect_action *pia = NULL;

	*action = NULL;
	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	if (id == UINT32_MAX) {
		/* taking first available ID */
		if (port->actions_list) {
			if (port->actions_list->id == UINT32_MAX - 1) {
				fprintf(stderr,
					"Highest indirect action ID is already assigned, delete it first\n");
				return -ENOMEM;
			}
			id = port->actions_list->id + 1;
		} else {
			id = 0;
		}
	}
	pia = calloc(1, sizeof(*pia));
	if (!pia) {
		fprintf(stderr,
			"Allocation of port %u indirect action failed\n",
			port_id);
		return -ENOMEM;
	}
	ppia = &port->actions_list;
	while (*ppia && (*ppia)->id > id)
		ppia = &(*ppia)->next;
	if (*ppia && (*ppia)->id == id) {
		fprintf(stderr,
			"Indirect action #%u is already assigned, delete it first\n",
			id);
		free(pia);
		return -EINVAL;
	}
	pia->next = *ppia;
	pia->id = id;
	*ppia = pia;
	*action = pia;
	return 0;
}

static int
template_alloc(uint32_t id, struct port_template **template,
	       struct port_template **list)
{
	struct port_template *lst = *list;
	struct port_template **ppt;
	struct port_template *pt = NULL;

	*template = NULL;
	if (id == UINT32_MAX) {
		/* taking first available ID */
		if (lst) {
			if (lst->id == UINT32_MAX - 1) {
				printf("Highest template ID is already"
				       " assigned, delete it first\n");
				return -ENOMEM;
			}
			id = lst->id + 1;
		} else {
			id = 0;
		}
	}
	pt = calloc(1, sizeof(*pt));
	if (!pt) {
		printf("Allocation of port template failed\n");
		return -ENOMEM;
	}
	ppt = list;
	while (*ppt && (*ppt)->id > id)
		ppt = &(*ppt)->next;
	if (*ppt && (*ppt)->id == id) {
		printf("Template #%u is already assigned,"
		       " delete it first\n", id);
		free(pt);
		return -EINVAL;
	}
	pt->next = *ppt;
	pt->id = id;
	*ppt = pt;
	*template = pt;
	return 0;
}

static int
table_alloc(uint32_t id, struct port_table **table,
	    struct port_table **list)
{
	struct port_table *lst = *list;
	struct port_table **ppt;
	struct port_table *pt = NULL;

	*table = NULL;
	if (id == UINT32_MAX) {
		/* taking first available ID */
		if (lst) {
			if (lst->id == UINT32_MAX - 1) {
				printf("Highest table ID is already"
				       " assigned, delete it first\n");
				return -ENOMEM;
			}
			id = lst->id + 1;
		} else {
			id = 0;
		}
	}
	pt = calloc(1, sizeof(*pt));
	if (!pt) {
		printf("Allocation of table failed\n");
		return -ENOMEM;
	}
	ppt = list;
	while (*ppt && (*ppt)->id > id)
		ppt = &(*ppt)->next;
	if (*ppt && (*ppt)->id == id) {
		printf("Table #%u is already assigned,"
		       " delete it first\n", id);
		free(pt);
		return -EINVAL;
	}
	pt->next = *ppt;
	pt->id = id;
	*ppt = pt;
	*table = pt;
	return 0;
}

/** Get info about flow management resources. */
int
port_flow_get_info(portid_t port_id)
{
	struct rte_flow_port_info port_info;
	struct rte_flow_queue_info queue_info;
	struct rte_flow_error error;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x99, sizeof(error));
	memset(&port_info, 0, sizeof(port_info));
	memset(&queue_info, 0, sizeof(queue_info));
	if (rte_flow_info_get(port_id, &port_info, &queue_info, &error))
		return port_flow_complain(&error);
	printf("Flow engine resources on port %u:\n"
	       "Number of queues: %d\n"
	       "Size of queues: %d\n"
	       "Number of counters: %d\n"
	       "Number of aging objects: %d\n"
	       "Number of meter actions: %d\n",
	       port_id, port_info.max_nb_queues,
	       queue_info.max_size,
	       port_info.max_nb_counters,
	       port_info.max_nb_aging_objects,
	       port_info.max_nb_meters);
	return 0;
}

/** Configure flow management resources. */
int
port_flow_configure(portid_t port_id,
	const struct rte_flow_port_attr *port_attr,
	uint16_t nb_queue,
	const struct rte_flow_queue_attr *queue_attr)
{
	struct rte_port *port;
	struct rte_flow_error error;
	const struct rte_flow_queue_attr *attr_list[nb_queue];
	int std_queue;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	port->queue_nb = nb_queue;
	port->queue_sz = queue_attr->size;
	for (std_queue = 0; std_queue < nb_queue; std_queue++)
		attr_list[std_queue] = queue_attr;
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error))
		return port_flow_complain(&error);
	printf("Configure flows on port %u: "
	       "number of queues %d with %d elements\n",
	       port_id, nb_queue, queue_attr->size);
	return 0;
}
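
/*
 * Typical interactive usage (testpmd CLI; exact syntax may differ between
 * releases):
 *
 *	testpmd> flow configure 0 queues_number 4 queues_size 64
 */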

/** Create indirect action */
int
port_action_handle_create(portid_t port_id, uint32_t id,
			  const struct rte_flow_indir_action_conf *conf,
			  const struct rte_flow_action *action)
{
	struct port_indirect_action *pia;
	int ret;
	struct rte_flow_error error;

	ret = action_alloc(port_id, id, &pia);
	if (ret)
		return ret;
	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
		struct rte_flow_action_age *age =
			(struct rte_flow_action_age *)(uintptr_t)(action->conf);

		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
		age->context = &pia->age_type;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
		struct rte_flow_action_conntrack *ct =
		(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);

		memcpy(ct, &conntrack_context, sizeof(*ct));
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
						    &error);
	if (!pia->handle) {
		uint32_t destroy_id = pia->id;
		port_action_handle_destroy(port_id, 1, &destroy_id);
		return port_flow_complain(&error);
	}
	pia->type = action->type;
	printf("Indirect action #%u created\n", pia->id);
	return 0;
}

/** Destroy indirect action */
int
port_action_handle_destroy(portid_t port_id,
			   uint32_t n,
			   const uint32_t *actions)
{
	struct rte_port *port;
	struct port_indirect_action **tmp;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->actions_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_indirect_action *pia = *tmp;

			if (actions[i] != pia->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));

			if (pia->handle && rte_flow_action_handle_destroy(
					port_id, pia->handle, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			*tmp = pia->next;
			printf("Indirect action #%u destroyed\n", pia->id);
			free(pia);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
	}
	return ret;
}

int
port_action_handle_flush(portid_t port_id)
{
	struct rte_port *port;
	struct port_indirect_action **tmp;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->actions_list;
	while (*tmp != NULL) {
		struct rte_flow_error error;
		struct port_indirect_action *pia = *tmp;

		/* Poisoning to make sure PMDs update it in case of error. */
		memset(&error, 0x44, sizeof(error));
		if (pia->handle != NULL &&
		    rte_flow_action_handle_destroy
					(port_id, pia->handle, &error) != 0) {
			printf("Indirect action #%u not destroyed\n", pia->id);
			ret = port_flow_complain(&error);
		}
		*tmp = pia->next;
		free(pia);
	}
	return ret;
}

/** Get indirect action by port + id */
struct rte_flow_action_handle *
port_action_handle_get_by_id(portid_t port_id, uint32_t id)
{
	struct port_indirect_action *pia = action_get_by_id(port_id, id);

	return (pia) ? pia->handle : NULL;
}

/** Update indirect action */
int
port_action_handle_update(portid_t port_id, uint32_t id,
			  const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_flow_action_handle *action_handle;
	struct port_indirect_action *pia;
	const void *update;

	action_handle = port_action_handle_get_by_id(port_id, id);
	if (!action_handle)
		return -EINVAL;
	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		update = action->conf;
		break;
	default:
		update = action;
		break;
	}
	if (rte_flow_action_handle_update(port_id, action_handle, update,
					  &error)) {
		return port_flow_complain(&error);
	}
	printf("Indirect action #%u updated\n", id);
	return 0;
}

int
port_action_handle_query(portid_t port_id, uint32_t id)
{
	struct rte_flow_error error;
	struct port_indirect_action *pia;
	union {
		struct rte_flow_query_count count;
		struct rte_flow_query_age age;
		struct rte_flow_action_conntrack ct;
	} query;

	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
	case RTE_FLOW_ACTION_TYPE_COUNT:
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		break;
	default:
		fprintf(stderr,
			"Indirect action %u (type: %d) on port %u doesn't support query\n",
			id, pia->type, port_id);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error))
		return port_flow_complain(&error);
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
		printf("Indirect AGE action:\n"
		       " aged: %u\n"
		       " sec_since_last_hit_valid: %u\n"
		       " sec_since_last_hit: %" PRIu32 "\n",
		       query.age.aged,
		       query.age.sec_since_last_hit_valid,
		       query.age.sec_since_last_hit);
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("Indirect COUNT action:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		printf("Conntrack Context:\n"
		       " Peer: %u, Flow dir: %s, Enable: %u\n"
		       " Live: %u, SACK: %u, CACK: %u\n"
		       " Packet dir: %s, Liberal: %u, State: %u\n"
		       " Factor: %u, Retrans: %u, TCP flags: %u\n"
		       " Last Seq: %u, Last ACK: %u\n"
		       " Last Win: %u, Last End: %u\n",
		       query.ct.peer_port,
		       query.ct.is_original_dir ? "Original" : "Reply",
		       query.ct.enable, query.ct.live_connection,
		       query.ct.selective_ack, query.ct.challenge_ack_passed,
		       query.ct.last_direction ? "Original" : "Reply",
		       query.ct.liberal_mode, query.ct.state,
		       query.ct.max_ack_window, query.ct.retransmission_limit,
		       query.ct.last_index, query.ct.last_seq,
		       query.ct.last_ack, query.ct.last_window,
		       query.ct.last_end);
		printf(" Original Dir:\n"
		       " scale: %u, fin: %u, ack seen: %u\n"
		       " unacked data: %u\n Sent end: %u,"
		       " Reply end: %u, Max win: %u, Max ACK: %u\n",
		       query.ct.original_dir.scale,
		       query.ct.original_dir.close_initiated,
		       query.ct.original_dir.last_ack_seen,
		       query.ct.original_dir.data_unacked,
		       query.ct.original_dir.sent_end,
		       query.ct.original_dir.reply_end,
		       query.ct.original_dir.max_win,
		       query.ct.original_dir.max_ack);
		printf(" Reply Dir:\n"
		       " scale: %u, fin: %u, ack seen: %u\n"
		       " unacked data: %u\n Sent end: %u,"
		       " Reply end: %u, Max win: %u, Max ACK: %u\n",
		       query.ct.reply_dir.scale,
		       query.ct.reply_dir.close_initiated,
		       query.ct.reply_dir.last_ack_seen,
		       query.ct.reply_dir.data_unacked,
		       query.ct.reply_dir.sent_end,
		       query.ct.reply_dir.reply_end,
		       query.ct.reply_dir.max_win,
		       query.ct.reply_dir.max_ack);
		break;
	default:
		fprintf(stderr,
			"Indirect action %u (type: %d) on port %u doesn't support query\n",
			id, pia->type, port_id);
		break;
	}
	return 0;
}
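
/*
 * Typical interactive usage (testpmd CLI; exact syntax may differ between
 * releases):
 *
 *	testpmd> flow indirect_action 0 create action_id 5 action count / end
 *	testpmd> flow indirect_action 0 query 5
 */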

static struct port_flow_tunnel *
port_flow_tunnel_offload_cmd_prep(portid_t port_id,
				  const struct rte_flow_item *pattern,
				  const struct rte_flow_action *actions,
				  const struct tunnel_ops *tunnel_ops)
{
	int ret;
	struct rte_port *port;
	struct port_flow_tunnel *pft;
	struct rte_flow_error error;

	port = &ports[port_id];
	pft = port_flow_locate_tunnel_id(port, tunnel_ops->id);
	if (!pft) {
		fprintf(stderr, "failed to locate port flow tunnel #%u\n",
			tunnel_ops->id);
		return NULL;
	}
	if (tunnel_ops->actions) {
		uint32_t num_actions;
		const struct rte_flow_action *aptr;

		ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel,
						&pft->pmd_actions,
						&pft->num_pmd_actions,
						&error);
		if (ret) {
			port_flow_complain(&error);
			return NULL;
		}
		for (aptr = actions, num_actions = 1;
		     aptr->type != RTE_FLOW_ACTION_TYPE_END;
		     aptr++, num_actions++);
		pft->actions = malloc(
				(num_actions + pft->num_pmd_actions) *
				sizeof(actions[0]));
		if (!pft->actions) {
			rte_flow_tunnel_action_decap_release(
					port_id, pft->actions,
					pft->num_pmd_actions, &error);
			return NULL;
		}
		rte_memcpy(pft->actions, pft->pmd_actions,
			   pft->num_pmd_actions * sizeof(actions[0]));
		rte_memcpy(pft->actions + pft->num_pmd_actions, actions,
			   num_actions * sizeof(actions[0]));
	}
	if (tunnel_ops->items) {
		uint32_t num_items;
		const struct rte_flow_item *iptr;

		ret = rte_flow_tunnel_match(port_id, &pft->tunnel,
					    &pft->pmd_items,
					    &pft->num_pmd_items,
					    &error);
		if (ret) {
			port_flow_complain(&error);
			return NULL;
		}
		for (iptr = pattern, num_items = 1;
		     iptr->type != RTE_FLOW_ITEM_TYPE_END;
		     iptr++, num_items++);
		pft->items = malloc((num_items + pft->num_pmd_items) *
				    sizeof(pattern[0]));
		if (!pft->items) {
			rte_flow_tunnel_item_release(
					port_id, pft->pmd_items,
					pft->num_pmd_items, &error);
			return NULL;
		}
		rte_memcpy(pft->items, pft->pmd_items,
			   pft->num_pmd_items * sizeof(pattern[0]));
		rte_memcpy(pft->items + pft->num_pmd_items, pattern,
			   num_items * sizeof(pattern[0]));
	}

	return pft;
}

static void
port_flow_tunnel_offload_cmd_release(portid_t port_id,
				     const struct tunnel_ops *tunnel_ops,
				     struct port_flow_tunnel *pft)
{
	struct rte_flow_error error;

	if (tunnel_ops->actions) {
		free(pft->actions);
		rte_flow_tunnel_action_decap_release(
			port_id, pft->pmd_actions,
			pft->num_pmd_actions, &error);
		pft->actions = NULL;
		pft->pmd_actions = NULL;
	}
	if (tunnel_ops->items) {
		free(pft->items);
		rte_flow_tunnel_item_release(port_id, pft->pmd_items,
					     pft->num_pmd_items, &error);
		pft->items = NULL;
		pft->pmd_items = NULL;
	}
}

/** Add port meter policy */
int
port_meter_policy_add(portid_t port_id, uint32_t policy_id,
		      const struct rte_flow_action *actions)
{
	struct rte_mtr_error error;
	const struct rte_flow_action *act = actions;
	const struct rte_flow_action *start;
	struct rte_mtr_meter_policy_params policy;
	uint32_t i = 0, act_n;
	int ret;

	for (i = 0; i < RTE_COLORS; i++) {
		for (act_n = 0, start = act;
			act->type != RTE_FLOW_ACTION_TYPE_END; act++)
			act_n++;
		if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
			policy.actions[i] = start;
		else
			policy.actions[i] = NULL;
		act++;
	}
	ret = rte_mtr_meter_policy_add(port_id,
			policy_id,
			&policy, &error);
	if (ret)
		print_mtr_err_msg(&error);
	return ret;
}

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions,
		   const struct tunnel_ops *tunnel_ops)
{
	struct rte_flow_error error;
	struct port_flow_tunnel *pft = NULL;
	int ret;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (tunnel_ops->enabled) {
		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
							actions, tunnel_ops);
		if (!pft)
			return -ENOENT;
		if (pft->items)
			pattern = pft->items;
		if (pft->actions)
			actions = pft->actions;
	}
	ret = rte_flow_validate(port_id, attr, pattern, actions, &error);
	if (tunnel_ops->enabled)
		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
	if (ret)
		return port_flow_complain(&error);
	printf("Flow rule validated\n");
	return 0;
}
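
/*
 * Typical interactive usage (testpmd CLI; exact syntax may differ between
 * releases):
 *
 *	testpmd> flow validate 0 ingress pattern eth / ipv4 / end
 *		actions queue index 1 / end
 */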

/** Return age action structure if exists, otherwise NULL. */
static struct rte_flow_action_age *
age_action_get(const struct rte_flow_action *actions)
{
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_AGE:
			return (struct rte_flow_action_age *)
				(uintptr_t)actions->conf;
		default:
			break;
		}
	}
	return NULL;
}

/** Create pattern template */
int
port_flow_pattern_template_create(portid_t port_id, uint32_t id,
				  const struct rte_flow_pattern_template_attr *attr,
				  const struct rte_flow_item *pattern)
{
	struct rte_port *port;
	struct port_template *pit;
	int ret;
	struct rte_flow_error error;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	ret = template_alloc(id, &pit, &port->pattern_templ_list);
	if (ret)
		return ret;
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	pit->template.pattern_template = rte_flow_pattern_template_create(port_id,
						attr, pattern, &error);
	if (!pit->template.pattern_template) {
		uint32_t destroy_id = pit->id;
		port_flow_pattern_template_destroy(port_id, 1, &destroy_id);
		return port_flow_complain(&error);
	}
	printf("Pattern template #%u created\n", pit->id);
	return 0;
}

/** Destroy pattern template */
int
port_flow_pattern_template_destroy(portid_t port_id, uint32_t n,
				   const uint32_t *template)
{
	struct rte_port *port;
	struct port_template **tmp;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->pattern_templ_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_template *pit = *tmp;

			if (template[i] != pit->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));

			if (pit->template.pattern_template &&
			    rte_flow_pattern_template_destroy(port_id,
							      pit->template.pattern_template,
							      &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			*tmp = pit->next;
			printf("Pattern template #%u destroyed\n", pit->id);
			free(pit);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
	}
	return ret;
}
2447 /** Create actions template */
2449 port_flow_actions_template_create(portid_t port_id, uint32_t id,
2450 const struct rte_flow_actions_template_attr *attr,
2451 const struct rte_flow_action *actions,
2452 const struct rte_flow_action *masks)
2454 struct rte_port *port;
2455 struct port_template *pat;
2457 struct rte_flow_error error;
2459 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2460 port_id == (portid_t)RTE_PORT_ALL)
2462 port = &ports[port_id];
2463 ret = template_alloc(id, &pat, &port->actions_templ_list);
2466 /* Poisoning to make sure PMDs update it in case of error. */
2467 memset(&error, 0x22, sizeof(error));
2468 pat->template.actions_template = rte_flow_actions_template_create(port_id,
2469 attr, actions, masks, &error);
2470 if (!pat->template.actions_template) {
2471 uint32_t destroy_id = pat->id;
2472 port_flow_actions_template_destroy(port_id, 1, &destroy_id);
2473 return port_flow_complain(&error);
2475 printf("Actions template #%u created\n", pat->id);
2479 /** Destroy actions template */
2481 port_flow_actions_template_destroy(portid_t port_id, uint32_t n,
2482 const uint32_t *template)
2484 struct rte_port *port;
2485 struct port_template **tmp;
2489 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2490 port_id == (portid_t)RTE_PORT_ALL)
2492 port = &ports[port_id];
2493 tmp = &port->actions_templ_list;
2497 for (i = 0; i != n; ++i) {
2498 struct rte_flow_error error;
2499 struct port_template *pat = *tmp;
2501 if (template[i] != pat->id)
2504 * Poisoning to make sure PMDs update it in case
2507 memset(&error, 0x33, sizeof(error));
2509 if (pat->template.actions_template &&
2510 rte_flow_actions_template_destroy(port_id,
2511 pat->template.actions_template, &error)) {
2512 ret = port_flow_complain(&error);
2516 printf("Actions template #%u destroyed\n", pat->id);
2521 tmp = &(*tmp)->next;
2529 port_flow_template_table_create(portid_t port_id, uint32_t id,
2530 const struct rte_flow_template_table_attr *table_attr,
2531 uint32_t nb_pattern_templates, uint32_t *pattern_templates,
2532 uint32_t nb_actions_templates, uint32_t *actions_templates)
2534 struct rte_port *port;
2535 struct port_table *pt;
2536 struct port_template *temp = NULL;
2539 struct rte_flow_error error;
2540 struct rte_flow_pattern_template
2541 *flow_pattern_templates[nb_pattern_templates];
2542 struct rte_flow_actions_template
2543 *flow_actions_templates[nb_actions_templates];
2545 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2546 port_id == (portid_t)RTE_PORT_ALL)
2548 port = &ports[port_id];
2549 for (i = 0; i < nb_pattern_templates; ++i) {
2551 temp = port->pattern_templ_list;
2553 if (pattern_templates[i] == temp->id) {
2554 flow_pattern_templates[i] =
2555 temp->template.pattern_template;
2562 printf("Pattern template #%u is invalid\n",
2563 pattern_templates[i]);
2567 for (i = 0; i < nb_actions_templates; ++i) {
2569 temp = port->actions_templ_list;
2571 if (actions_templates[i] == temp->id) {
2572 flow_actions_templates[i] =
2573 temp->template.actions_template;
2580 printf("Actions template #%u is invalid\n",
2581 actions_templates[i]);
2585 ret = table_alloc(id, &pt, &port->table_list);
2588 /* Poisoning to make sure PMDs update it in case of error. */
2589 memset(&error, 0x22, sizeof(error));
2590 pt->table = rte_flow_template_table_create(port_id, table_attr,
2591 flow_pattern_templates, nb_pattern_templates,
2592 flow_actions_templates, nb_actions_templates,
2596 uint32_t destroy_id = pt->id;
2597 port_flow_template_table_destroy(port_id, 1, &destroy_id);
2598 return port_flow_complain(&error);
2600 pt->nb_pattern_templates = nb_pattern_templates;
2601 pt->nb_actions_templates = nb_actions_templates;
2602 printf("Template table #%u created\n", pt->id);
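/*
 * Workflow sketch (illustrative only): a template table binds pattern
 * and actions templates together; async rules are then enqueued
 * against the table. With hypothetical ids and attrs on the caller
 * side:
 *
 *	port_flow_pattern_template_create(port_id, pt_id, &pt_attr, pattern);
 *	port_flow_actions_template_create(port_id, at_id, &at_attr,
 *					  actions, masks);
 *	port_flow_template_table_create(port_id, tbl_id, &tbl_attr,
 *					1, &pt_id, 1, &at_id);
 */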
2606 /** Destroy table */
2608 port_flow_template_table_destroy(portid_t port_id,
2609 uint32_t n, const uint32_t *table)
2611 struct rte_port *port;
2612 struct port_table **tmp;
2616 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2617 port_id == (portid_t)RTE_PORT_ALL)
2619 port = &ports[port_id];
2620 tmp = &port->table_list;
2624 for (i = 0; i != n; ++i) {
2625 struct rte_flow_error error;
2626 struct port_table *pt = *tmp;
2628 if (table[i] != pt->id)
2631 * Poisoning to make sure PMDs update it in case
2634 memset(&error, 0x33, sizeof(error));
2637 rte_flow_template_table_destroy(port_id,
2640 ret = port_flow_complain(&error);
2644 printf("Template table #%u destroyed\n", pt->id);
2649 tmp = &(*tmp)->next;
2655 /** Enqueue create flow rule operation. */
2657 port_queue_flow_create(portid_t port_id, queueid_t queue_id,
2658 bool postpone, uint32_t table_id,
2659 uint32_t pattern_idx, uint32_t actions_idx,
2660 const struct rte_flow_item *pattern,
2661 const struct rte_flow_action *actions)
2663 struct rte_flow_op_attr op_attr = { .postpone = postpone };
2664 struct rte_flow *flow;
2665 struct rte_port *port;
2666 struct port_flow *pf;
2667 struct port_table *pt;
2670 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL };
2671 struct rte_flow_action_age *age = age_action_get(actions);
2673 port = &ports[port_id];
2674 if (port->flow_list) {
2675 if (port->flow_list->id == UINT32_MAX) {
2676 printf("Highest rule ID is already assigned,"
2677 " delete it first\n");
2680 id = port->flow_list->id + 1;
2683 if (queue_id >= port->queue_nb) {
2684 printf("Queue #%u is invalid\n", queue_id);
2689 pt = port->table_list;
2691 if (table_id == pt->id) {
2698 printf("Table #%u is invalid\n", table_id);
2702 if (pattern_idx >= pt->nb_pattern_templates) {
2703 printf("Pattern template index #%u is invalid,"
2704 " %u templates present in the table\n",
2705 pattern_idx, pt->nb_pattern_templates);
2708 if (actions_idx >= pt->nb_actions_templates) {
2709 printf("Actions template index #%u is invalid,"
2710 " %u templates present in the table\n",
2711 actions_idx, pt->nb_actions_templates);
2715 pf = port_flow_new(NULL, pattern, actions, &error);
2717 return port_flow_complain(&error);
2719 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
2720 age->context = &pf->age_type;
2722 /* Poisoning to make sure PMDs update it in case of error. */
2723 memset(&error, 0x11, sizeof(error));
2724 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table,
2725 pattern, pattern_idx, actions, actions_idx, NULL, &error);
2727 uint32_t flow_id = pf->id;
2728 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id);
2729 return port_flow_complain(&error);
2732 pf->next = port->flow_list;
2735 port->flow_list = pf;
2736 printf("Flow rule #%u creation enqueued\n", pf->id);
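/*
 * Usage sketch (illustrative only): an enqueued creation only reaches
 * the hardware once the queue is pushed, and its completion status is
 * only reported by a later pull:
 *
 *	port_queue_flow_create(port_id, queue_id, false, table_id,
 *			       0, 0, pattern, actions);
 *	port_queue_flow_push(port_id, queue_id);
 *	port_queue_flow_pull(port_id, queue_id);
 */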
2740 /** Enqueue a number of flow rule destruction operations. */
2742 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id,
2743 bool postpone, uint32_t n, const uint32_t *rule)
2745 struct rte_flow_op_attr op_attr = { .postpone = postpone };
2746 struct rte_port *port;
2747 struct port_flow **tmp;
2751 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2752 port_id == (portid_t)RTE_PORT_ALL)
2754 port = &ports[port_id];
2756 if (queue_id >= port->queue_nb) {
2757 printf("Queue #%u is invalid\n", queue_id);
2761 tmp = &port->flow_list;
2765 for (i = 0; i != n; ++i) {
2766 struct rte_flow_error error;
2767 struct port_flow *pf = *tmp;
2769 if (rule[i] != pf->id)
2772 * Poisoning to make sure PMDs
2773 * update it in case of error.
2775 memset(&error, 0x33, sizeof(error));
2776 if (rte_flow_async_destroy(port_id, queue_id, &op_attr,
2777 pf->flow, NULL, &error)) {
2778 ret = port_flow_complain(&error);
2781 printf("Flow rule #%u destruction enqueued\n", pf->id);
2787 tmp = &(*tmp)->next;
2793 /** Enqueue indirect action create operation. */
2795 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id,
2796 bool postpone, uint32_t id,
2797 const struct rte_flow_indir_action_conf *conf,
2798 const struct rte_flow_action *action)
2800 const struct rte_flow_op_attr attr = { .postpone = postpone};
2801 struct rte_port *port;
2802 struct port_indirect_action *pia;
2804 struct rte_flow_error error;
2806 ret = action_alloc(port_id, id, &pia);
2810 port = &ports[port_id];
2811 if (queue_id >= port->queue_nb) {
2812 printf("Queue #%u is invalid\n", queue_id);
2816 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
2817 struct rte_flow_action_age *age =
2818 (struct rte_flow_action_age *)(uintptr_t)(action->conf);
2820 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
2821 age->context = &pia->age_type;
2823 /* Poisoning to make sure PMDs update it in case of error. */
2824 memset(&error, 0x88, sizeof(error));
2825 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id,
2826 &attr, conf, action, NULL, &error);
2828 uint32_t destroy_id = pia->id;
2829 port_queue_action_handle_destroy(port_id, queue_id,
2830 postpone, 1, &destroy_id);
2831 return port_flow_complain(&error);
2833 pia->type = action->type;
2834 printf("Indirect action #%u creation queued\n", pia->id);
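/*
 * Note: as with regular flows, an AGE action embedded in an indirect
 * action gets a context pointer (&pia->age_type above) so that
 * port_flow_aged() can later distinguish flow contexts from
 * indirect-action contexts among the results of
 * rte_flow_get_aged_flows().
 */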
2838 /** Enqueue indirect action destroy operation. */
2840 port_queue_action_handle_destroy(portid_t port_id,
2841 uint32_t queue_id, bool postpone,
2842 uint32_t n, const uint32_t *actions)
2844 const struct rte_flow_op_attr attr = { .postpone = postpone};
2845 struct rte_port *port;
2846 struct port_indirect_action **tmp;
2850 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2851 port_id == (portid_t)RTE_PORT_ALL)
2853 port = &ports[port_id];
2855 if (queue_id >= port->queue_nb) {
2856 printf("Queue #%u is invalid\n", queue_id);
2860 tmp = &port->actions_list;
2864 for (i = 0; i != n; ++i) {
2865 struct rte_flow_error error;
2866 struct port_indirect_action *pia = *tmp;
2868 if (actions[i] != pia->id)
2871 * Poisoning to make sure PMDs update it in case
2874 memset(&error, 0x99, sizeof(error));
2877 rte_flow_async_action_handle_destroy(port_id,
2878 queue_id, &attr, pia->handle, NULL, &error)) {
2879 ret = port_flow_complain(&error);
2883 printf("Indirect action #%u destruction queued\n",
2889 tmp = &(*tmp)->next;
2895 /** Enqueue indirect action update operation. */
2897 port_queue_action_handle_update(portid_t port_id,
2898 uint32_t queue_id, bool postpone, uint32_t id,
2899 const struct rte_flow_action *action)
2901 const struct rte_flow_op_attr attr = { .postpone = postpone};
2902 struct rte_port *port;
2903 struct rte_flow_error error;
2904 struct rte_flow_action_handle *action_handle;
2906 action_handle = port_action_handle_get_by_id(port_id, id);
2910 port = &ports[port_id];
2911 if (queue_id >= port->queue_nb) {
2912 printf("Queue #%u is invalid\n", queue_id);
2916 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr,
2917 action_handle, action, NULL, &error)) {
2918 return port_flow_complain(&error);
2920 printf("Indirect action #%u update queued\n", id);
2924 /** Push all queued flow operations to the NIC. */
2926 port_queue_flow_push(portid_t port_id, queueid_t queue_id)
2928 struct rte_port *port;
2929 struct rte_flow_error error;
2932 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2933 port_id == (portid_t)RTE_PORT_ALL)
2935 port = &ports[port_id];
2937 if (queue_id >= port->queue_nb) {
2938 printf("Queue #%u is invalid\n", queue_id);
2942 memset(&error, 0x55, sizeof(error));
2943 ret = rte_flow_push(port_id, queue_id, &error);
2945 printf("Failed to push queued operations to the NIC\n");
2948 printf("Queue #%u operations pushed\n", queue_id);
2952 /** Pull queue operation results from the queue. */
2954 port_queue_flow_pull(portid_t port_id, queueid_t queue_id)
2956 struct rte_port *port;
2957 struct rte_flow_op_result *res;
2958 struct rte_flow_error error;
2963 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2964 port_id == (portid_t)RTE_PORT_ALL)
2966 port = &ports[port_id];
2968 if (queue_id >= port->queue_nb) {
2969 printf("Queue #%u is invalid\n", queue_id);
2973 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result));
2975 printf("Failed to allocate memory for pulled results\n");
2979 memset(&error, 0x66, sizeof(error));
2980 ret = rte_flow_pull(port_id, queue_id, res,
2981 port->queue_sz, &error);
2983 printf("Failed to pull operation results\n");
2988 for (i = 0; i < ret; i++) {
2989 if (res[i].status == RTE_FLOW_OP_SUCCESS)
2992 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n",
2993 queue_id, ret, ret - success, success);
2998 /** Create flow rule. */
3000 port_flow_create(portid_t port_id,
3001 const struct rte_flow_attr *attr,
3002 const struct rte_flow_item *pattern,
3003 const struct rte_flow_action *actions,
3004 const struct tunnel_ops *tunnel_ops)
3006 struct rte_flow *flow;
3007 struct rte_port *port;
3008 struct port_flow *pf;
3010 struct rte_flow_error error;
3011 struct port_flow_tunnel *pft = NULL;
3012 struct rte_flow_action_age *age = age_action_get(actions);
3014 port = &ports[port_id];
3015 if (port->flow_list) {
3016 if (port->flow_list->id == UINT32_MAX) {
3018 "Highest rule ID is already assigned, delete it first\n");
3021 id = port->flow_list->id + 1;
3023 if (tunnel_ops->enabled) {
3024 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
3025 actions, tunnel_ops);
3029 pattern = pft->items;
3031 actions = pft->actions;
3033 pf = port_flow_new(attr, pattern, actions, &error);
3035 return port_flow_complain(&error);
3037 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
3038 age->context = &pf->age_type;
3040 /* Poisoning to make sure PMDs update it in case of error. */
3041 memset(&error, 0x22, sizeof(error));
3042 flow = rte_flow_create(port_id, attr, pattern, actions, &error);
3044 if (tunnel_ops->enabled)
3045 port_flow_tunnel_offload_cmd_release(port_id,
3048 return port_flow_complain(&error);
3050 pf->next = port->flow_list;
3053 port->flow_list = pf;
3054 if (tunnel_ops->enabled)
3055 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
3056 printf("Flow rule #%u created\n", pf->id);
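/*
 * Note: rules are prepended to port->flow_list, so the list head always
 * carries the highest rule ID; the "flow_list->id + 1" allocation above
 * relies on this, and creation fails once the head reaches UINT32_MAX.
 */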
3060 /** Destroy a number of flow rules. */
3062 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
3064 struct rte_port *port;
3065 struct port_flow **tmp;
3069 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3070 port_id == (portid_t)RTE_PORT_ALL)
3072 port = &ports[port_id];
3073 tmp = &port->flow_list;
3077 for (i = 0; i != n; ++i) {
3078 struct rte_flow_error error;
3079 struct port_flow *pf = *tmp;
3081 if (rule[i] != pf->id)
3084 * Poisoning to make sure PMDs update it in case
3087 memset(&error, 0x33, sizeof(error));
3088 if (rte_flow_destroy(port_id, pf->flow, &error)) {
3089 ret = port_flow_complain(&error);
3092 printf("Flow rule #%u destroyed\n", pf->id);
3098 tmp = &(*tmp)->next;
3104 /** Remove all flow rules. */
3106 port_flow_flush(portid_t port_id)
3108 struct rte_flow_error error;
3109 struct rte_port *port;
3112 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3113 port_id == (portid_t)RTE_PORT_ALL)
3116 port = &ports[port_id];
3118 if (port->flow_list == NULL)
3121 /* Poisoning to make sure PMDs update it in case of error. */
3122 memset(&error, 0x44, sizeof(error));
3123 if (rte_flow_flush(port_id, &error)) {
3124 port_flow_complain(&error);
3127 while (port->flow_list) {
3128 struct port_flow *pf = port->flow_list->next;
3130 free(port->flow_list);
3131 port->flow_list = pf;
3136 /** Dump flow rules. */
3138 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id,
3139 const char *file_name)
3142 FILE *file = stdout;
3143 struct rte_flow_error error;
3144 struct rte_port *port;
3145 struct port_flow *pflow;
3146 struct rte_flow *tmpFlow = NULL;
3149 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3150 port_id == (portid_t)RTE_PORT_ALL)
3154 port = &ports[port_id];
3155 pflow = port->flow_list;
3157 if (rule_id != pflow->id) {
3158 pflow = pflow->next;
3160 tmpFlow = pflow->flow;
3166 if (found == false) {
3167 fprintf(stderr, "Failed to dump flow %d: not found\n", rule_id);
3172 if (file_name && strlen(file_name)) {
3173 file = fopen(file_name, "w");
3175 fprintf(stderr, "Failed to create file %s: %s\n",
3176 file_name, strerror(errno));
3182 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error);
3184 ret = rte_flow_dev_dump(port_id, NULL, file, &error);
3186 port_flow_complain(&error);
3187 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret));
3189 printf("Flow dump finished\n");
3190 if (file_name && strlen(file_name))
3195 /** Query a flow rule. */
3197 port_flow_query(portid_t port_id, uint32_t rule,
3198 const struct rte_flow_action *action)
3200 struct rte_flow_error error;
3201 struct rte_port *port;
3202 struct port_flow *pf;
3205 struct rte_flow_query_count count;
3206 struct rte_flow_action_rss rss_conf;
3207 struct rte_flow_query_age age;
3211 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3212 port_id == (portid_t)RTE_PORT_ALL)
3214 port = &ports[port_id];
3215 for (pf = port->flow_list; pf; pf = pf->next)
3219 fprintf(stderr, "Flow rule #%u not found\n", rule);
3222 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
3223 &name, sizeof(name),
3224 (void *)(uintptr_t)action->type, &error);
3226 return port_flow_complain(&error);
3227 switch (action->type) {
3228 case RTE_FLOW_ACTION_TYPE_COUNT:
3229 case RTE_FLOW_ACTION_TYPE_RSS:
3230 case RTE_FLOW_ACTION_TYPE_AGE:
3233 fprintf(stderr, "Cannot query action type %d (%s)\n",
3234 action->type, name);
3237 /* Poisoning to make sure PMDs update it in case of error. */
3238 memset(&error, 0x55, sizeof(error));
3239 memset(&query, 0, sizeof(query));
3240 if (rte_flow_query(port_id, pf->flow, action, &query, &error))
3241 return port_flow_complain(&error);
3242 switch (action->type) {
3243 case RTE_FLOW_ACTION_TYPE_COUNT:
3247 " hits: %" PRIu64 "\n"
3248 " bytes: %" PRIu64 "\n",
3250 query.count.hits_set,
3251 query.count.bytes_set,
3255 case RTE_FLOW_ACTION_TYPE_RSS:
3256 rss_config_display(&query.rss_conf);
3258 case RTE_FLOW_ACTION_TYPE_AGE:
3261 " sec_since_last_hit_valid: %u\n"
3262 " sec_since_last_hit: %" PRIu32 "\n",
3265 query.age.sec_since_last_hit_valid,
3266 query.age.sec_since_last_hit);
3270 "Cannot display result for action type %d (%s)\n",
3271 action->type, name);
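/*
 * Usage sketch (illustrative only): querying the COUNT action of
 * rule #1, assuming the rule was created with a count action:
 *
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *
 *	port_flow_query(port_id, 1, &count_action);
 */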
3277 /** List and optionally destroy all aged flows. */
3279 port_flow_aged(portid_t port_id, uint8_t destroy)
3282 int nb_context, total = 0, idx;
3283 struct rte_flow_error error;
3284 enum age_action_context_type *type;
3286 struct port_flow *pf;
3287 struct port_indirect_action *pia;
3290 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3291 port_id == (portid_t)RTE_PORT_ALL)
3293 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
3294 printf("Port %u total aged flows: %d\n", port_id, total);
3296 port_flow_complain(&error);
3301 contexts = malloc(sizeof(void *) * total);
3302 if (contexts == NULL) {
3303 fprintf(stderr, "Cannot allocate contexts for aged flow\n");
3306 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
3307 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
3308 if (nb_context != total) {
3310 "Port %d: aged flow count (%d) != total (%d)\n",
3311 port_id, nb_context, total);
3316 for (idx = 0; idx < nb_context; idx++) {
3317 if (!contexts[idx]) {
3318 fprintf(stderr, "Error: got NULL context in port %u\n",
3322 type = (enum age_action_context_type *)contexts[idx];
3324 case ACTION_AGE_CONTEXT_TYPE_FLOW:
3325 ctx.pf = container_of(type, struct port_flow, age_type);
3326 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32
3330 ctx.pf->rule.attr->group,
3331 ctx.pf->rule.attr->priority,
3332 ctx.pf->rule.attr->ingress ? 'i' : '-',
3333 ctx.pf->rule.attr->egress ? 'e' : '-',
3334 ctx.pf->rule.attr->transfer ? 't' : '-');
3335 if (destroy && !port_flow_destroy(port_id, 1,
3339 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION:
3340 ctx.pia = container_of(type,
3341 struct port_indirect_action, age_type);
3342 printf("%-20s\t%" PRIu32 "\n", "Indirect action",
3346 fprintf(stderr, "Error: invalid context type %u\n",
3351 printf("\n%d flows destroyed\n", total);
3355 /** List flow rules. */
3357 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
3359 struct rte_port *port;
3360 struct port_flow *pf;
3361 struct port_flow *list = NULL;
3364 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3365 port_id == (portid_t)RTE_PORT_ALL)
3367 port = &ports[port_id];
3368 if (!port->flow_list)
3370 /* Sort flows by group, priority and ID. */
3371 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
3372 struct port_flow **tmp;
3373 const struct rte_flow_attr *curr = pf->rule.attr;
3376 /* Filter out unwanted groups. */
3377 for (i = 0; i != n; ++i)
3378 if (curr->group == group[i])
3383 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
3384 const struct rte_flow_attr *comp = (*tmp)->rule.attr;
3386 if (curr->group > comp->group ||
3387 (curr->group == comp->group &&
3388 curr->priority > comp->priority) ||
3389 (curr->group == comp->group &&
3390 curr->priority == comp->priority &&
3391 pf->id > (*tmp)->id))
3398 printf("ID\tGroup\tPrio\tAttr\tRule\n");
3399 for (pf = list; pf != NULL; pf = pf->tmp) {
3400 const struct rte_flow_item *item = pf->rule.pattern;
3401 const struct rte_flow_action *action = pf->rule.actions;
3404 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
3406 pf->rule.attr->group,
3407 pf->rule.attr->priority,
3408 pf->rule.attr->ingress ? 'i' : '-',
3409 pf->rule.attr->egress ? 'e' : '-',
3410 pf->rule.attr->transfer ? 't' : '-');
3411 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
3412 if ((uint32_t)item->type > INT_MAX)
3413 name = "PMD_INTERNAL";
3414 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
3415 &name, sizeof(name),
3416 (void *)(uintptr_t)item->type,
3419 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
3420 printf("%s ", name);
3424 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
3425 if ((uint32_t)action->type > INT_MAX)
3426 name = "PMD_INTERNAL";
3427 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
3428 &name, sizeof(name),
3429 (void *)(uintptr_t)action->type,
3432 if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
3433 printf(" %s", name);
3440 /** Restrict ingress traffic to the defined flow rules. */
3442 port_flow_isolate(portid_t port_id, int set)
3444 struct rte_flow_error error;
3446 /* Poisoning to make sure PMDs update it in case of error. */
3447 memset(&error, 0x66, sizeof(error));
3448 if (rte_flow_isolate(port_id, set, &error))
3449 return port_flow_complain(&error);
3450 printf("Ingress traffic on port %u is %s to the defined flow rules\n",
3452 set ? "now restricted" : "not restricted anymore");
3457 * RX/TX ring descriptors display functions.
3460 rx_queue_id_is_invalid(queueid_t rxq_id)
3462 if (rxq_id < nb_rxq)
3464 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n",
3470 tx_queue_id_is_invalid(queueid_t txq_id)
3472 if (txq_id < nb_txq)
3474 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n",
3480 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
3482 struct rte_port *port = &ports[port_id];
3483 struct rte_eth_rxq_info rx_qinfo;
3486 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
3488 *ring_size = rx_qinfo.nb_desc;
3492 if (ret != -ENOTSUP)
3495 * If rte_eth_rx_queue_info_get() is not supported by this PMD,
3496 * the ring size stored in testpmd is used for validity verification.
3497 * When the rxq is configured by rte_eth_rx_queue_setup() with nb_rx_desc
3498 * being 0, a default value provided by the PMD is used to set up this
3499 * rxq. If that default value is also 0, then
3500 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE is used to set up this rxq.
3502 if (port->nb_rx_desc[rxq_id])
3503 *ring_size = port->nb_rx_desc[rxq_id];
3504 else if (port->dev_info.default_rxportconf.ring_size)
3505 *ring_size = port->dev_info.default_rxportconf.ring_size;
3507 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
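/*
 * The resulting precedence is: ring size reported by the PMD via queue
 * info, then the nb_rx_desc value testpmd configured itself, then the
 * PMD's preferred default ring size, and finally
 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE.
 */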
3512 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
3514 struct rte_port *port = &ports[port_id];
3515 struct rte_eth_txq_info tx_qinfo;
3518 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
3520 *ring_size = tx_qinfo.nb_desc;
3524 if (ret != -ENOTSUP)
3527 * If rte_eth_tx_queue_info_get() is not supported by this PMD,
3528 * the ring size stored in testpmd is used for validity verification.
3529 * When the txq is configured by rte_eth_tx_queue_setup() with nb_tx_desc
3530 * being 0, a default value provided by the PMD is used to set up this
3531 * txq. If that default value is also 0, then
3532 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE is used to set up this txq.
3534 if (port->nb_tx_desc[txq_id])
3535 *ring_size = port->nb_tx_desc[txq_id];
3536 else if (port->dev_info.default_txportconf.ring_size)
3537 *ring_size = port->dev_info.default_txportconf.ring_size;
3539 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
3544 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
3549 ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
3553 if (rxdesc_id < ring_size)
3556 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n",
3557 rxdesc_id, ring_size);
3562 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
3567 ret = get_tx_ring_size(port_id, txq_id, &ring_size);
3571 if (txdesc_id < ring_size)
3574 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n",
3575 txdesc_id, ring_size);
3579 static const struct rte_memzone *
3580 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
3582 char mz_name[RTE_MEMZONE_NAMESIZE];
3583 const struct rte_memzone *mz;
3585 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
3586 port_id, q_id, ring_name);
3587 mz = rte_memzone_lookup(mz_name);
3590 "%s ring memory zone of (port %d, queue %d) not found (zone name = %s)\n",
3591 ring_name, port_id, q_id, mz_name);
3595 union igb_ring_dword {
3598 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3608 struct igb_ring_desc_32_bytes {
3609 union igb_ring_dword lo_dword;
3610 union igb_ring_dword hi_dword;
3611 union igb_ring_dword resv1;
3612 union igb_ring_dword resv2;
3615 struct igb_ring_desc_16_bytes {
3616 union igb_ring_dword lo_dword;
3617 union igb_ring_dword hi_dword;
3621 ring_rxd_display_dword(union igb_ring_dword dword)
3623 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
3624 (unsigned)dword.words.hi);
3628 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
3629 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
3632 __rte_unused portid_t port_id,
3636 struct igb_ring_desc_16_bytes *ring =
3637 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
3638 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
3640 struct rte_eth_dev_info dev_info;
3642 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3646 if (strstr(dev_info.driver_name, "i40e") != NULL) {
3647 /* 32-byte RX descriptor, i40e only */
3648 struct igb_ring_desc_32_bytes *ring =
3649 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
3650 ring[desc_id].lo_dword.dword =
3651 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
3652 ring_rxd_display_dword(ring[desc_id].lo_dword);
3653 ring[desc_id].hi_dword.dword =
3654 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
3655 ring_rxd_display_dword(ring[desc_id].hi_dword);
3656 ring[desc_id].resv1.dword =
3657 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
3658 ring_rxd_display_dword(ring[desc_id].resv1);
3659 ring[desc_id].resv2.dword =
3660 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
3661 ring_rxd_display_dword(ring[desc_id].resv2);
3666 /* 16-byte RX descriptor */
3667 ring[desc_id].lo_dword.dword =
3668 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
3669 ring_rxd_display_dword(ring[desc_id].lo_dword);
3670 ring[desc_id].hi_dword.dword =
3671 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
3672 ring_rxd_display_dword(ring[desc_id].hi_dword);
3676 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
3678 struct igb_ring_desc_16_bytes *ring;
3679 struct igb_ring_desc_16_bytes txd;
3681 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
3682 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
3683 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
3684 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
3685 (unsigned)txd.lo_dword.words.lo,
3686 (unsigned)txd.lo_dword.words.hi,
3687 (unsigned)txd.hi_dword.words.lo,
3688 (unsigned)txd.hi_dword.words.hi);
3692 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
3694 const struct rte_memzone *rx_mz;
3696 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
3698 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
3701 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
3705 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
3707 const struct rte_memzone *tx_mz;
3709 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
3711 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
3714 ring_tx_descriptor_display(tx_mz, txd_id);
3718 fwd_lcores_config_display(void)
3722 printf("List of forwarding lcores:");
3723 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
3724 printf(" %2u", fwd_lcores_cpuids[lc_id]);
3728 rxtx_config_display(void)
3733 printf(" %s packet forwarding%s packets/burst=%d\n",
3734 cur_fwd_eng->fwd_mode_name,
3735 retry_enabled == 0 ? "" : " with retry",
3738 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
3739 printf(" packet len=%u - nb packet segments=%d\n",
3740 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
3742 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
3743 nb_fwd_lcores, nb_fwd_ports);
3745 RTE_ETH_FOREACH_DEV(pid) {
3746 struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf;
3747 struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf;
3748 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
3749 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
3750 struct rte_eth_rxq_info rx_qinfo;
3751 struct rte_eth_txq_info tx_qinfo;
3752 uint16_t rx_free_thresh_tmp;
3753 uint16_t tx_free_thresh_tmp;
3754 uint16_t tx_rs_thresh_tmp;
3755 uint16_t nb_rx_desc_tmp;
3756 uint16_t nb_tx_desc_tmp;
3757 uint64_t offloads_tmp;
3758 uint8_t pthresh_tmp;
3759 uint8_t hthresh_tmp;
3760 uint8_t wthresh_tmp;
3763 /* per port config */
3764 printf(" port %d: RX queue number: %d Tx queue number: %d\n",
3765 (unsigned int)pid, nb_rxq, nb_txq);
3767 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
3768 ports[pid].dev_conf.rxmode.offloads,
3769 ports[pid].dev_conf.txmode.offloads);
3771 /* per-rx-queue config, shown only for the first queue to limit verbosity */
3772 for (qid = 0; qid < 1; qid++) {
3773 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
3775 nb_rx_desc_tmp = nb_rx_desc[qid];
3776 rx_free_thresh_tmp =
3777 rx_conf[qid].rx_free_thresh;
3778 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
3779 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
3780 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
3781 offloads_tmp = rx_conf[qid].offloads;
3783 nb_rx_desc_tmp = rx_qinfo.nb_desc;
3784 rx_free_thresh_tmp =
3785 rx_qinfo.conf.rx_free_thresh;
3786 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
3787 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
3788 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
3789 offloads_tmp = rx_qinfo.conf.offloads;
3792 printf(" RX queue: %d\n", qid);
3793 printf(" RX desc=%d - RX free threshold=%d\n",
3794 nb_rx_desc_tmp, rx_free_thresh_tmp);
3795 printf(" RX threshold registers: pthresh=%d hthresh=%d "
3797 pthresh_tmp, hthresh_tmp, wthresh_tmp);
3798 printf(" RX Offloads=0x%"PRIx64, offloads_tmp);
3799 if (rx_conf->share_group > 0)
3800 printf(" share_group=%u share_qid=%u",
3801 rx_conf->share_group,
3802 rx_conf->share_qid);
3806 /* per-tx-queue config, shown only for the first queue to limit verbosity */
3807 for (qid = 0; qid < 1; qid++) {
3808 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
3810 nb_tx_desc_tmp = nb_tx_desc[qid];
3811 tx_free_thresh_tmp =
3812 tx_conf[qid].tx_free_thresh;
3813 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
3814 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
3815 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
3816 offloads_tmp = tx_conf[qid].offloads;
3817 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
3819 nb_tx_desc_tmp = tx_qinfo.nb_desc;
3820 tx_free_thresh_tmp =
3821 tx_qinfo.conf.tx_free_thresh;
3822 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
3823 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
3824 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
3825 offloads_tmp = tx_qinfo.conf.offloads;
3826 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
3829 printf(" TX queue: %d\n", qid);
3830 printf(" TX desc=%d - TX free threshold=%d\n",
3831 nb_tx_desc_tmp, tx_free_thresh_tmp);
3832 printf(" TX threshold registers: pthresh=%d hthresh=%d "
3834 pthresh_tmp, hthresh_tmp, wthresh_tmp);
3835 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
3836 offloads_tmp, tx_rs_thresh_tmp);
3842 port_rss_reta_info(portid_t port_id,
3843 struct rte_eth_rss_reta_entry64 *reta_conf,
3844 uint16_t nb_entries)
3846 uint16_t i, idx, shift;
3849 if (port_id_is_invalid(port_id, ENABLED_WARN))
3852 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
3855 "Failed to get RSS RETA info, return code = %d\n",
3860 for (i = 0; i < nb_entries; i++) {
3861 idx = i / RTE_ETH_RETA_GROUP_SIZE;
3862 shift = i % RTE_ETH_RETA_GROUP_SIZE;
3863 if (!(reta_conf[idx].mask & (1ULL << shift)))
3865 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
3866 i, reta_conf[idx].reta[shift]);
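/*
 * Worked example (illustrative): with RTE_ETH_RETA_GROUP_SIZE = 64,
 * RETA entry 70 lives at reta_conf[1].reta[6], and its line is printed
 * only if the caller set bit 6 of reta_conf[1].mask.
 */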
3871 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
3875 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
3877 struct rte_eth_rss_conf rss_conf = {0};
3878 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
3882 struct rte_eth_dev_info dev_info;
3883 uint8_t hash_key_size;
3886 if (port_id_is_invalid(port_id, ENABLED_WARN))
3889 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3893 if (dev_info.hash_key_size > 0 &&
3894 dev_info.hash_key_size <= sizeof(rss_key))
3895 hash_key_size = dev_info.hash_key_size;
3898 "dev_info did not provide a valid hash key size\n");
3902 /* Get RSS hash key if asked to display it */
3903 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
3904 rss_conf.rss_key_len = hash_key_size;
3905 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
3909 fprintf(stderr, "port index %d invalid\n", port_id);
3912 fprintf(stderr, "operation not supported by device\n");
3915 fprintf(stderr, "operation failed - diag=%d\n", diag);
3920 rss_hf = rss_conf.rss_hf;
3922 printf("RSS disabled\n");
3925 printf("RSS functions:\n");
3926 rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
3930 printf("RSS key:\n");
3931 for (i = 0; i < hash_key_size; i++)
3932 printf("%02X", rss_key[i]);
3937 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
3938 uint8_t hash_key_len)
3940 struct rte_eth_rss_conf rss_conf;
3943 rss_conf.rss_key = NULL;
3944 rss_conf.rss_key_len = 0;
3945 rss_conf.rss_hf = str_to_rsstypes(rss_type);
3946 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
3948 rss_conf.rss_key = hash_key;
3949 rss_conf.rss_key_len = hash_key_len;
3950 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
3957 fprintf(stderr, "port index %d invalid\n", port_id);
3960 fprintf(stderr, "operation not supported by device\n");
3963 fprintf(stderr, "operation failed - diag=%d\n", diag);
3969 * Check whether a shared rxq is scheduled on other lcores.
3972 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
3973 portid_t src_port, queueid_t src_rxq,
3974 uint32_t share_group, queueid_t share_rxq)
3977 streamid_t nb_fs_per_lcore;
3980 struct fwd_stream *fs;
3981 struct rte_port *port;
3982 struct rte_eth_dev_info *dev_info;
3983 struct rte_eth_rxconf *rxq_conf;
3985 nb_fc = cur_fwd_config.nb_fwd_lcores;
3986 /* Check remaining cores. */
3987 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {
3988 sm_id = fwd_lcores[lc_id]->stream_idx;
3989 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
3990 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
3992 fs = fwd_streams[sm_id];
3993 port = &ports[fs->rx_port];
3994 dev_info = &port->dev_info;
3995 rxq_conf = &port->rxq[fs->rx_queue].conf;
3996 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
3997 == 0 || rxq_conf->share_group == 0)
3998 /* Not shared rxq. */
4000 if (domain_id != port->dev_info.switch_info.domain_id)
4002 if (rxq_conf->share_group != share_group)
4004 if (rxq_conf->share_qid != share_rxq)
4006 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n",
4007 share_group, share_rxq);
4008 printf(" lcore %hhu Port %hu queue %hu\n",
4009 src_lc, src_port, src_rxq);
4010 printf(" lcore %hhu Port %hu queue %hu\n",
4011 lc_id, fs->rx_port, fs->rx_queue);
4012 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n",
4021 * Check shared rxq configuration.
4023 * A shared group must not be scheduled on different cores.
4026 pkt_fwd_shared_rxq_check(void)
4029 streamid_t nb_fs_per_lcore;
4032 struct fwd_stream *fs;
4034 struct rte_port *port;
4035 struct rte_eth_dev_info *dev_info;
4036 struct rte_eth_rxconf *rxq_conf;
4040 nb_fc = cur_fwd_config.nb_fwd_lcores;
4042 * Check streams on each core, make sure the same switch domain +
4043 * group + queue doesn't get scheduled on other cores.
4045 for (lc_id = 0; lc_id < nb_fc; lc_id++) {
4046 sm_id = fwd_lcores[lc_id]->stream_idx;
4047 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
4048 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
4050 fs = fwd_streams[sm_id];
4051 /* Update lcore info stream being scheduled. */
4052 fs->lcore = fwd_lcores[lc_id];
4053 port = &ports[fs->rx_port];
4054 dev_info = &port->dev_info;
4055 rxq_conf = &port->rxq[fs->rx_queue].conf;
4056 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
4057 == 0 || rxq_conf->share_group == 0)
4058 /* Not shared rxq. */
4060 /* Check shared rxq not scheduled on remaining cores. */
4061 domain_id = port->dev_info.switch_info.domain_id;
4062 if (fwd_stream_on_other_lcores(domain_id, lc_id,
4065 rxq_conf->share_group,
4066 rxq_conf->share_qid))
4074 * Setup forwarding configuration for each logical core.
4077 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
4079 streamid_t nb_fs_per_lcore;
4087 nb_fs = cfg->nb_fwd_streams;
4088 nb_fc = cfg->nb_fwd_lcores;
4089 if (nb_fs <= nb_fc) {
4090 nb_fs_per_lcore = 1;
4093 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
4094 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
4097 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
4099 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
4100 fwd_lcores[lc_id]->stream_idx = sm_id;
4101 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
4102 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
4106 * Assign extra remaining streams, if any.
4108 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
4109 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
4110 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
4111 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
4112 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
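/*
 * Worked example (illustrative): with nb_fs = 10 streams and nb_fc = 4
 * lcores, nb_fs_per_lcore = 2 and nb_extra = 2, so the distribution is
 * 2 + 2 + 3 + 3 streams with stream_idx 0, 2, 4 and 7 respectively.
 */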
4117 fwd_topology_tx_port_get(portid_t rxp)
4119 static int warning_once = 1;
4121 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
4123 switch (port_topology) {
4125 case PORT_TOPOLOGY_PAIRED:
4126 if ((rxp & 0x1) == 0) {
4127 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
4131 "\nWarning! port-topology=paired with an odd number of forwarding ports; the last port will pair with itself.\n\n");
4137 case PORT_TOPOLOGY_CHAINED:
4138 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
4139 case PORT_TOPOLOGY_LOOP:
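/*
 * Example (illustrative) with 4 forwarding ports:
 *	paired:  0 <-> 1, 2 <-> 3
 *	chained: 0 -> 1 -> 2 -> 3 -> 0
 *	loop:    each port transmits back on itself
 */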
4145 simple_fwd_config_setup(void)
4149 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
4150 cur_fwd_config.nb_fwd_streams =
4151 (streamid_t) cur_fwd_config.nb_fwd_ports;
4153 /* reinitialize forwarding streams */
4157 * In the simple forwarding test, the number of forwarding cores
4158 * must be lower than or equal to the number of forwarding ports.
4160 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4161 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
4162 cur_fwd_config.nb_fwd_lcores =
4163 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
4164 setup_fwd_config_of_each_lcore(&cur_fwd_config);
4166 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
4167 fwd_streams[i]->rx_port = fwd_ports_ids[i];
4168 fwd_streams[i]->rx_queue = 0;
4169 fwd_streams[i]->tx_port =
4170 fwd_ports_ids[fwd_topology_tx_port_get(i)];
4171 fwd_streams[i]->tx_queue = 0;
4172 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
4173 fwd_streams[i]->retry_enabled = retry_enabled;
4178 * For the RSS forwarding test, all streams are distributed over the lcores.
4179 * Each stream is composed of an RX queue to poll on an RX port for input
4180 * packets, associated with a TX queue of a TX port to which forwarded packets are sent.
4183 rss_fwd_config_setup(void)
4196 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4197 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
4198 cur_fwd_config.nb_fwd_streams =
4199 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
4201 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
4202 cur_fwd_config.nb_fwd_lcores =
4203 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
4205 /* reinitialize forwarding streams */
4208 setup_fwd_config_of_each_lcore(&cur_fwd_config);
4210 if (proc_id > 0 && nb_q % num_procs != 0)
4211 printf("Warning! the number of queues should be a multiple of the number of processes, or packet loss will happen.\n");
4214 * In multi-process mode, all queues are allocated to different
4215 * processes based on num_procs and proc_id. For example, with
4216 * 4 queues (nb_q) and 2 processes (num_procs):
4217 * queues 0~1 go to the primary process,
4218 * queues 2~3 go to the secondary process.
4220 start = proc_id * nb_q / num_procs;
4221 end = start + nb_q / num_procs;
4224 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
4225 struct fwd_stream *fs;
4227 fs = fwd_streams[sm_id];
4228 txp = fwd_topology_tx_port_get(rxp);
4229 fs->rx_port = fwd_ports_ids[rxp];
4231 fs->tx_port = fwd_ports_ids[txp];
4233 fs->peer_addr = fs->tx_port;
4234 fs->retry_enabled = retry_enabled;
4236 if (rxp < nb_fwd_ports)
4246 get_fwd_port_total_tc_num(void)
4248 struct rte_eth_dcb_info dcb_info;
4249 uint16_t total_tc_num = 0;
4252 for (i = 0; i < nb_fwd_ports; i++) {
4253 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info);
4254 total_tc_num += dcb_info.nb_tcs;
4257 return total_tc_num;
4261 * For the DCB forwarding test, each core is assigned to one traffic class.
4263 * Each core is assigned multiple streams, each stream being composed of
4264 * an RX queue to poll on an RX port for input packets, associated with
4265 * a TX queue of a TX port to which forwarded packets are sent. All RX and
4266 * TX queues map to the same traffic class.
4267 * If VMDQ and DCB co-exist, the traffic classes on different pools share
4271 dcb_fwd_config_setup(void)
4273 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
4274 portid_t txp, rxp = 0;
4275 queueid_t txq, rxq = 0;
4277 uint16_t nb_rx_queue, nb_tx_queue;
4278 uint16_t i, j, k, sm_id = 0;
4279 uint16_t total_tc_num;
4280 struct rte_port *port;
4286 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED
4287 * or RTE_PORT_STOPPED.
4289 * Re-configure ports to get updated mapping between tc and queue in
4290 * case the queue number of the port is changed. Skip for started ports
4291 * since modifying queue number and calling dev_configure need to stop
4294 for (pid = 0; pid < nb_fwd_ports; pid++) {
4295 if (port_is_started(pid) == 1)
4299 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq,
4303 "Failed to re-configure port %d, ret = %d.\n",
4309 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4310 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
4311 cur_fwd_config.nb_fwd_streams =
4312 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
4313 total_tc_num = get_fwd_port_total_tc_num();
4314 if (cur_fwd_config.nb_fwd_lcores > total_tc_num)
4315 cur_fwd_config.nb_fwd_lcores = total_tc_num;
4317 /* reinitialize forwarding streams */
4321 /* get the dcb info on the first RX and TX ports */
4322 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
4323 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
4325 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
4326 fwd_lcores[lc_id]->stream_nb = 0;
4327 fwd_lcores[lc_id]->stream_idx = sm_id;
4328 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
4329 /* if nb_queue is zero, it means this tc is
4330 * not enabled on the pool
4332 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
4334 k = fwd_lcores[lc_id]->stream_nb +
4335 fwd_lcores[lc_id]->stream_idx;
4336 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
4337 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
4338 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
4339 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
4340 for (j = 0; j < nb_rx_queue; j++) {
4341 struct fwd_stream *fs;
4343 fs = fwd_streams[k + j];
4344 fs->rx_port = fwd_ports_ids[rxp];
4345 fs->rx_queue = rxq + j;
4346 fs->tx_port = fwd_ports_ids[txp];
4347 fs->tx_queue = txq + j % nb_tx_queue;
4348 fs->peer_addr = fs->tx_port;
4349 fs->retry_enabled = retry_enabled;
4351 fwd_lcores[lc_id]->stream_nb +=
4352 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
4354 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
4357 if (tc < rxp_dcb_info.nb_tcs)
4359 /* Restart from TC 0 on next RX port */
4361 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
4363 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
4366 if (rxp >= nb_fwd_ports)
4368 /* get the dcb information on next RX and TX ports */
4369 if ((rxp & 0x1) == 0)
4370 txp = (portid_t) (rxp + 1);
4372 txp = (portid_t) (rxp - 1);
4373 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
4374 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
4379 icmp_echo_config_setup(void)
4386 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
4387 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
4388 (nb_txq * nb_fwd_ports);
4390 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4391 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
4392 cur_fwd_config.nb_fwd_streams =
4393 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
4394 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
4395 cur_fwd_config.nb_fwd_lcores =
4396 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
4397 if (verbose_level > 0) {
4398 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
4400 cur_fwd_config.nb_fwd_lcores,
4401 cur_fwd_config.nb_fwd_ports,
4402 cur_fwd_config.nb_fwd_streams);
4405 /* reinitialize forwarding streams */
4407 setup_fwd_config_of_each_lcore(&cur_fwd_config);
4409 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
4410 if (verbose_level > 0)
4411 printf(" core=%d: \n", lc_id);
4412 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
4413 struct fwd_stream *fs;
4414 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
4415 fs->rx_port = fwd_ports_ids[rxp];
4417 fs->tx_port = fs->rx_port;
4419 fs->peer_addr = fs->tx_port;
4420 fs->retry_enabled = retry_enabled;
4421 if (verbose_level > 0)
4422 printf(" stream=%d port=%d rxq=%d txq=%d\n",
4423 sm_id, fs->rx_port, fs->rx_queue,
4425 rxq = (queueid_t) (rxq + 1);
4426 if (rxq == nb_rxq) {
4428 rxp = (portid_t) (rxp + 1);
4435 fwd_config_setup(void)
4437 struct rte_port *port;
4441 cur_fwd_config.fwd_eng = cur_fwd_eng;
4442 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
4443 icmp_echo_config_setup();
4447 if ((nb_rxq > 1) && (nb_txq > 1)) {
4449 for (i = 0; i < nb_fwd_ports; i++) {
4450 pt_id = fwd_ports_ids[i];
4451 port = &ports[pt_id];
4452 if (!port->dcb_flag) {
4454 "In DCB mode, all forwarding ports must be configured in this mode.\n");
4458 if (nb_fwd_lcores == 1) {
4460 "In DCB mode, the number of forwarding cores must be larger than 1.\n");
4464 dcb_fwd_config_setup();
4466 rss_fwd_config_setup();
4469 simple_fwd_config_setup();
4473 mp_alloc_to_str(uint8_t mode)
4476 case MP_ALLOC_NATIVE:
4482 case MP_ALLOC_XMEM_HUGE:
4492 pkt_fwd_config_display(struct fwd_config *cfg)
4494 struct fwd_stream *fs;
4498 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
4499 "NUMA support %s, MP allocation mode: %s\n",
4500 cfg->fwd_eng->fwd_mode_name,
4501 retry_enabled == 0 ? "" : " with retry",
4502 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
4503 numa_support == 1 ? "enabled" : "disabled",
4504 mp_alloc_to_str(mp_alloc_type));
4507 printf("TX retry num: %u, delay between TX retries: %uus\n",
4508 burst_tx_retry_num, burst_tx_delay_time);
4509 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
4510 printf("Logical Core %u (socket %u) forwards packets on "
4512 fwd_lcores_cpuids[lc_id],
4513 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
4514 fwd_lcores[lc_id]->stream_nb);
4515 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
4516 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
4517 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
4518 "P=%d/Q=%d (socket %u) ",
4519 fs->rx_port, fs->rx_queue,
4520 ports[fs->rx_port].socket_id,
4521 fs->tx_port, fs->tx_queue,
4522 ports[fs->tx_port].socket_id);
4523 print_ethaddr("peer=",
4524 &peer_eth_addrs[fs->peer_addr]);
4532 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
4534 struct rte_ether_addr new_peer_addr;
4535 if (!rte_eth_dev_is_valid_port(port_id)) {
4536 fprintf(stderr, "Error: Invalid port number %i\n", port_id);
4539 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
4540 fprintf(stderr, "Error: Invalid ethernet address: %s\n",
4544 peer_eth_addrs[port_id] = new_peer_addr;
4548 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
4551 unsigned int lcore_cpuid;
4556 for (i = 0; i < nb_lc; i++) {
4557 lcore_cpuid = lcorelist[i];
4558 if (! rte_lcore_is_enabled(lcore_cpuid)) {
4559 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid);
4562 if (lcore_cpuid == rte_get_main_lcore()) {
4564 "lcore %u cannot be used for packet forwarding: it is the main lcore, reserved for command line parsing only\n",
4569 fwd_lcores_cpuids[i] = lcore_cpuid;
4571 if (record_now == 0) {
4575 nb_cfg_lcores = (lcoreid_t) nb_lc;
4576 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
4577 printf("previous number of forwarding cores %u - changed to "
4578 "number of configured cores %u\n",
4579 (unsigned int) nb_fwd_lcores, nb_lc);
4580 nb_fwd_lcores = (lcoreid_t) nb_lc;
4587 set_fwd_lcores_mask(uint64_t lcoremask)
4589 unsigned int lcorelist[64];
4593 if (lcoremask == 0) {
4594 fprintf(stderr, "Invalid NULL mask of cores\n");
4598 for (i = 0; i < 64; i++) {
4599 if (! ((uint64_t)(1ULL << i) & lcoremask))
4601 lcorelist[nb_lc++] = i;
4603 return set_fwd_lcores_list(lcorelist, nb_lc);
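/*
 * Example (illustrative): a mask of 0x5 selects lcores 0 and 2, i.e.
 * set_fwd_lcores_mask(0x5) is equivalent to
 * set_fwd_lcores_list((unsigned int []){0, 2}, 2).
 */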
4607 set_fwd_lcores_number(uint16_t nb_lc)
4609 if (test_done == 0) {
4610 fprintf(stderr, "Please stop forwarding first\n");
4613 if (nb_lc > nb_cfg_lcores) {
4615 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n",
4616 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
4619 nb_fwd_lcores = (lcoreid_t) nb_lc;
4620 printf("Number of forwarding cores set to %u\n",
4621 (unsigned int) nb_fwd_lcores);
4625 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
4633 for (i = 0; i < nb_pt; i++) {
4634 port_id = (portid_t) portlist[i];
4635 if (port_id_is_invalid(port_id, ENABLED_WARN))
4638 fwd_ports_ids[i] = port_id;
4640 if (record_now == 0) {
4644 nb_cfg_ports = (portid_t) nb_pt;
4645 if (nb_fwd_ports != (portid_t) nb_pt) {
4646 printf("previous number of forwarding ports %u - changed to "
4647 "number of configured ports %u\n",
4648 (unsigned int) nb_fwd_ports, nb_pt);
4649 nb_fwd_ports = (portid_t) nb_pt;
4654 * Parse the user input and obtain the list of forwarding ports
4657 * String containing the user input. User can specify
4658 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
4659 * For example, if the user wants to use all the available
4660 * 4 ports in the system, then the input can be 0-3 or 0,1,2,3.
4661 * If the user wants to use only the ports 1,2 then the input
4663 * valid characters are '-' and ','
4664 * @param[out] values
4665 * This array will be filled with a list of port IDs
4666 * based on the user input
4667 * Note that duplicate entries are discarded; only the first
4668 * count entries in this array are valid port IDs, and the rest
4669 * are left at their default values
4670 * @param[in] maxsize
4671 * This parameter denotes 2 things
4672 * 1) Number of elements in the values array
4673 * 2) Maximum value of each element in the values array
4675 * On success, returns total count of parsed port IDs
4676 * On failure, returns 0
4679 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
4681 unsigned int count = 0;
4685 unsigned int marked[maxsize];
4687 if (list == NULL || values == NULL)
4690 for (i = 0; i < (int)maxsize; i++)
4696 /* Remove the blank spaces, if any */
4697 while (isblank(*list))
4702 value = strtol(list, &end, 10);
4703 if (errno || end == NULL)
4705 if (value < 0 || value >= (int)maxsize)
4707 while (isblank(*end))
4709 if (*end == '-' && min == INT_MAX) {
4711 } else if ((*end == ',') || (*end == '\0')) {
4715 for (i = min; i <= max; i++) {
4716 if (count < maxsize) {
4728 } while (*end != '\0');
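/*
 * Example (illustrative, assuming the elided range handling stores
 * min..max): parsing "0-2,5" with maxsize = 8 fills values with
 * {0, 1, 2, 5} and returns 4; duplicates such as "1,1-2" are counted
 * only once thanks to the marked[] array.
 */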
4734 parse_fwd_portlist(const char *portlist)
4736 unsigned int portcount;
4737 unsigned int portindex[RTE_MAX_ETHPORTS];
4738 unsigned int i, valid_port_count = 0;
4740 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
4742 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
4745 * Here we verify the validity of the ports
4746 * and thereby calculate the total number of
4749 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
4750 if (rte_eth_dev_is_valid_port(portindex[i])) {
4751 portindex[valid_port_count] = portindex[i];
4756 set_fwd_ports_list(portindex, valid_port_count);
4760 set_fwd_ports_mask(uint64_t portmask)
4762 unsigned int portlist[64];
4766 if (portmask == 0) {
4767 fprintf(stderr, "Invalid NULL mask of ports\n");
4771 RTE_ETH_FOREACH_DEV(i) {
4772 if (! ((uint64_t)(1ULL << i) & portmask))
4774 portlist[nb_pt++] = i;
4776 set_fwd_ports_list(portlist, nb_pt);
4780 set_fwd_ports_number(uint16_t nb_pt)
4782 if (nb_pt > nb_cfg_ports) {
4784 "nb fwd ports %u > %u (number of configured ports) - ignored\n",
4785 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
4788 nb_fwd_ports = (portid_t) nb_pt;
4789 printf("Number of forwarding ports set to %u\n",
4790 (unsigned int) nb_fwd_ports);
4794 port_is_forwarding(portid_t port_id)
4798 if (port_id_is_invalid(port_id, ENABLED_WARN))
4801 for (i = 0; i < nb_fwd_ports; i++) {
4802 if (fwd_ports_ids[i] == port_id)
4810 set_nb_pkt_per_burst(uint16_t nb)
4812 if (nb > MAX_PKT_BURST) {
4814 "nb pkt per burst: %u > %u (maximum packets per burst) ignored\n",
4815 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
4818 nb_pkt_per_burst = nb;
4819 printf("Number of packets per burst set to %u\n",
4820 (unsigned int) nb_pkt_per_burst);
4824 tx_split_get_name(enum tx_pkt_split split)
4828 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
4829 if (tx_split_name[i].split == split)
4830 return tx_split_name[i].name;
4836 set_tx_pkt_split(const char *name)
4840 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
4841 if (strcmp(tx_split_name[i].name, name) == 0) {
4842 tx_pkt_split = tx_split_name[i].split;
4846 fprintf(stderr, "unknown value: \"%s\"\n", name);
4850 parse_fec_mode(const char *name, uint32_t *fec_capa)
4854 for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
4855 if (strcmp(fec_mode_name[i].name, name) == 0) {
4857 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
4865 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa)
4869 printf("FEC capabilities:\n");
4871 for (i = 0; i < num; i++) {
4873 rte_eth_link_speed_to_str(speed_fec_capa[i].speed));
4875 for (j = 0; j < RTE_DIM(fec_mode_name); j++) {
4876 if (RTE_ETH_FEC_MODE_TO_CAPA(j) &
4877 speed_fec_capa[i].capa)
4878 printf("%s ", fec_mode_name[j].name);
4885 show_rx_pkt_offsets(void)
4890 printf("Number of offsets: %u\n", n);
4892 printf("Segment offsets: ");
4893 for (i = 0; i != n - 1; i++)
4894 printf("%hu,", rx_pkt_seg_offsets[i]);
4895 printf("%hu\n", rx_pkt_seg_offsets[i]);
4900 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
4904 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
4905 printf("nb segment offsets per RX packet=%u >= "
4906 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
4911 * No extra check here, the segment length will be checked by PMD
4912 * in the extended queue setup.
4914 for (i = 0; i < nb_offs; i++) {
4915 if (seg_offsets[i] >= UINT16_MAX) {
4916 printf("offset[%u]=%u > UINT16_MAX - give up\n",
4922 for (i = 0; i < nb_offs; i++)
4923 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];
4925 rx_pkt_nb_offs = (uint8_t) nb_offs;
void
show_rx_pkt_segments(void)
{
	uint32_t i, n;

	n = rx_pkt_nb_segs;
	printf("Number of segments: %u\n", n);
	if (n) {
		printf("Segment sizes: ");
		for (i = 0; i != n - 1; i++)
			printf("%hu,", rx_pkt_seg_lengths[i]);
		printf("%hu\n", rx_pkt_seg_lengths[i]);
	}
}
void
set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
{
	unsigned int i;

	if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
		printf("nb segments per RX packet=%u >= "
		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
		return;
	}

	/*
	 * No extra check here, the segment lengths will be checked by the PMD
	 * in the extended queue setup.
	 */
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] >= UINT16_MAX) {
			printf("length[%u]=%u >= UINT16_MAX - give up\n",
			       i, seg_lengths[i]);
			return;
		}
	}

	for (i = 0; i < nb_segs; i++)
		rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	rx_pkt_nb_segs = (uint8_t) nb_segs;
}
void
show_tx_pkt_segments(void)
{
	uint32_t i, n;
	const char *split;

	n = tx_pkt_nb_segs;
	split = tx_split_get_name(tx_pkt_split);

	printf("Number of segments: %u\n", n);
	printf("Segment sizes: ");
	for (i = 0; i != n - 1; i++)
		printf("%hu,", tx_pkt_seg_lengths[i]);
	printf("%hu\n", tx_pkt_seg_lengths[i]);
	printf("Split packet: %s\n", split);
}
static bool
nb_segs_is_invalid(unsigned int nb_segs)
{
	uint16_t ring_size;
	uint16_t queue_id;
	portid_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
			if (ret) {
				/* Port may not be initialized yet, can't say
				 * the port is invalid in this stage.
				 */
				continue;
			}
			if (ring_size < nb_segs) {
				printf("nb segments per TX packet=%u > TX "
				       "queue(%u) ring_size=%u - txpkts ignored\n",
				       nb_segs, queue_id, ring_size);
				return true;
			}
		}
	}

	return false;
}
void
set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned int i;

	/*
	 * For single segment settings a failed check is ignored.
	 * Sending single segment packets is such a basic capability
	 * that it is assumed to be always supported.
	 */
	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) {
		fprintf(stderr,
			"Tx segment size(%u) is not supported - txpkts ignored\n",
			nb_segs);
		return;
	}

	if (nb_segs > RTE_MAX_SEGS_PER_PKT) {
		fprintf(stderr,
			"Tx segment size(%u) is bigger than max number of segment(%u)\n",
			nb_segs, RTE_MAX_SEGS_PER_PKT);
		return;
	}

	/*
	 * Check that each segment length is less than or equal to
	 * the mbuf data size.
	 * Check also that the total packet length is greater than or equal to
	 * the size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
	 * 20 + 8).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > mbuf_data_size[0]) {
			fprintf(stderr,
				"length[%u]=%u > mbuf_data_size=%u - give up\n",
				i, seg_lengths[i], mbuf_data_size[0]);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}

	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
		fprintf(stderr, "total packet length=%u < %d - give up\n",
			(unsigned) tx_pkt_len,
			(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
		return;
	}

	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}
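/*
 * Worked example (illustrative, not upstream code): for the hypothetical
 * call below, tx_pkt_len = 64 + 64 = 128 bytes, which passes the minimum
 * length check because sizeof(struct rte_ether_hdr) + 20 + 8 =
 * 14 + 20 + 8 = 42 bytes (Ethernet + IPv4 + UDP headers).
 */
#if 0	/* example only, never compiled */
	unsigned int segs[] = { 64, 64 };

	set_tx_pkt_segments(segs, RTE_DIM(segs));
#endif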
void
show_tx_pkt_times(void)
{
	printf("Interburst gap: %u\n", tx_pkt_times_inter);
	printf("Intraburst gap: %u\n", tx_pkt_times_intra);
}

void
set_tx_pkt_times(unsigned int *tx_times)
{
	tx_pkt_times_inter = tx_times[0];
	tx_pkt_times_intra = tx_times[1];
}
#ifdef RTE_LIB_GRO
void
setup_gro(const char *onoff, portid_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "invalid port id %u\n", port_id);
		return;
	}
	if (test_done == 0) {
		fprintf(stderr,
			"Before enabling/disabling GRO, please stop forwarding first\n");
		return;
	}
	if (strcmp(onoff, "on") == 0) {
		if (gro_ports[port_id].enable != 0) {
			fprintf(stderr,
				"GRO is already enabled on port %u. Please disable it first\n",
				port_id);
			return;
		}
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
			gro_ports[port_id].param.max_flow_num =
				GRO_DEFAULT_FLOW_NUM;
			gro_ports[port_id].param.max_item_per_flow =
				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
		}
		gro_ports[port_id].enable = 1;
	} else {
		if (gro_ports[port_id].enable == 0) {
			fprintf(stderr, "GRO is not enabled on port %u\n",
				port_id);
			return;
		}
		gro_ports[port_id].enable = 0;
	}
}
void
setup_gro_flush_cycles(uint8_t cycles)
{
	if (test_done == 0) {
		fprintf(stderr,
			"Before changing the GRO flush interval, please stop forwarding first.\n");
		return;
	}

	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
			GRO_DEFAULT_FLUSH_CYCLES) {
		fprintf(stderr,
			"The flushing cycle must be in the range of 1 to %u. Reverting to the default value %u.\n",
			GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES);
		cycles = GRO_DEFAULT_FLUSH_CYCLES;
	}

	gro_flush_cycles = cycles;
}
void
show_gro(portid_t port_id)
{
	struct rte_gro_param *param;
	uint32_t max_pkts_num;

	param = &gro_ports[port_id].param;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "Invalid port id %u.\n", port_id);
		return;
	}
	if (gro_ports[port_id].enable) {
		printf("GRO type: TCP/IPv4\n");
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			max_pkts_num = param->max_flow_num *
				param->max_item_per_flow;
		} else
			max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
		printf("Max number of packets to perform GRO: %u\n",
		       max_pkts_num);
		printf("Flushing cycles: %u\n", gro_flush_cycles);
	} else
		printf("GRO is not enabled on port %u.\n", port_id);
}
#endif /* RTE_LIB_GRO */
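/*
 * Worked example (illustrative only): with the default flush cycle,
 * show_gro() reports max_flow_num * max_item_per_flow packets; e.g.
 * hypothetical values max_flow_num = 4 and max_item_per_flow = 32 would
 * print "Max number of packets to perform GRO: 128". With a non-default
 * flush cycle the bound becomes MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES.
 */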
#ifdef RTE_LIB_GSO
void
setup_gso(const char *mode, portid_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "invalid port id %u\n", port_id);
		return;
	}
	if (strcmp(mode, "on") == 0) {
		if (test_done == 0) {
			fprintf(stderr,
				"before enabling GSO, please stop forwarding first\n");
			return;
		}
		gso_ports[port_id].enable = 1;
	} else if (strcmp(mode, "off") == 0) {
		if (test_done == 0) {
			fprintf(stderr,
				"before disabling GSO, please stop forwarding first\n");
			return;
		}
		gso_ports[port_id].enable = 0;
	}
}
#endif /* RTE_LIB_GSO */
char *
list_pkt_forwarding_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned int i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}
char *
list_pkt_forwarding_retry_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned int i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			if (fwd_eng == &rx_only_engine)
				continue;
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
				sizeof(fwd_modes) -
				strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
				sizeof(fwd_modes) -
				strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}
void
set_pkt_forwarding_mode(const char *fwd_mode_name)
{
	struct fwd_engine *fwd_eng;
	unsigned int i;

	i = 0;
	while ((fwd_eng = fwd_engines[i]) != NULL) {
		if (strcmp(fwd_eng->fwd_mode_name, fwd_mode_name) == 0) {
			printf("Set %s packet forwarding mode%s\n",
			       fwd_mode_name,
			       retry_enabled == 0 ? "" : " with retry");
			cur_fwd_eng = fwd_eng;
			return;
		}
		i++;
	}
	fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name);
}
static void
add_rx_dump_callbacks(portid_t portid)
{
	struct rte_eth_dev_info dev_info;
	uint16_t queue;
	int ret;

	if (port_id_is_invalid(portid, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(portid, &dev_info);
	if (ret != 0)
		return;

	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
		if (!ports[portid].rx_dump_cb[queue])
			ports[portid].rx_dump_cb[queue] =
				rte_eth_add_rx_callback(portid, queue,
					dump_rx_pkts, NULL);
}
static void
add_tx_dump_callbacks(portid_t portid)
{
	struct rte_eth_dev_info dev_info;
	uint16_t queue;
	int ret;

	if (port_id_is_invalid(portid, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(portid, &dev_info);
	if (ret != 0)
		return;

	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
		if (!ports[portid].tx_dump_cb[queue])
			ports[portid].tx_dump_cb[queue] =
				rte_eth_add_tx_callback(portid, queue,
					dump_tx_pkts, NULL);
}
static void
remove_rx_dump_callbacks(portid_t portid)
{
	struct rte_eth_dev_info dev_info;
	uint16_t queue;
	int ret;

	if (port_id_is_invalid(portid, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(portid, &dev_info);
	if (ret != 0)
		return;

	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
		if (ports[portid].rx_dump_cb[queue]) {
			rte_eth_remove_rx_callback(portid, queue,
				ports[portid].rx_dump_cb[queue]);
			ports[portid].rx_dump_cb[queue] = NULL;
		}
}
static void
remove_tx_dump_callbacks(portid_t portid)
{
	struct rte_eth_dev_info dev_info;
	uint16_t queue;
	int ret;

	if (port_id_is_invalid(portid, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(portid, &dev_info);
	if (ret != 0)
		return;

	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
		if (ports[portid].tx_dump_cb[queue]) {
			rte_eth_remove_tx_callback(portid, queue,
				ports[portid].tx_dump_cb[queue]);
			ports[portid].tx_dump_cb[queue] = NULL;
		}
}
void
configure_rxtx_dump_callbacks(uint16_t verbose)
{
	portid_t portid;

#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
	return;
#endif

	RTE_ETH_FOREACH_DEV(portid)
	{
		if (verbose == 1 || verbose > 2)
			add_rx_dump_callbacks(portid);
		else
			remove_rx_dump_callbacks(portid);
		if (verbose >= 2)
			add_tx_dump_callbacks(portid);
		else
			remove_tx_dump_callbacks(portid);
	}
}
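/*
 * Illustrative summary (not upstream code) of the verbosity mapping
 * implemented above: level 1 dumps received packets only, level 2 dumps
 * transmitted packets only, and any level above 2 dumps both directions.
 */
#if 0	/* example only, never compiled */
	configure_rxtx_dump_callbacks(1);	/* Rx dump callbacks only */
	configure_rxtx_dump_callbacks(2);	/* Tx dump callbacks only */
	configure_rxtx_dump_callbacks(3);	/* both directions */
#endif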
void
set_verbose_level(uint16_t vb_level)
{
	printf("Change verbose level from %u to %u\n",
	       (unsigned int) verbose_level, (unsigned int) vb_level);
	verbose_level = vb_level;
	configure_rxtx_dump_callbacks(verbose_level);
}
void
vlan_extend_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
	} else {
		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr,
			"rx_vlan_extend_set(port_id=%d, on=%d) failed diag=%d\n",
			port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}
void
rx_vlan_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr,
			"%s(port_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}
void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
	if (diag < 0)
		fprintf(stderr,
			"%s(port_id=%d, queue_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, queue_id, on, diag);
}
void
rx_vlan_filter_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	} else {
		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr,
			"%s(port_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}
void
rx_vlan_qinq_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
	} else {
		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr, "%s(port_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}
int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	if (vlan_id_is_invalid(vlan_id))
		return 1;
	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
	if (diag == 0)
		return 0;
	fprintf(stderr,
		"rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed diag=%d\n",
		port_id, vlan_id, on, diag);
	return -1;
}
void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
	uint16_t vlan_id;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
		if (rx_vft_set(port_id, vlan_id, on))
			break;
	}
}
void
vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
	if (diag == 0)
		return;

	fprintf(stderr,
		"tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed diag=%d\n",
		port_id, vlan_type, tp_id, diag);
}
void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;

	if (ports[port_id].dev_conf.txmode.offloads &
	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
		fprintf(stderr,
			"Error: QinQ insertion is enabled; disable it before setting a single VLAN tag\n");
		return;
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
		fprintf(stderr,
			"Error: vlan insert is not supported by port %d\n",
			port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
	ports[port_id].tx_vlan_id = vlan_id;
}
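/*
 * Illustrative usage (not upstream code): enable hardware insertion of
 * VLAN tag 100 (a hypothetical value) on port 0. The call resets any
 * previous VLAN/QinQ insertion state first, so it cannot be combined
 * with an active QinQ setup.
 */
#if 0	/* example only, never compiled */
	tx_vlan_set(0, 100);
#endif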
void
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;
	if (vlan_id_is_invalid(vlan_id_outer))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
		fprintf(stderr,
			"Error: qinq insert not supported by port %d\n",
			port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = vlan_id;
	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}
void
tx_vlan_reset(portid_t port_id)
{
	ports[port_id].dev_conf.txmode.offloads &=
		~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = 0;
	ports[port_id].tx_vlan_id_outer = 0;
}
void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) :
		    (tx_queue_id_is_invalid(queue_id)))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		fprintf(stderr, "map_value not in required range 0..%d\n",
			RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /* tx */
		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
							     map_value);
		if (ret) {
			fprintf(stderr,
				"failed to set tx queue stats mapping.\n");
			return;
		}
	} else { /* rx */
		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
							     map_value);
		if (ret) {
			fprintf(stderr,
				"failed to set rx queue stats mapping.\n");
			return;
		}
	}
}
void
set_xstats_hide_zero(uint8_t on_off)
{
	xstats_hide_zero = on_off;
}

void
set_record_core_cycles(uint8_t on_off)
{
	record_core_cycles = on_off;
}

void
set_record_burst_stats(uint8_t on_off)
{
	record_burst_stats = on_off;
}
static uint16_t
str_to_flowtype(const char *string)
{
	uint8_t i;

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (!strcmp(flowtype_str_table[i].str, string))
			return flowtype_str_table[i].ftype;
	}

	if (isdigit(string[0])) {
		int val = atoi(string);
		if (val > 0 && val < 64)
			return (uint16_t)val;
	}

	return RTE_ETH_FLOW_UNKNOWN;
}
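/*
 * Illustrative note (not from the upstream sources): str_to_flowtype()
 * accepts either a symbolic name from flowtype_str_table or a numeric
 * string in the range 1..63; e.g. the hypothetical input "ipv4-udp" and
 * its numeric flow-type value resolve to the same RTE_ETH_FLOW_* id,
 * while anything unrecognized maps to RTE_ETH_FLOW_UNKNOWN.
 */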
static const char *
flowtype_to_str(uint16_t flow_type)
{
	uint8_t i;

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (flowtype_str_table[i].ftype == flow_type)
			return flowtype_str_table[i].str;
	}

	return NULL;
}
#if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)

static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
		       " tunnel_id: 0x%08x",
		       mask->mac_addr_byte_mask, mask->tunnel_type_mask,
		       rte_be_to_cpu_32(mask->tunnel_id_mask));
	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
		       rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
		       rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));

		printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
		       rte_be_to_cpu_16(mask->src_port_mask),
		       rte_be_to_cpu_16(mask->dst_port_mask));

		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));

		printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
	}

	printf("\n");
}
static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_flex_payload_cfg *cfg;
	uint32_t i, j;

	for (i = 0; i < flex_conf->nb_payloads; i++) {
		cfg = &flex_conf->flex_set[i];
		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
			printf("\n    RAW: ");
		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
			printf("\n    L2_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
			printf("\n    L3_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
			printf("\n    L4_PAYLOAD: ");
		else
			printf("\n    UNKNOWN PAYLOAD(%u): ", cfg->type);
		for (j = 0; j < num; j++)
			printf(" %-5u", cfg->src_offset[j]);
	}
	printf("\n");
}
static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_fdir_flex_mask *mask;
	uint32_t i, j;
	const char *p;

	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
		mask = &flex_conf->flex_mask[i];
		p = flowtype_to_str(mask->flow_type);
		printf("\n    %s:\t", p ? p : "unknown");
		for (j = 0; j < num; j++)
			printf(" %02x", mask->mask[j]);
	}
	printf("\n");
}
static inline void
print_fdir_flow_type(uint32_t flow_types_mask)
{
	int i;
	const char *p;

	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
		if (!(flow_types_mask & (1 << i)))
			continue;
		p = flowtype_to_str(i);
		if (p)
			printf(" %s", p);
		else
			printf(" unknown");
	}
	printf("\n");
}
static int
get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
	      struct rte_eth_fdir_stats *fdir_stat)
{
	int ret = -ENOTSUP;

#ifdef RTE_NET_I40E
	if (ret == -ENOTSUP) {
		ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
		if (!ret)
			ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
	}
#endif
#ifdef RTE_NET_IXGBE
	if (ret == -ENOTSUP) {
		ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
		if (!ret)
			ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
	}
#endif
	switch (ret) {
	case 0:
		break;
	case -ENOTSUP:
		fprintf(stderr, "\n FDIR is not supported on port %-2d\n",
			port_id);
		break;
	default:
		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
		break;
	}
	return ret;
}
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	memset(&fdir_info, 0, sizeof(fdir_info));
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
		return;

	printf("\n %s FDIR infos for port %-2d %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf(" MODE:");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf(" PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf(" PERFECT-MAC-VLAN\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(" PERFECT-TUNNEL\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf(" SIGNATURE\n");
	else
		printf(" DISABLE\n");
	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
		printf(" SUPPORTED FLOW TYPE: ");
		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	}
	printf(" FLEX PAYLOAD INFO:\n");
	printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
	       " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
	       " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
	       fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
	       fdir_info.flex_payload_unit,
	       fdir_info.max_flex_payload_segment_num,
	       fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf(" MASK:");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf(" FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf,
					fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf(" FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf,
				     fdir_info.max_flexpayload);
	}
	printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
	       " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
	       " add: %-10"PRIu64" remove: %"PRIu64"\n"
	       " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf(" %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}

#endif /* RTE_NET_I40E || RTE_NET_IXGBE */
void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			fprintf(stderr,
				"The flex mask table is full. Cannot set flex mask for flow_type(%u).\n",
				cfg->flow_type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_mask[idx],
		   cfg,
		   sizeof(struct rte_eth_fdir_flex_mask));
}
void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			fprintf(stderr,
				"The flex payload table is full. Cannot set flex payload for type(%u).\n",
				cfg->type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_set[idx],
		   cfg,
		   sizeof(struct rte_eth_flex_payload_cfg));
}
void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
#ifdef RTE_NET_IXGBE
	int diag;

	if (is_rx)
		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
	else
		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);

	if (diag == 0)
		return;
	fprintf(stderr,
		"rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
		is_rx ? "rx" : "tx", port_id, diag);
	return;
#endif
	fprintf(stderr, "VF %s setting not supported for port %d\n",
		is_rx ? "Rx" : "Tx", port_id);
	RTE_SET_USED(vf);
	RTE_SET_USED(on);
}
int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
{
	int diag;
	struct rte_eth_link link;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return 1;
	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
	    rate > link.link_speed) {
		fprintf(stderr,
			"Invalid rate value: %u, greater than link speed: %u\n",
			rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
	if (diag == 0)
		return diag;
	fprintf(stderr,
		"rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
		port_id, diag);
	return diag;
}
int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int diag = -ENOTSUP;

	RTE_SET_USED(vf);
	RTE_SET_USED(rate);
	RTE_SET_USED(q_msk);

#ifdef RTE_NET_IXGBE
	if (diag == -ENOTSUP)
		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
						       q_msk);
#endif
#ifdef RTE_NET_BNXT
	if (diag == -ENOTSUP)
		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
#endif
	if (diag == 0)
		return diag;

	fprintf(stderr,
		"%s for port_id=%d failed diag=%d\n",
		__func__, port_id, diag);
	return diag;
}
int
set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return -EINVAL;

	return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh);
}
/*
 * Functions to manage the set of filtered Multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
 * The address of the pool and the number of valid multicast MAC addresses
 * recorded in the pool are stored in the fields "mc_addr_pool" and
 * "mc_addr_nb" of the "rte_port" data structure.
 *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
 * that it be supplied with a contiguous array of multicast MAC addresses.
 * To comply with this constraint, the set of multicast addresses recorded
 * into the pool is systematically compacted at the beginning of the pool.
 * Hence, when a multicast address is removed from the pool, all following
 * addresses, if any, are copied back to keep the set contiguous.
 */
#define MCAST_POOL_INC 32
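/*
 * Worked example (illustrative only): the pool grows in chunks of
 * MCAST_POOL_INC entries. With 32 addresses already recorded (a multiple
 * of the chunk size), the next append reallocates the pool to
 * sizeof(struct rte_ether_addr) * (32 + 32) = 6 * 64 = 384 bytes;
 * appends 33 through 64 then reuse the spare entries without another
 * realloc().
 */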
static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct rte_ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
							MCAST_POOL_INC);
	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
						    mc_pool_size);
	if (mc_pool == NULL) {
		fprintf(stderr,
			"allocation of pool of %u multicast addresses failed\n",
			port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}
static void
mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
{
	if (mcast_addr_pool_extend(port) != 0)
		return;
	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
}
static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
}
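/*
 * Illustrative sketch (not upstream code): removing index 1 from a pool
 * holding {A, B, C, D} memmove()s the tail down, leaving {A, C, D} with
 * mc_addr_nb = 3, so the array stays contiguous as required by
 * rte_eth_dev_set_mc_addr_list().
 */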
int
mcast_addr_pool_destroy(portid_t port_id)
{
	struct rte_port *port;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];

	if (port->mc_addr_nb != 0) {
		/* free the pool of multicast addresses. */
		free(port->mc_addr_pool);
		port->mc_addr_pool = NULL;
		port->mc_addr_nb = 0;
	}
	return 0;
}
static int
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag < 0)
		fprintf(stderr,
			"rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
			port_id, port->mc_addr_nb, diag);

	return diag;
}
void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			fprintf(stderr,
				"multicast address already filtered by port\n");
			return;
		}
	}

	mcast_addr_pool_append(port, mc_addr);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, remove the address from the pool */
		mcast_addr_pool_remove(port, i);
}
void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		fprintf(stderr, "multicast address not filtered by port %d\n",
			port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, add the address back into the pool */
		mcast_addr_pool_append(port, mc_addr);
}
void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n",
			port_id);
		return;
	}
	printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n TC :        ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}
uint8_t *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	int ret = 0;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
		return buf;
	}

	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		fprintf(stderr, "%s: File operations failed\n", __func__);
		return buf;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		fprintf(stderr, "%s: File operations failed\n", __func__);
		return buf;
	}

	buf = (uint8_t *)malloc(pkg_size);
	if (!buf) {
		close(fd);
		fprintf(stderr, "%s: Failed to malloc memory\n", __func__);
		return buf;
	}

	ret = read(fd, buf, pkg_size);
	if (ret < 0) {
		close(fd);
		fprintf(stderr, "%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);
	return buf;
}
int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		fprintf(stderr, "%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);
	return 0;
}
int
close_file(uint8_t *buf)
{
	if (buf) {
		free(buf);
		return 0;
	}

	return -1;
}
void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	int32_t i, rc, num_macs = 0;

	if (eth_dev_info_get_print_err(port_id, &dev_info))
		return;

	struct rte_ether_addr addr[dev_info.max_mac_addrs];
	rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs);
	if (rc < 0)
		return;

	for (i = 0; i < rc; i++) {
		/* skip zero address */
		if (rte_is_zero_ether_addr(&addr[i]))
			continue;
		num_macs++;
	}

	printf("Number of MAC addresses added: %d\n", num_macs);

	for (i = 0; i < rc; i++) {
		/* skip zero address */
		if (rte_is_zero_ether_addr(&addr[i]))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
		printf("  %s\n", buf);
	}
}
void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	port = &ports[port_id];

	printf("Number of multicast MAC addresses added: %d\n",
	       port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);
	}
}