1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2013-2014 6WIND S.A.
13 #include <sys/queue.h>
14 #include <sys/types.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
34 #include <rte_interrupts.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
42 #include <rte_errno.h>
44 #include <rte_pmd_ixgbe.h>
47 #include <rte_pmd_i40e.h>
50 #include <rte_pmd_bnxt.h>
53 #include <rte_hexdump.h>
56 #include "cmdline_mtr.h"
58 #define ETHDEV_FWVERS_LEN 32
60 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
61 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
62 #else
63 #define CLOCK_TYPE_ID CLOCK_MONOTONIC
66 #define NS_PER_SEC 1E9
68 static char *flowtype_to_str(uint16_t flow_type);
71 enum tx_pkt_split split;
75 .split = TX_PKT_SPLIT_OFF,
79 .split = TX_PKT_SPLIT_ON,
83 .split = TX_PKT_SPLIT_RND,
88 const struct rss_type_info rss_type_table[] = {
89 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
90 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
91 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
92 ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
94 { "eth", ETH_RSS_ETH },
95 { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
96 { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
97 { "vlan", ETH_RSS_VLAN },
98 { "s-vlan", ETH_RSS_S_VLAN },
99 { "c-vlan", ETH_RSS_C_VLAN },
100 { "ipv4", ETH_RSS_IPV4 },
101 { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
102 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
103 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
104 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
105 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
106 { "ipv6", ETH_RSS_IPV6 },
107 { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
108 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
109 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
110 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
111 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
112 { "l2-payload", ETH_RSS_L2_PAYLOAD },
113 { "ipv6-ex", ETH_RSS_IPV6_EX },
114 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
115 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
116 { "port", ETH_RSS_PORT },
117 { "vxlan", ETH_RSS_VXLAN },
118 { "geneve", ETH_RSS_GENEVE },
119 { "nvgre", ETH_RSS_NVGRE },
120 { "ip", ETH_RSS_IP },
121 { "udp", ETH_RSS_UDP },
122 { "tcp", ETH_RSS_TCP },
123 { "sctp", ETH_RSS_SCTP },
124 { "tunnel", ETH_RSS_TUNNEL },
125 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
126 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
127 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
128 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
129 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
130 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
131 { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
132 { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
133 { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
134 { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
135 { "esp", ETH_RSS_ESP },
136 { "ah", ETH_RSS_AH },
137 { "l2tpv3", ETH_RSS_L2TPV3 },
138 { "pfcp", ETH_RSS_PFCP },
139 { "pppoe", ETH_RSS_PPPOE },
140 { "gtpu", ETH_RSS_GTPU },
141 { "ecpri", ETH_RSS_ECPRI },
142 { "mpls", ETH_RSS_MPLS },
143 { "ipv4-chksum", ETH_RSS_IPV4_CHKSUM },
144 { "l4-chksum", ETH_RSS_L4_CHKSUM },
148 static const struct {
149 enum rte_eth_fec_mode mode;
151 } fec_mode_name[] = {
153 .mode = RTE_ETH_FEC_NOFEC,
157 .mode = RTE_ETH_FEC_AUTO,
161 .mode = RTE_ETH_FEC_BASER,
165 .mode = RTE_ETH_FEC_RS,
171 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
173 char buf[RTE_ETHER_ADDR_FMT_SIZE];
174 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
175 printf("%s%s", name, buf);
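/** Show the selected xstats of a port, with their rate since the last display. */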
179 nic_xstats_display_periodic(portid_t port_id)
181 struct xstat_display_info *xstats_info;
182 uint64_t *prev_values, *curr_values;
183 uint64_t diff_value, value_rate;
184 struct timespec cur_time;
191 xstats_info = &ports[port_id].xstats_info;
193 ids_supp_sz = xstats_info->ids_supp_sz;
194 if (ids_supp_sz == 0)
199 ids_supp = xstats_info->ids_supp;
200 prev_values = xstats_info->prev_values;
201 curr_values = xstats_info->curr_values;
203 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values,
205 if (rc != (int)ids_supp_sz) {
207 "Failed to get values of %zu xstats for port %u - return code %d\n",
208 ids_supp_sz, port_id, rc);
213 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
216 ns = cur_time.tv_sec * NS_PER_SEC;
217 ns += cur_time.tv_nsec;
219 if (xstats_info->prev_ns != 0)
220 diff_ns = ns - xstats_info->prev_ns;
221 xstats_info->prev_ns = ns;
224 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)");
225 for (i = 0; i < ids_supp_sz; i++) {
226 diff_value = (curr_values[i] > prev_values[i]) ?
227 (curr_values[i] - prev_values[i]) : 0;
228 prev_values[i] = curr_values[i];
229 value_rate = diff_ns > 0 ?
230 (double)diff_value / diff_ns * NS_PER_SEC : 0;
232 printf(" %-25s%12"PRIu64" %15"PRIu64"\n",
233 xstats_display[i].name, curr_values[i], value_rate);
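/** Display basic statistics of a port and its RX/TX throughput since the last show. */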
238 nic_stats_display(portid_t port_id)
240 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
241 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
242 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
243 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
244 static uint64_t prev_ns[RTE_MAX_ETHPORTS];
245 struct timespec cur_time;
246 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
248 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
249 struct rte_eth_stats stats;
251 static const char *nic_stats_border = "########################";
253 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
257 rte_eth_stats_get(port_id, &stats);
258 printf("\n %s NIC statistics for port %-2d %s\n",
259 nic_stats_border, port_id, nic_stats_border);
261 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
262 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
263 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
264 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf);
265 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
266 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);
269 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
272 ns = cur_time.tv_sec * NS_PER_SEC;
273 ns += cur_time.tv_nsec;
275 if (prev_ns[port_id] != 0)
276 diff_ns = ns - prev_ns[port_id];
277 prev_ns[port_id] = ns;
280 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
281 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
282 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
283 (stats.opackets - prev_pkts_tx[port_id]) : 0;
284 prev_pkts_rx[port_id] = stats.ipackets;
285 prev_pkts_tx[port_id] = stats.opackets;
286 mpps_rx = diff_ns > 0 ?
287 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
288 mpps_tx = diff_ns > 0 ?
289 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;
291 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
292 (stats.ibytes - prev_bytes_rx[port_id]) : 0;
293 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
294 (stats.obytes - prev_bytes_tx[port_id]) : 0;
295 prev_bytes_rx[port_id] = stats.ibytes;
296 prev_bytes_tx[port_id] = stats.obytes;
297 mbps_rx = diff_ns > 0 ?
298 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
299 mbps_tx = diff_ns > 0 ?
300 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;
302 printf("\n Throughput (since last show)\n");
303 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
304 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
305 mpps_tx, mbps_tx * 8);
307 if (xstats_display_num > 0)
308 nic_xstats_display_periodic(port_id);
310 printf(" %s############################%s\n",
311 nic_stats_border, nic_stats_border);
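/** Clear the basic statistics of a port and save the post-reset values as the new baseline. */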
315 nic_stats_clear(portid_t port_id)
319 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
324 ret = rte_eth_stats_reset(port_id);
327 "%s: Error: failed to reset stats (port %u): %s",
328 __func__, port_id, strerror(-ret));
332 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
337 "%s: Error: failed to get stats (port %u): %s",
338 __func__, port_id, strerror(ret));
341 printf("\n NIC statistics for port %d cleared\n", port_id);
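/** Display all extended statistics (xstats) of a port by name. */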
345 nic_xstats_display(portid_t port_id)
347 struct rte_eth_xstat *xstats;
348 int cnt_xstats, idx_xstat;
349 struct rte_eth_xstat_name *xstats_names;
351 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
355 printf("###### NIC extended statistics for port %-2d\n", port_id);
356 if (!rte_eth_dev_is_valid_port(port_id)) {
357 fprintf(stderr, "Error: Invalid port number %i\n", port_id);
362 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
363 if (cnt_xstats < 0) {
364 fprintf(stderr, "Error: Cannot get count of xstats\n");
368 /* Get id-name lookup table */
369 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
370 if (xstats_names == NULL) {
371 fprintf(stderr, "Cannot allocate memory for xstats lookup\n");
374 if (cnt_xstats != rte_eth_xstats_get_names(
375 port_id, xstats_names, cnt_xstats)) {
376 fprintf(stderr, "Error: Cannot get xstats lookup\n");
381 /* Get stats themselves */
382 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
383 if (xstats == NULL) {
384 fprintf(stderr, "Cannot allocate memory for xstats\n");
388 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
389 fprintf(stderr, "Error: Unable to get xstats\n");
396 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
397 if (xstats_hide_zero && !xstats[idx_xstat].value)
399 printf("%s: %"PRIu64"\n",
400 xstats_names[idx_xstat].name,
401 xstats[idx_xstat].value);
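/** Reset the extended statistics of a port and refresh the saved basic statistics baseline. */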
408 nic_xstats_clear(portid_t port_id)
412 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
417 ret = rte_eth_xstats_reset(port_id);
420 "%s: Error: failed to reset xstats (port %u): %s\n",
421 __func__, port_id, strerror(-ret));
425 ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
429 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s",
430 __func__, port_id, strerror(ret));
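/** Map an RTE_ETH_QUEUE_STATE_* value to a printable name. */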
436 get_queue_state_name(uint8_t queue_state)
438 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
440 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
442 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
449 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
451 struct rte_eth_burst_mode mode;
452 struct rte_eth_rxq_info qinfo;
454 static const char *info_border = "*********************";
456 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
459 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n",
460 port_id, queue_id, strerror(-rc), rc);
464 printf("\n%s Infos for port %-2u, RX queue %-2u %s",
465 info_border, port_id, queue_id, info_border);
467 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
468 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
469 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
470 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
471 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
472 printf("\nRX drop packets: %s",
473 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
474 printf("\nRX deferred start: %s",
475 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
476 printf("\nRX scattered packets: %s",
477 (qinfo.scattered_rx != 0) ? "on" : "off");
478 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
479 if (qinfo.rx_buf_size != 0)
480 printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
481 printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
483 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
484 printf("\nBurst mode: %s%s",
486 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
487 " (per queue)" : "");
493 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
495 struct rte_eth_burst_mode mode;
496 struct rte_eth_txq_info qinfo;
498 static const char *info_border = "*********************";
500 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
503 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n",
504 port_id, queue_id, strerror(-rc), rc);
508 printf("\n%s Infos for port %-2u, TX queue %-2u %s",
509 info_border, port_id, queue_id, info_border);
511 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
512 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
513 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
514 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
515 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
516 printf("\nTX deferred start: %s",
517 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
518 printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
519 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state));
521 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
522 printf("\nBurst mode: %s%s",
524 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
525 " (per queue)" : "");
530 static int bus_match_all(const struct rte_bus *bus, const void *data)
538 device_infos_display_speeds(uint32_t speed_capa)
540 printf("\n\tDevice speed capability:");
541 if (speed_capa == ETH_LINK_SPEED_AUTONEG)
542 printf(" Autonegotiate (all speeds)");
543 if (speed_capa & ETH_LINK_SPEED_FIXED)
544 printf(" Disable autonegotiate (fixed speed) ");
545 if (speed_capa & ETH_LINK_SPEED_10M_HD)
546 printf(" 10 Mbps half-duplex ");
547 if (speed_capa & ETH_LINK_SPEED_10M)
548 printf(" 10 Mbps full-duplex ");
549 if (speed_capa & ETH_LINK_SPEED_100M_HD)
550 printf(" 100 Mbps half-duplex ");
551 if (speed_capa & ETH_LINK_SPEED_100M)
552 printf(" 100 Mbps full-duplex ");
553 if (speed_capa & ETH_LINK_SPEED_1G)
555 if (speed_capa & ETH_LINK_SPEED_2_5G)
556 printf(" 2.5 Gbps ");
557 if (speed_capa & ETH_LINK_SPEED_5G)
559 if (speed_capa & ETH_LINK_SPEED_10G)
561 if (speed_capa & ETH_LINK_SPEED_20G)
563 if (speed_capa & ETH_LINK_SPEED_25G)
565 if (speed_capa & ETH_LINK_SPEED_40G)
567 if (speed_capa & ETH_LINK_SPEED_50G)
569 if (speed_capa & ETH_LINK_SPEED_56G)
571 if (speed_capa & ETH_LINK_SPEED_100G)
572 printf(" 100 Gbps ");
573 if (speed_capa & ETH_LINK_SPEED_200G)
574 printf(" 200 Gbps ");
578 device_infos_display(const char *identifier)
580 static const char *info_border = "*********************";
581 struct rte_bus *start = NULL, *next;
582 struct rte_dev_iterator dev_iter;
583 char name[RTE_ETH_NAME_MAX_LEN];
584 struct rte_ether_addr mac_addr;
585 struct rte_device *dev;
586 struct rte_devargs da;
588 struct rte_eth_dev_info dev_info;
591 memset(&da, 0, sizeof(da));
595 if (rte_devargs_parsef(&da, "%s", identifier)) {
596 fprintf(stderr, "cannot parse identifier\n");
601 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
604 if (identifier && da.bus != next)
607 /* Skip buses that don't have iterate method */
608 if (!next->dev_iterate)
611 snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
612 RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
616 /* Check for matching device if identifier is present */
618 strncmp(da.name, dev->name, strlen(dev->name)))
620 printf("\n%s Infos for device %s %s\n",
621 info_border, dev->name, info_border);
622 printf("Bus name: %s", dev->bus->name);
623 printf("\nDriver name: %s", dev->driver->name);
624 printf("\nDevargs: %s",
625 dev->devargs ? dev->devargs->args : "");
626 printf("\nConnect to socket: %d", dev->numa_node);
629 /* List ports with matching device name */
630 RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
631 printf("\n\tPort id: %-2d", port_id);
632 if (eth_macaddr_get_print_err(port_id,
634 print_ethaddr("\n\tMAC address: ",
636 rte_eth_dev_get_name_by_port(port_id, name);
637 printf("\n\tDevice name: %s", name);
638 if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
639 device_infos_display_speeds(dev_info.speed_capa);
644 rte_devargs_reset(&da);
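/** Display detailed information (MAC, link, offloads, descriptor limits) for a port. */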
648 port_infos_display(portid_t port_id)
650 struct rte_port *port;
651 struct rte_ether_addr mac_addr;
652 struct rte_eth_link link;
653 struct rte_eth_dev_info dev_info;
655 struct rte_mempool *mp;
656 static const char *info_border = "*********************";
658 char name[RTE_ETH_NAME_MAX_LEN];
660 char fw_version[ETHDEV_FWVERS_LEN];
662 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
666 port = &ports[port_id];
667 ret = eth_link_get_nowait_print_err(port_id, &link);
671 ret = eth_dev_info_get_print_err(port_id, &dev_info);
675 printf("\n%s Infos for port %-2d %s\n",
676 info_border, port_id, info_border);
677 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
678 print_ethaddr("MAC address: ", &mac_addr);
679 rte_eth_dev_get_name_by_port(port_id, name);
680 printf("\nDevice name: %s", name);
681 printf("\nDriver name: %s", dev_info.driver_name);
683 if (rte_eth_dev_fw_version_get(port_id, fw_version,
684 ETHDEV_FWVERS_LEN) == 0)
685 printf("\nFirmware-version: %s", fw_version);
687 printf("\nFirmware-version: %s", "not available");
689 if (dev_info.device->devargs && dev_info.device->devargs->args)
690 printf("\nDevargs: %s", dev_info.device->devargs->args);
691 printf("\nConnect to socket: %u", port->socket_id);
693 if (port_numa[port_id] != NUMA_NO_CONFIG) {
694 mp = mbuf_pool_find(port_numa[port_id], 0);
696 printf("\nmemory allocation on the socket: %d",
699 printf("\nmemory allocation on the socket: %u",port->socket_id);
701 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
702 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
703 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
704 ("full-duplex") : ("half-duplex"));
705 printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
708 if (!rte_eth_dev_get_mtu(port_id, &mtu))
709 printf("MTU: %u\n", mtu);
711 printf("Promiscuous mode: %s\n",
712 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
713 printf("Allmulticast mode: %s\n",
714 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
715 printf("Maximum number of MAC addresses: %u\n",
716 (unsigned int)(port->dev_info.max_mac_addrs));
717 printf("Maximum number of MAC addresses of hash filtering: %u\n",
718 (unsigned int)(port->dev_info.max_hash_mac_addrs));
720 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
721 if (vlan_offload >= 0) {
722 printf("VLAN offload: \n");
723 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
724 printf(" strip on, ");
726 printf(" strip off, ");
728 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
729 printf("filter on, ");
731 printf("filter off, ");
733 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
734 printf("extend on, ");
736 printf("extend off, ");
738 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
739 printf("qinq strip on\n");
741 printf("qinq strip off\n");
744 if (dev_info.hash_key_size > 0)
745 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
746 if (dev_info.reta_size > 0)
747 printf("Redirection table size: %u\n", dev_info.reta_size);
748 if (!dev_info.flow_type_rss_offloads)
749 printf("No RSS offload flow type is supported.\n");
754 printf("Supported RSS offload flow types:\n");
755 for (i = RTE_ETH_FLOW_UNKNOWN + 1;
756 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
757 if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
759 p = flowtype_to_str(i);
763 printf(" user defined %d\n", i);
767 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
768 printf("Maximum configurable length of RX packet: %u\n",
769 dev_info.max_rx_pktlen);
770 printf("Maximum configurable size of LRO aggregated packet: %u\n",
771 dev_info.max_lro_pkt_size);
772 if (dev_info.max_vfs)
773 printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
774 if (dev_info.max_vmdq_pools)
775 printf("Maximum number of VMDq pools: %u\n",
776 dev_info.max_vmdq_pools);
778 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
779 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
780 printf("Max possible number of RXDs per queue: %hu\n",
781 dev_info.rx_desc_lim.nb_max);
782 printf("Min possible number of RXDs per queue: %hu\n",
783 dev_info.rx_desc_lim.nb_min);
784 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
786 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
787 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
788 printf("Max possible number of TXDs per queue: %hu\n",
789 dev_info.tx_desc_lim.nb_max);
790 printf("Min possible number of TXDs per queue: %hu\n",
791 dev_info.tx_desc_lim.nb_min);
792 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
793 printf("Max segment number per packet: %hu\n",
794 dev_info.tx_desc_lim.nb_seg_max);
795 printf("Max segment number per MTU/TSO: %hu\n",
796 dev_info.tx_desc_lim.nb_mtu_seg_max);
798 /* Show switch info only if valid switch domain and port id is set */
799 if (dev_info.switch_info.domain_id !=
800 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
801 if (dev_info.switch_info.name)
802 printf("Switch name: %s\n", dev_info.switch_info.name);
804 printf("Switch domain Id: %u\n",
805 dev_info.switch_info.domain_id);
806 printf("Switch Port Id: %u\n",
807 dev_info.switch_info.port_id);
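/** Print the header line of the port summary table. */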
812 port_summary_header_display(void)
814 uint16_t port_number;
816 port_number = rte_eth_dev_count_avail();
817 printf("Number of available ports: %i\n", port_number);
818 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
819 "Driver", "Status", "Link");
823 port_summary_display(portid_t port_id)
825 struct rte_ether_addr mac_addr;
826 struct rte_eth_link link;
827 struct rte_eth_dev_info dev_info;
828 char name[RTE_ETH_NAME_MAX_LEN];
831 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
836 ret = eth_link_get_nowait_print_err(port_id, &link);
840 ret = eth_dev_info_get_print_err(port_id, &dev_info);
844 rte_eth_dev_get_name_by_port(port_id, name);
845 ret = eth_macaddr_get_print_err(port_id, &mac_addr);
849 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n",
850 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name,
851 dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
852 rte_eth_link_speed_to_str(link.link_speed));
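/** Dump the device EEPROM contents of a port as a hexdump. */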
856 port_eeprom_display(portid_t port_id)
858 struct rte_dev_eeprom_info einfo;
860 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
865 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
866 if (len_eeprom < 0) {
867 switch (len_eeprom) {
869 fprintf(stderr, "port index %d invalid\n", port_id);
872 fprintf(stderr, "operation not supported by device\n");
875 fprintf(stderr, "device is removed\n");
878 fprintf(stderr, "Unable to get EEPROM: %d\n",
885 char buf[len_eeprom];
887 einfo.length = len_eeprom;
890 ret = rte_eth_dev_get_eeprom(port_id, &einfo);
894 fprintf(stderr, "port index %d invalid\n", port_id);
897 fprintf(stderr, "operation not supported by device\n");
900 fprintf(stderr, "device is removed\n");
903 fprintf(stderr, "Unable to get EEPROM: %d\n", ret);
908 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
909 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
913 port_module_eeprom_display(portid_t port_id)
915 struct rte_eth_dev_module_info minfo;
916 struct rte_dev_eeprom_info einfo;
919 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
925 ret = rte_eth_dev_get_module_info(port_id, &minfo);
929 fprintf(stderr, "port index %d invalid\n", port_id);
932 fprintf(stderr, "operation not supported by device\n");
935 fprintf(stderr, "device is removed\n");
938 fprintf(stderr, "Unable to get module EEPROM: %d\n",
945 char buf[minfo.eeprom_len];
947 einfo.length = minfo.eeprom_len;
950 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
954 fprintf(stderr, "port index %d invalid\n", port_id);
957 fprintf(stderr, "operation not supported by device\n");
960 fprintf(stderr, "device is removed\n");
963 fprintf(stderr, "Unable to get module EEPROM: %d\n",
970 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
971 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
975 port_id_is_invalid(portid_t port_id, enum print_warning warning)
979 if (port_id == (portid_t)RTE_PORT_ALL)
982 RTE_ETH_FOREACH_DEV(pid)
986 if (warning == ENABLED_WARN)
987 fprintf(stderr, "Invalid port %d\n", port_id);
992 void print_valid_ports(void)
996 printf("The valid ports array is [");
997 RTE_ETH_FOREACH_DEV(pid) {
1004 vlan_id_is_invalid(uint16_t vlan_id)
1008 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id);
1013 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
1015 const struct rte_pci_device *pci_dev;
1016 const struct rte_bus *bus;
1019 if (reg_off & 0x3) {
1021 "Port register offset 0x%X not aligned on a 4-byte boundary\n",
1022 (unsigned int)reg_off);
1026 if (!ports[port_id].dev_info.device) {
1027 fprintf(stderr, "Invalid device\n");
1031 bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
1032 if (bus && !strcmp(bus->name, "pci")) {
1033 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
1035 fprintf(stderr, "Not a PCI device\n");
1039 pci_len = pci_dev->mem_resource[0].len;
1040 if (reg_off >= pci_len) {
1042 "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n",
1043 port_id, (unsigned int)reg_off, (unsigned int)reg_off,
1051 reg_bit_pos_is_invalid(uint8_t bit_pos)
1055 fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos);
1059 #define display_port_and_reg_off(port_id, reg_off) \
1060 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
1063 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1065 display_port_and_reg_off(port_id, (unsigned)reg_off);
1066 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
1070 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
1075 if (port_id_is_invalid(port_id, ENABLED_WARN))
1077 if (port_reg_off_is_invalid(port_id, reg_off))
1079 if (reg_bit_pos_is_invalid(bit_x))
1081 reg_v = port_id_pci_reg_read(port_id, reg_off);
1082 display_port_and_reg_off(port_id, (unsigned)reg_off);
1083 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
1087 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
1088 uint8_t bit1_pos, uint8_t bit2_pos)
1094 if (port_id_is_invalid(port_id, ENABLED_WARN))
1096 if (port_reg_off_is_invalid(port_id, reg_off))
1098 if (reg_bit_pos_is_invalid(bit1_pos))
1100 if (reg_bit_pos_is_invalid(bit2_pos))
1102 if (bit1_pos > bit2_pos)
1103 l_bit = bit2_pos, h_bit = bit1_pos;
1105 l_bit = bit1_pos, h_bit = bit2_pos;
1107 reg_v = port_id_pci_reg_read(port_id, reg_off);
1110 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
1111 display_port_and_reg_off(port_id, (unsigned)reg_off);
1112 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
1113 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
1117 port_reg_display(portid_t port_id, uint32_t reg_off)
1121 if (port_id_is_invalid(port_id, ENABLED_WARN))
1123 if (port_reg_off_is_invalid(port_id, reg_off))
1125 reg_v = port_id_pci_reg_read(port_id, reg_off);
1126 display_port_reg_value(port_id, reg_off, reg_v);
1130 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
1135 if (port_id_is_invalid(port_id, ENABLED_WARN))
1137 if (port_reg_off_is_invalid(port_id, reg_off))
1139 if (reg_bit_pos_is_invalid(bit_pos))
1142 fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n",
1146 reg_v = port_id_pci_reg_read(port_id, reg_off);
1148 reg_v &= ~(1 << bit_pos);
1150 reg_v |= (1 << bit_pos);
1151 port_id_pci_reg_write(port_id, reg_off, reg_v);
1152 display_port_reg_value(port_id, reg_off, reg_v);
1156 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
1157 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
1164 if (port_id_is_invalid(port_id, ENABLED_WARN))
1166 if (port_reg_off_is_invalid(port_id, reg_off))
1168 if (reg_bit_pos_is_invalid(bit1_pos))
1170 if (reg_bit_pos_is_invalid(bit2_pos))
1172 if (bit1_pos > bit2_pos)
1173 l_bit = bit2_pos, h_bit = bit1_pos;
1175 l_bit = bit1_pos, h_bit = bit2_pos;
1177 if ((h_bit - l_bit) < 31)
1178 max_v = (1 << (h_bit - l_bit + 1)) - 1;
1182 if (value > max_v) {
1183 fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n",
1184 (unsigned)value, (unsigned)value,
1185 (unsigned)max_v, (unsigned)max_v);
1188 reg_v = port_id_pci_reg_read(port_id, reg_off);
1189 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1190 reg_v |= (value << l_bit); /* Set changed bits */
1191 port_id_pci_reg_write(port_id, reg_off, reg_v);
1192 display_port_reg_value(port_id, reg_off, reg_v);
1196 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1198 if (port_id_is_invalid(port_id, ENABLED_WARN))
1200 if (port_reg_off_is_invalid(port_id, reg_off))
1202 port_id_pci_reg_write(port_id, reg_off, reg_v);
1203 display_port_reg_value(port_id, reg_off, reg_v);
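/** Validate an MTU against the device limits and apply it to a port. */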
1207 port_mtu_set(portid_t port_id, uint16_t mtu)
1210 struct rte_port *rte_port = &ports[port_id];
1211 struct rte_eth_dev_info dev_info;
1214 if (port_id_is_invalid(port_id, ENABLED_WARN))
1217 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1221 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
1223 "Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
1224 mtu, dev_info.min_mtu, dev_info.max_mtu);
1227 diag = rte_eth_dev_set_mtu(port_id, mtu);
1229 fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
1233 rte_port->dev_conf.rxmode.mtu = mtu;
1235 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1236 if (mtu > RTE_ETHER_MTU)
1237 rte_port->dev_conf.rxmode.offloads |=
1238 DEV_RX_OFFLOAD_JUMBO_FRAME;
1240 rte_port->dev_conf.rxmode.offloads &=
1241 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1245 /* Generic flow management functions. */
1247 static struct port_flow_tunnel *
1248 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
1250 struct port_flow_tunnel *flow_tunnel;
1252 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1253 if (flow_tunnel->id == port_tunnel_id)
1263 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
1266 switch (tunnel->type) {
1270 case RTE_FLOW_ITEM_TYPE_VXLAN:
1278 struct port_flow_tunnel *
1279 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
1281 struct rte_port *port = &ports[port_id];
1282 struct port_flow_tunnel *flow_tunnel;
1284 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1285 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
1294 void port_flow_tunnel_list(portid_t port_id)
1296 struct rte_port *port = &ports[port_id];
1297 struct port_flow_tunnel *flt;
1299 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1300 printf("port %u tunnel #%u type=%s",
1301 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
1302 if (flt->tunnel.tun_id)
1303 printf(" id=%" PRIu64, flt->tunnel.tun_id);
1308 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
1310 struct rte_port *port = &ports[port_id];
1311 struct port_flow_tunnel *flt;
1313 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1314 if (flt->id == tunnel_id)
1318 LIST_REMOVE(flt, chain);
1320 printf("port %u: flow tunnel #%u destroyed\n",
1321 port_id, tunnel_id);
1325 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
1327 struct rte_port *port = &ports[port_id];
1328 enum rte_flow_item_type type;
1329 struct port_flow_tunnel *flt;
1331 if (!strcmp(ops->type, "vxlan"))
1332 type = RTE_FLOW_ITEM_TYPE_VXLAN;
1334 fprintf(stderr, "cannot offload \"%s\" tunnel type\n",
1338 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1339 if (flt->tunnel.type == type)
1343 flt = calloc(1, sizeof(*flt));
1345 fprintf(stderr, "failed to allocate port flt object\n");
1348 flt->tunnel.type = type;
1349 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
1350 LIST_FIRST(&port->flow_tunnel_list)->id + 1;
1351 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
1353 printf("port %d: flow tunnel #%u type %s\n",
1354 port_id, flt->id, ops->type);
1357 /** Generate a port_flow entry from attributes/pattern/actions. */
1358 static struct port_flow *
1359 port_flow_new(const struct rte_flow_attr *attr,
1360 const struct rte_flow_item *pattern,
1361 const struct rte_flow_action *actions,
1362 struct rte_flow_error *error)
1364 const struct rte_flow_conv_rule rule = {
1366 .pattern_ro = pattern,
1367 .actions_ro = actions,
1369 struct port_flow *pf;
1372 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1375 pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1378 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1382 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1389 /** Print a message out of a flow error. */
1391 port_flow_complain(struct rte_flow_error *error)
1393 static const char *const errstrlist[] = {
1394 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1395 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1396 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1397 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1398 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1399 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1400 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1401 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1402 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1403 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1404 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1405 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1406 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1407 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1408 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1409 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1410 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1414 int err = rte_errno;
1416 if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1417 !errstrlist[error->type])
1418 errstr = "unknown type";
1420 errstr = errstrlist[error->type];
1421 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n",
1422 __func__, error->type, errstr,
1423 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1424 error->cause), buf) : "",
1425 error->message ? error->message : "(no stated reason)",
1431 rss_config_display(struct rte_flow_action_rss *rss_conf)
1435 if (rss_conf == NULL) {
1436 fprintf(stderr, "Invalid rule\n");
1442 if (rss_conf->queue_num == 0)
1444 for (i = 0; i < rss_conf->queue_num; i++)
1445 printf(" %d", rss_conf->queue[i]);
1448 printf(" function: ");
1449 switch (rss_conf->func) {
1450 case RTE_ETH_HASH_FUNCTION_DEFAULT:
1451 printf("default\n");
1453 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1454 printf("toeplitz\n");
1456 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1457 printf("simple_xor\n");
1459 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1460 printf("symmetric_toeplitz\n");
1463 printf("Unknown function\n");
1467 printf(" types:\n");
1468 if (rss_conf->types == 0) {
1472 for (i = 0; rss_type_table[i].str; i++) {
1473 if ((rss_conf->types &
1474 rss_type_table[i].rss_type) ==
1475 rss_type_table[i].rss_type &&
1476 rss_type_table[i].rss_type != 0)
1477 printf(" %s\n", rss_type_table[i].str);
1481 static struct port_indirect_action *
1482 action_get_by_id(portid_t port_id, uint32_t id)
1484 struct rte_port *port;
1485 struct port_indirect_action **ppia;
1486 struct port_indirect_action *pia = NULL;
1488 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1489 port_id == (portid_t)RTE_PORT_ALL)
1491 port = &ports[port_id];
1492 ppia = &port->actions_list;
1494 if ((*ppia)->id == id) {
1498 ppia = &(*ppia)->next;
1502 "Failed to find indirect action #%u on port %u\n",
1508 action_alloc(portid_t port_id, uint32_t id,
1509 struct port_indirect_action **action)
1511 struct rte_port *port;
1512 struct port_indirect_action **ppia;
1513 struct port_indirect_action *pia = NULL;
1516 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1517 port_id == (portid_t)RTE_PORT_ALL)
1519 port = &ports[port_id];
1520 if (id == UINT32_MAX) {
1521 /* taking first available ID */
1522 if (port->actions_list) {
1523 if (port->actions_list->id == UINT32_MAX - 1) {
1525 "Highest indirect action ID is already assigned, delete it first\n");
1528 id = port->actions_list->id + 1;
1533 pia = calloc(1, sizeof(*pia));
1536 "Allocation of port %u indirect action failed\n",
1540 ppia = &port->actions_list;
1541 while (*ppia && (*ppia)->id > id)
1542 ppia = &(*ppia)->next;
1543 if (*ppia && (*ppia)->id == id) {
1545 "Indirect action #%u is already assigned, delete it first\n",
1557 /** Create indirect action */
1559 port_action_handle_create(portid_t port_id, uint32_t id,
1560 const struct rte_flow_indir_action_conf *conf,
1561 const struct rte_flow_action *action)
1563 struct port_indirect_action *pia;
1565 struct rte_flow_error error;
1566 struct rte_port *port;
1568 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1569 port_id == (portid_t)RTE_PORT_ALL)
1572 ret = action_alloc(port_id, id, &pia);
1576 port = &ports[port_id];
1579 port_id = port->flow_transfer_proxy;
1581 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1582 port_id == (portid_t)RTE_PORT_ALL)
1585 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
1586 struct rte_flow_action_age *age =
1587 (struct rte_flow_action_age *)(uintptr_t)(action->conf);
1589 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
1590 age->context = &pia->age_type;
1591 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
1592 struct rte_flow_action_conntrack *ct =
1593 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);
1595 memcpy(ct, &conntrack_context, sizeof(*ct));
1597 /* Poisoning to make sure PMDs update it in case of error. */
1598 memset(&error, 0x22, sizeof(error));
1599 pia->handle = rte_flow_action_handle_create(port_id, conf, action,
1602 uint32_t destroy_id = pia->id;
1603 port_action_handle_destroy(port_id, 1, &destroy_id);
1604 return port_flow_complain(&error);
1606 pia->type = action->type;
1607 pia->transfer = conf->transfer;
1608 printf("Indirect action #%u created\n", pia->id);
1612 /** Destroy indirect action */
1614 port_action_handle_destroy(portid_t port_id,
1616 const uint32_t *actions)
1618 struct rte_port *port;
1619 struct port_indirect_action **tmp;
1623 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1624 port_id == (portid_t)RTE_PORT_ALL)
1626 port = &ports[port_id];
1627 tmp = &port->actions_list;
1631 for (i = 0; i != n; ++i) {
1632 struct rte_flow_error error;
1633 struct port_indirect_action *pia = *tmp;
1634 portid_t port_id_eff = port_id;
1636 if (actions[i] != pia->id)
1640 port_id_eff = port->flow_transfer_proxy;
1642 if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
1643 port_id_eff == (portid_t)RTE_PORT_ALL)
1647 * Poisoning to make sure PMDs update it in case
1650 memset(&error, 0x33, sizeof(error));
1652 if (pia->handle && rte_flow_action_handle_destroy(
1653 port_id_eff, pia->handle, &error)) {
1654 ret = port_flow_complain(&error);
1658 printf("Indirect action #%u destroyed\n", pia->id);
1663 tmp = &(*tmp)->next;
1670 /** Get indirect action by port + id */
1671 struct rte_flow_action_handle *
1672 port_action_handle_get_by_id(portid_t port_id, uint32_t id)
1675 struct port_indirect_action *pia = action_get_by_id(port_id, id);
1677 return (pia) ? pia->handle : NULL;
1680 /** Update indirect action */
1682 port_action_handle_update(portid_t port_id, uint32_t id,
1683 const struct rte_flow_action *action)
1685 struct rte_flow_error error;
1686 struct rte_flow_action_handle *action_handle;
1687 struct port_indirect_action *pia;
1688 struct rte_port *port;
1691 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1692 port_id == (portid_t)RTE_PORT_ALL)
1695 port = &ports[port_id];
1697 action_handle = port_action_handle_get_by_id(port_id, id);
1700 pia = action_get_by_id(port_id, id);
1703 switch (pia->type) {
1704 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
1705 update = action->conf;
1713 port_id = port->flow_transfer_proxy;
1715 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1716 port_id == (portid_t)RTE_PORT_ALL)
1719 if (rte_flow_action_handle_update(port_id, action_handle, update,
1721 return port_flow_complain(&error);
1723 printf("Indirect action #%u updated\n", id);
1728 port_action_handle_query(portid_t port_id, uint32_t id)
1730 struct rte_flow_error error;
1731 struct port_indirect_action *pia;
1733 struct rte_flow_query_count count;
1734 struct rte_flow_query_age age;
1735 struct rte_flow_action_conntrack ct;
1737 portid_t port_id_eff = port_id;
1738 struct rte_port *port;
1740 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1741 port_id == (portid_t)RTE_PORT_ALL)
1744 port = &ports[port_id];
1746 pia = action_get_by_id(port_id, id);
1749 switch (pia->type) {
1750 case RTE_FLOW_ACTION_TYPE_AGE:
1751 case RTE_FLOW_ACTION_TYPE_COUNT:
1755 "Indirect action %u (type: %d) on port %u doesn't support query\n",
1756 id, pia->type, port_id);
1761 port_id_eff = port->flow_transfer_proxy;
1763 if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
1764 port_id_eff == (portid_t)RTE_PORT_ALL)
1767 /* Poisoning to make sure PMDs update it in case of error. */
1768 memset(&error, 0x55, sizeof(error));
1769 memset(&query, 0, sizeof(query));
1770 if (rte_flow_action_handle_query(port_id_eff, pia->handle, &query,
1772 return port_flow_complain(&error);
1773 switch (pia->type) {
1774 case RTE_FLOW_ACTION_TYPE_AGE:
1775 printf("Indirect AGE action:\n"
1777 " sec_since_last_hit_valid: %u\n"
1778 " sec_since_last_hit: %" PRIu32 "\n",
1780 query.age.sec_since_last_hit_valid,
1781 query.age.sec_since_last_hit);
1783 case RTE_FLOW_ACTION_TYPE_COUNT:
1784 printf("Indirect COUNT action:\n"
1787 " hits: %" PRIu64 "\n"
1788 " bytes: %" PRIu64 "\n",
1789 query.count.hits_set,
1790 query.count.bytes_set,
1794 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
1795 printf("Conntrack Context:\n"
1796 " Peer: %u, Flow dir: %s, Enable: %u\n"
1797 " Live: %u, SACK: %u, CACK: %u\n"
1798 " Packet dir: %s, Liberal: %u, State: %u\n"
1799 " Factor: %u, Retrans: %u, TCP flags: %u\n"
1800 " Last Seq: %u, Last ACK: %u\n"
1801 " Last Win: %u, Last End: %u\n",
1803 query.ct.is_original_dir ? "Original" : "Reply",
1804 query.ct.enable, query.ct.live_connection,
1805 query.ct.selective_ack, query.ct.challenge_ack_passed,
1806 query.ct.last_direction ? "Original" : "Reply",
1807 query.ct.liberal_mode, query.ct.state,
1808 query.ct.max_ack_window, query.ct.retransmission_limit,
1809 query.ct.last_index, query.ct.last_seq,
1810 query.ct.last_ack, query.ct.last_window,
1812 printf(" Original Dir:\n"
1813 " scale: %u, fin: %u, ack seen: %u\n"
1814 " unacked data: %u\n Sent end: %u,"
1815 " Reply end: %u, Max win: %u, Max ACK: %u\n",
1816 query.ct.original_dir.scale,
1817 query.ct.original_dir.close_initiated,
1818 query.ct.original_dir.last_ack_seen,
1819 query.ct.original_dir.data_unacked,
1820 query.ct.original_dir.sent_end,
1821 query.ct.original_dir.reply_end,
1822 query.ct.original_dir.max_win,
1823 query.ct.original_dir.max_ack);
1824 printf(" Reply Dir:\n"
1825 " scale: %u, fin: %u, ack seen: %u\n"
1826 " unacked data: %u\n Sent end: %u,"
1827 " Reply end: %u, Max win: %u, Max ACK: %u\n",
1828 query.ct.reply_dir.scale,
1829 query.ct.reply_dir.close_initiated,
1830 query.ct.reply_dir.last_ack_seen,
1831 query.ct.reply_dir.data_unacked,
1832 query.ct.reply_dir.sent_end,
1833 query.ct.reply_dir.reply_end,
1834 query.ct.reply_dir.max_win,
1835 query.ct.reply_dir.max_ack);
1839 "Indirect action %u (type: %d) on port %u doesn't support query\n",
1840 id, pia->type, port_id);
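/** Prepend the PMD tunnel offload items/actions to the user pattern/actions of a tunnel rule. */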
1846 static struct port_flow_tunnel *
1847 port_flow_tunnel_offload_cmd_prep(portid_t port_id,
1848 const struct rte_flow_item *pattern,
1849 const struct rte_flow_action *actions,
1850 const struct tunnel_ops *tunnel_ops)
1853 struct rte_port *port;
1854 struct port_flow_tunnel *pft;
1855 struct rte_flow_error error;
1857 port = &ports[port_id];
1858 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id);
1860 fprintf(stderr, "failed to locate port flow tunnel #%u\n",
1864 if (tunnel_ops->actions) {
1865 uint32_t num_actions;
1866 const struct rte_flow_action *aptr;
1868 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel,
1870 &pft->num_pmd_actions,
1873 port_flow_complain(&error);
1876 for (aptr = actions, num_actions = 1;
1877 aptr->type != RTE_FLOW_ACTION_TYPE_END;
1878 aptr++, num_actions++);
1879 pft->actions = malloc(
1880 (num_actions + pft->num_pmd_actions) *
1881 sizeof(actions[0]));
1882 if (!pft->actions) {
1883 rte_flow_tunnel_action_decap_release(
1884 port_id, pft->actions,
1885 pft->num_pmd_actions, &error);
1888 rte_memcpy(pft->actions, pft->pmd_actions,
1889 pft->num_pmd_actions * sizeof(actions[0]));
1890 rte_memcpy(pft->actions + pft->num_pmd_actions, actions,
1891 num_actions * sizeof(actions[0]));
1893 if (tunnel_ops->items) {
1895 const struct rte_flow_item *iptr;
1897 ret = rte_flow_tunnel_match(port_id, &pft->tunnel,
1899 &pft->num_pmd_items,
1902 port_flow_complain(&error);
1905 for (iptr = pattern, num_items = 1;
1906 iptr->type != RTE_FLOW_ITEM_TYPE_END;
1907 iptr++, num_items++);
1908 pft->items = malloc((num_items + pft->num_pmd_items) *
1909 sizeof(pattern[0]));
1911 rte_flow_tunnel_item_release(
1912 port_id, pft->pmd_items,
1913 pft->num_pmd_items, &error);
1916 rte_memcpy(pft->items, pft->pmd_items,
1917 pft->num_pmd_items * sizeof(pattern[0]));
1918 rte_memcpy(pft->items + pft->num_pmd_items, pattern,
1919 num_items * sizeof(pattern[0]));
1926 port_flow_tunnel_offload_cmd_release(portid_t port_id,
1927 const struct tunnel_ops *tunnel_ops,
1928 struct port_flow_tunnel *pft)
1930 struct rte_flow_error error;
1932 if (tunnel_ops->actions) {
1934 rte_flow_tunnel_action_decap_release(
1935 port_id, pft->pmd_actions,
1936 pft->num_pmd_actions, &error);
1937 pft->actions = NULL;
1938 pft->pmd_actions = NULL;
1940 if (tunnel_ops->items) {
1942 rte_flow_tunnel_item_release(port_id, pft->pmd_items,
1946 pft->pmd_items = NULL;
1950 /** Add port meter policy */
1952 port_meter_policy_add(portid_t port_id, uint32_t policy_id,
1953 const struct rte_flow_action *actions)
1955 struct rte_mtr_error error;
1956 const struct rte_flow_action *act = actions;
1957 const struct rte_flow_action *start;
1958 struct rte_mtr_meter_policy_params policy;
1959 uint32_t i = 0, act_n;
1962 for (i = 0; i < RTE_COLORS; i++) {
1963 for (act_n = 0, start = act;
1964 act->type != RTE_FLOW_ACTION_TYPE_END; act++)
1966 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
1967 policy.actions[i] = start;
1969 policy.actions[i] = NULL;
1972 ret = rte_mtr_meter_policy_add(port_id,
1976 print_mtr_err_msg(&error);
1980 /** Validate flow rule. */
1982 port_flow_validate(portid_t port_id,
1983 const struct rte_flow_attr *attr,
1984 const struct rte_flow_item *pattern,
1985 const struct rte_flow_action *actions,
1986 const struct tunnel_ops *tunnel_ops)
1988 struct rte_flow_error error;
1989 struct port_flow_tunnel *pft = NULL;
1990 struct rte_port *port;
1992 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1993 port_id == (portid_t)RTE_PORT_ALL)
1996 port = &ports[port_id];
1999 port_id = port->flow_transfer_proxy;
2001 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2002 port_id == (portid_t)RTE_PORT_ALL)
2005 /* Poisoning to make sure PMDs update it in case of error. */
2006 memset(&error, 0x11, sizeof(error));
2007 if (tunnel_ops->enabled) {
2008 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
2009 actions, tunnel_ops);
2013 pattern = pft->items;
2015 actions = pft->actions;
2017 if (rte_flow_validate(port_id, attr, pattern, actions, &error))
2018 return port_flow_complain(&error);
2019 if (tunnel_ops->enabled)
2020 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
2021 printf("Flow rule validated\n");
2025 /** Return age action structure if exists, otherwise NULL. */
2026 static struct rte_flow_action_age *
2027 age_action_get(const struct rte_flow_action *actions)
2029 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2030 switch (actions->type) {
2031 case RTE_FLOW_ACTION_TYPE_AGE:
2032 return (struct rte_flow_action_age *)
2033 (uintptr_t)actions->conf;
2041 /** Create flow rule. */
2043 port_flow_create(portid_t port_id,
2044 const struct rte_flow_attr *attr,
2045 const struct rte_flow_item *pattern,
2046 const struct rte_flow_action *actions,
2047 const struct tunnel_ops *tunnel_ops)
2049 struct rte_flow *flow;
2050 struct rte_port *port;
2051 struct port_flow *pf;
2053 struct rte_flow_error error;
2054 struct port_flow_tunnel *pft = NULL;
2055 struct rte_flow_action_age *age = age_action_get(actions);
2057 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2058 port_id == (portid_t)RTE_PORT_ALL)
2061 port = &ports[port_id];
2064 port_id = port->flow_transfer_proxy;
2066 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2067 port_id == (portid_t)RTE_PORT_ALL)
2070 if (port->flow_list) {
2071 if (port->flow_list->id == UINT32_MAX) {
2073 "Highest rule ID is already assigned, delete it first");
2076 id = port->flow_list->id + 1;
2078 if (tunnel_ops->enabled) {
2079 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
2080 actions, tunnel_ops);
2084 pattern = pft->items;
2086 actions = pft->actions;
2088 pf = port_flow_new(attr, pattern, actions, &error);
2090 return port_flow_complain(&error);
2092 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
2093 age->context = &pf->age_type;
2095 /* Poisoning to make sure PMDs update it in case of error. */
2096 memset(&error, 0x22, sizeof(error));
2097 flow = rte_flow_create(port_id, attr, pattern, actions, &error);
2099 if (tunnel_ops->enabled)
2100 port_flow_tunnel_offload_cmd_release(port_id,
2103 return port_flow_complain(&error);
2105 pf->next = port->flow_list;
2108 port->flow_list = pf;
2109 if (tunnel_ops->enabled)
2110 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
2111 printf("Flow rule #%u created\n", pf->id);
2115 /** Destroy a number of flow rules. */
2117 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
2119 struct rte_port *port;
2120 struct port_flow **tmp;
2124 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2125 port_id == (portid_t)RTE_PORT_ALL)
2127 port = &ports[port_id];
2128 tmp = &port->flow_list;
2132 for (i = 0; i != n; ++i) {
2133 portid_t port_id_eff = port_id;
2134 struct rte_flow_error error;
2135 struct port_flow *pf = *tmp;
2137 if (rule[i] != pf->id)
2140 * Poisoning to make sure PMDs update it in case
2143 memset(&error, 0x33, sizeof(error));
2145 if (pf->rule.attr->transfer)
2146 port_id_eff = port->flow_transfer_proxy;
2148 if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
2149 port_id_eff == (portid_t)RTE_PORT_ALL)
2152 if (rte_flow_destroy(port_id_eff, pf->flow, &error)) {
2153 ret = port_flow_complain(&error);
2156 printf("Flow rule #%u destroyed\n", pf->id);
2162 tmp = &(*tmp)->next;
2168 /** Remove all flow rules. */
2170 port_flow_flush(portid_t port_id)
2172 struct rte_flow_error error;
2173 struct rte_port *port;
2176 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2177 port_id == (portid_t)RTE_PORT_ALL)
2180 port = &ports[port_id];
2182 if (port->flow_list == NULL)
2185 /* Poisoning to make sure PMDs update it in case of error. */
2186 memset(&error, 0x44, sizeof(error));
2187 if (rte_flow_flush(port_id, &error)) {
2188 port_flow_complain(&error);
2191 while (port->flow_list) {
2192 struct port_flow *pf = port->flow_list->next;
2194 free(port->flow_list);
2195 port->flow_list = pf;
2200 /** Dump flow rules. */
2202 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id,
2203 const char *file_name)
2206 FILE *file = stdout;
2207 struct rte_flow_error error;
2208 struct rte_port *port;
2209 struct port_flow *pflow;
2210 struct rte_flow *tmpFlow = NULL;
2213 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2214 port_id == (portid_t)RTE_PORT_ALL)
2218 port = &ports[port_id];
2219 pflow = port->flow_list;
2221 if (rule_id != pflow->id) {
2222 pflow = pflow->next;
2224 tmpFlow = pflow->flow;
2230 if (found == false) {
2231 fprintf(stderr, "Failed to dump to flow %d\n", rule_id);
2236 if (file_name && strlen(file_name)) {
2237 file = fopen(file_name, "w");
2239 fprintf(stderr, "Failed to create file %s: %s\n",
2240 file_name, strerror(errno));
2246 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error);
2248 ret = rte_flow_dev_dump(port_id, NULL, file, &error);
2250 port_flow_complain(&error);
2251 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret));
2253 printf("Flow dump finished\n");
2254 if (file_name && strlen(file_name))
2259 /** Query a flow rule. */
2261 port_flow_query(portid_t port_id, uint32_t rule,
2262 const struct rte_flow_action *action)
2264 struct rte_flow_error error;
2265 struct rte_port *port;
2266 struct port_flow *pf;
2269 struct rte_flow_query_count count;
2270 struct rte_flow_action_rss rss_conf;
2271 struct rte_flow_query_age age;
2275 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2276 port_id == (portid_t)RTE_PORT_ALL)
2278 port = &ports[port_id];
2279 for (pf = port->flow_list; pf; pf = pf->next)
2283 fprintf(stderr, "Flow rule #%u not found\n", rule);
2287 if (pf->rule.attr->transfer)
2288 port_id = port->flow_transfer_proxy;
2290 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2291 port_id == (portid_t)RTE_PORT_ALL)
2294 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
2295 &name, sizeof(name),
2296 (void *)(uintptr_t)action->type, &error);
2298 return port_flow_complain(&error);
2299 switch (action->type) {
2300 case RTE_FLOW_ACTION_TYPE_COUNT:
2301 case RTE_FLOW_ACTION_TYPE_RSS:
2302 case RTE_FLOW_ACTION_TYPE_AGE:
2305 fprintf(stderr, "Cannot query action type %d (%s)\n",
2306 action->type, name);
2309 /* Poisoning to make sure PMDs update it in case of error. */
2310 memset(&error, 0x55, sizeof(error));
2311 memset(&query, 0, sizeof(query));
2312 if (rte_flow_query(port_id, pf->flow, action, &query, &error))
2313 return port_flow_complain(&error);
2314 switch (action->type) {
2315 case RTE_FLOW_ACTION_TYPE_COUNT:
2319 " hits: %" PRIu64 "\n"
2320 " bytes: %" PRIu64 "\n",
2322 query.count.hits_set,
2323 query.count.bytes_set,
2327 case RTE_FLOW_ACTION_TYPE_RSS:
2328 rss_config_display(&query.rss_conf);
2330 case RTE_FLOW_ACTION_TYPE_AGE:
2333 " sec_since_last_hit_valid: %u\n"
2334 " sec_since_last_hit: %" PRIu32 "\n",
2337 query.age.sec_since_last_hit_valid,
2338 query.age.sec_since_last_hit);
2342 "Cannot display result for action type %d (%s)\n",
2343 action->type, name);
2349 /** List simply and destroy all aged flows. */
2351 port_flow_aged(portid_t port_id, uint8_t destroy)
2354 int nb_context, total = 0, idx;
2355 struct rte_flow_error error;
2356 enum age_action_context_type *type;
2358 struct port_flow *pf;
2359 struct port_indirect_action *pia;
2362 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2363 port_id == (portid_t)RTE_PORT_ALL)
2365 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
2366 printf("Port %u total aged flows: %d\n", port_id, total);
2368 port_flow_complain(&error);
2373 contexts = malloc(sizeof(void *) * total);
2374 if (contexts == NULL) {
2375 fprintf(stderr, "Cannot allocate contexts for aged flow\n");
2378 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
2379 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
2380 if (nb_context != total) {
2382 "Port:%d get aged flows count(%d) != total(%d)\n",
2383 port_id, nb_context, total);
2388 for (idx = 0; idx < nb_context; idx++) {
2389 if (!contexts[idx]) {
2390 fprintf(stderr, "Error: get Null context in port %u\n",
2394 type = (enum age_action_context_type *)contexts[idx];
2396 case ACTION_AGE_CONTEXT_TYPE_FLOW:
2397 ctx.pf = container_of(type, struct port_flow, age_type);
2398 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32
2402 ctx.pf->rule.attr->group,
2403 ctx.pf->rule.attr->priority,
2404 ctx.pf->rule.attr->ingress ? 'i' : '-',
2405 ctx.pf->rule.attr->egress ? 'e' : '-',
2406 ctx.pf->rule.attr->transfer ? 't' : '-');
2407 if (destroy && !port_flow_destroy(port_id, 1,
2411 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION:
2412 ctx.pia = container_of(type,
2413 struct port_indirect_action, age_type);
2414 printf("%-20s\t%" PRIu32 "\n", "Indirect action",
2418 fprintf(stderr, "Error: invalid context type %u\n",
2423 printf("\n%d flows destroyed\n", total);
2427 /** List flow rules. */
2429 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
2431 struct rte_port *port;
2432 struct port_flow *pf;
2433 struct port_flow *list = NULL;
2436 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2437 port_id == (portid_t)RTE_PORT_ALL)
2439 port = &ports[port_id];
2440 if (!port->flow_list)
2442 /* Sort flows by group, priority and ID. */
2443 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2444 struct port_flow **tmp;
2445 const struct rte_flow_attr *curr = pf->rule.attr;
2448 /* Filter out unwanted groups. */
2449 for (i = 0; i != n; ++i)
2450 if (curr->group == group[i])
2455 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
2456 const struct rte_flow_attr *comp = (*tmp)->rule.attr;
2458 if (curr->group > comp->group ||
2459 (curr->group == comp->group &&
2460 curr->priority > comp->priority) ||
2461 (curr->group == comp->group &&
2462 curr->priority == comp->priority &&
2463 pf->id > (*tmp)->id))
2470 printf("ID\tGroup\tPrio\tAttr\tRule\n");
2471 for (pf = list; pf != NULL; pf = pf->tmp) {
2472 const struct rte_flow_item *item = pf->rule.pattern;
2473 const struct rte_flow_action *action = pf->rule.actions;
2476 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
2478 pf->rule.attr->group,
2479 pf->rule.attr->priority,
2480 pf->rule.attr->ingress ? 'i' : '-',
2481 pf->rule.attr->egress ? 'e' : '-',
2482 pf->rule.attr->transfer ? 't' : '-');
2483 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
2484 if ((uint32_t)item->type > INT_MAX)
2485 name = "PMD_INTERNAL";
2486 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
2487 &name, sizeof(name),
2488 (void *)(uintptr_t)item->type,
2491 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
2492 printf("%s ", name);
2496 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
2497 if ((uint32_t)action->type > INT_MAX)
2498 name = "PMD_INTERNAL";
2499 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
2500 &name, sizeof(name),
2501 (void *)(uintptr_t)action->type,
2504 if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
2505 printf(" %s", name);
2512 /** Restrict ingress traffic to the defined flow rules. */
2514 port_flow_isolate(portid_t port_id, int set)
2516 struct rte_flow_error error;
2518 /* Poisoning to make sure PMDs update it in case of error. */
2519 memset(&error, 0x66, sizeof(error));
2520 if (rte_flow_isolate(port_id, set, &error))
2521 return port_flow_complain(&error);
2522 printf("Ingress traffic on port %u is %s to the defined flow rules\n",
2524 set ? "now restricted" : "not restricted anymore");
2529 * RX/TX ring descriptors display functions.
2532 rx_queue_id_is_invalid(queueid_t rxq_id)
2534 if (rxq_id < nb_rxq)
2536 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n",
2542 tx_queue_id_is_invalid(queueid_t txq_id)
2544 if (txq_id < nb_txq)
2546 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n",
2552 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
2554 struct rte_port *port = &ports[port_id];
2555 struct rte_eth_rxq_info rx_qinfo;
2558 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
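	/* Prefer the ring size reported by the driver when the queue info query succeeds. */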
2560 *ring_size = rx_qinfo.nb_desc;
2564 if (ret != -ENOTSUP)
2567	 * If rte_eth_rx_queue_info_get() is not supported by this PMD,
2568	 * the ring_size stored in testpmd is used for validity verification.
2569	 * When the rxq is configured by rte_eth_rx_queue_setup() with nb_rx_desc
2570	 * set to 0, a default value provided by the PMD is used to set up this
2571	 * rxq. If that default value is also 0,
2572	 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE is used to set up this rxq.
2574 if (port->nb_rx_desc[rxq_id])
2575 *ring_size = port->nb_rx_desc[rxq_id];
2576 else if (port->dev_info.default_rxportconf.ring_size)
2577 *ring_size = port->dev_info.default_rxportconf.ring_size;
2579 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2584 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
2586 struct rte_port *port = &ports[port_id];
2587 struct rte_eth_txq_info tx_qinfo;
2590 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
2592 *ring_size = tx_qinfo.nb_desc;
2596 if (ret != -ENOTSUP)
2599	 * If rte_eth_tx_queue_info_get() is not supported by this PMD,
2600	 * the ring_size stored in testpmd is used for validity verification.
2601	 * When the txq is configured by rte_eth_tx_queue_setup() with nb_tx_desc
2602	 * set to 0, a default value provided by the PMD is used to set up this
2603	 * txq. If that default value is also 0,
2604	 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE is used to set up this txq.
2606 if (port->nb_tx_desc[txq_id])
2607 *ring_size = port->nb_tx_desc[txq_id];
2608 else if (port->dev_info.default_txportconf.ring_size)
2609 *ring_size = port->dev_info.default_txportconf.ring_size;
2611 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2616 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
2621 ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
2625 if (rxdesc_id < ring_size)
2628 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n",
2629 rxdesc_id, ring_size);
2634 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
2639 ret = get_tx_ring_size(port_id, txq_id, &ring_size);
2643 if (txdesc_id < ring_size)
2646 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n",
2647 txdesc_id, ring_size);
2651 static const struct rte_memzone *
2652 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
2654 char mz_name[RTE_MEMZONE_NAMESIZE];
2655 const struct rte_memzone *mz;
2657 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
2658 port_id, q_id, ring_name);
2659 mz = rte_memzone_lookup(mz_name);
2662 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n",
2663 ring_name, port_id, q_id, mz_name);
2667 union igb_ring_dword {
2670 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2680 struct igb_ring_desc_32_bytes {
2681 union igb_ring_dword lo_dword;
2682 union igb_ring_dword hi_dword;
2683 union igb_ring_dword resv1;
2684 union igb_ring_dword resv2;
2687 struct igb_ring_desc_16_bytes {
2688 union igb_ring_dword lo_dword;
2689 union igb_ring_dword hi_dword;
2693 ring_rxd_display_dword(union igb_ring_dword dword)
2695 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
2696 (unsigned)dword.words.hi);
2700 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
2701 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2704 __rte_unused portid_t port_id,
2708 struct igb_ring_desc_16_bytes *ring =
2709 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2710 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2712 struct rte_eth_dev_info dev_info;
2714 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2718 if (strstr(dev_info.driver_name, "i40e") != NULL) {
2719 /* 32 bytes RX descriptor, i40e only */
2720 struct igb_ring_desc_32_bytes *ring =
2721 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
2722 ring[desc_id].lo_dword.dword =
2723 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2724 ring_rxd_display_dword(ring[desc_id].lo_dword);
2725 ring[desc_id].hi_dword.dword =
2726 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2727 ring_rxd_display_dword(ring[desc_id].hi_dword);
2728 ring[desc_id].resv1.dword =
2729 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
2730 ring_rxd_display_dword(ring[desc_id].resv1);
2731 ring[desc_id].resv2.dword =
2732 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
2733 ring_rxd_display_dword(ring[desc_id].resv2);
2738 /* 16 bytes RX descriptor */
2739 ring[desc_id].lo_dword.dword =
2740 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2741 ring_rxd_display_dword(ring[desc_id].lo_dword);
2742 ring[desc_id].hi_dword.dword =
2743 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2744 ring_rxd_display_dword(ring[desc_id].hi_dword);
2748 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
2750 struct igb_ring_desc_16_bytes *ring;
2751 struct igb_ring_desc_16_bytes txd;
2753 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2754 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2755 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2756 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
2757 (unsigned)txd.lo_dword.words.lo,
2758 (unsigned)txd.lo_dword.words.hi,
2759 (unsigned)txd.hi_dword.words.lo,
2760 (unsigned)txd.hi_dword.words.hi);
2764 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
2766 const struct rte_memzone *rx_mz;
2768 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
2770 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
2773 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
2777 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
2779 const struct rte_memzone *tx_mz;
2781 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
2783 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
2786 ring_tx_descriptor_display(tx_mz, txd_id);
2790 fwd_lcores_config_display(void)
2794 printf("List of forwarding lcores:");
2795 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
2796 printf(" %2u", fwd_lcores_cpuids[lc_id]);
2800 rxtx_config_display(void)
2805 printf(" %s packet forwarding%s packets/burst=%d\n",
2806 cur_fwd_eng->fwd_mode_name,
2807 retry_enabled == 0 ? "" : " with retry",
2810 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
2811 printf(" packet len=%u - nb packet segments=%d\n",
2812 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
2814 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
2815 nb_fwd_lcores, nb_fwd_ports);
2817 RTE_ETH_FOREACH_DEV(pid) {
2818 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
2819 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
2820 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
2821 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
2822 struct rte_eth_rxq_info rx_qinfo;
2823 struct rte_eth_txq_info tx_qinfo;
2824 uint16_t rx_free_thresh_tmp;
2825 uint16_t tx_free_thresh_tmp;
2826 uint16_t tx_rs_thresh_tmp;
2827 uint16_t nb_rx_desc_tmp;
2828 uint16_t nb_tx_desc_tmp;
2829 uint64_t offloads_tmp;
2830 uint8_t pthresh_tmp;
2831 uint8_t hthresh_tmp;
2832 uint8_t wthresh_tmp;
2835 /* per port config */
2836 printf(" port %d: RX queue number: %d Tx queue number: %d\n",
2837 (unsigned int)pid, nb_rxq, nb_txq);
2839 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
2840 ports[pid].dev_conf.rxmode.offloads,
2841 ports[pid].dev_conf.txmode.offloads);
2843 /* per rx queue config only for first queue to be less verbose */
2844 for (qid = 0; qid < 1; qid++) {
2845 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
2847 nb_rx_desc_tmp = nb_rx_desc[qid];
2848 rx_free_thresh_tmp =
2849 rx_conf[qid].rx_free_thresh;
2850 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
2851 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
2852 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
2853 offloads_tmp = rx_conf[qid].offloads;
2855 nb_rx_desc_tmp = rx_qinfo.nb_desc;
2856 rx_free_thresh_tmp =
2857 rx_qinfo.conf.rx_free_thresh;
2858 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
2859 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
2860 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
2861 offloads_tmp = rx_qinfo.conf.offloads;
2864 printf(" RX queue: %d\n", qid);
2865 printf(" RX desc=%d - RX free threshold=%d\n",
2866 nb_rx_desc_tmp, rx_free_thresh_tmp);
2867 printf(" RX threshold registers: pthresh=%d hthresh=%d "
2869 pthresh_tmp, hthresh_tmp, wthresh_tmp);
2870 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp);
2873 /* per tx queue config only for first queue to be less verbose */
2874 for (qid = 0; qid < 1; qid++) {
2875 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
2877 nb_tx_desc_tmp = nb_tx_desc[qid];
2878 tx_free_thresh_tmp =
2879 tx_conf[qid].tx_free_thresh;
2880 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
2881 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
2882 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
2883 offloads_tmp = tx_conf[qid].offloads;
2884 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
2886 nb_tx_desc_tmp = tx_qinfo.nb_desc;
2887 tx_free_thresh_tmp =
2888 tx_qinfo.conf.tx_free_thresh;
2889 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
2890 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
2891 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
2892 offloads_tmp = tx_qinfo.conf.offloads;
2893 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
2896 printf(" TX queue: %d\n", qid);
2897 printf(" TX desc=%d - TX free threshold=%d\n",
2898 nb_tx_desc_tmp, tx_free_thresh_tmp);
2899 printf(" TX threshold registers: pthresh=%d hthresh=%d "
2901 pthresh_tmp, hthresh_tmp, wthresh_tmp);
2902 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
2903 offloads_tmp, tx_rs_thresh_tmp);
2909 port_rss_reta_info(portid_t port_id,
2910 struct rte_eth_rss_reta_entry64 *reta_conf,
2911 uint16_t nb_entries)
2913 uint16_t i, idx, shift;
2916 if (port_id_is_invalid(port_id, ENABLED_WARN))
2919 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
2922 "Failed to get RSS RETA info, return code = %d\n",
2927 for (i = 0; i < nb_entries; i++) {
2928 idx = i / RTE_RETA_GROUP_SIZE;
2929 shift = i % RTE_RETA_GROUP_SIZE;
2930 if (!(reta_conf[idx].mask & (1ULL << shift)))
2932 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
2933 i, reta_conf[idx].reta[shift]);
2938  * Displays the RSS hash functions of a port, and, optionally, the RSS hash
2942 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
2944 struct rte_eth_rss_conf rss_conf = {0};
2945 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
2949 struct rte_eth_dev_info dev_info;
2950 uint8_t hash_key_size;
2953 if (port_id_is_invalid(port_id, ENABLED_WARN))
2956 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2960 if (dev_info.hash_key_size > 0 &&
2961 dev_info.hash_key_size <= sizeof(rss_key))
2962 hash_key_size = dev_info.hash_key_size;
2965 "dev_info did not provide a valid hash key size\n");
2969 /* Get RSS hash key if asked to display it */
2970 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
2971 rss_conf.rss_key_len = hash_key_size;
2972 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2976 fprintf(stderr, "port index %d invalid\n", port_id);
2979 fprintf(stderr, "operation not supported by device\n");
2982 fprintf(stderr, "operation failed - diag=%d\n", diag);
2987 rss_hf = rss_conf.rss_hf;
2989 printf("RSS disabled\n");
2992 printf("RSS functions:\n ");
2993 for (i = 0; rss_type_table[i].str; i++) {
2994 if (rss_hf & rss_type_table[i].rss_type)
2995 printf("%s ", rss_type_table[i].str);
3000 printf("RSS key:\n");
3001 for (i = 0; i < hash_key_size; i++)
3002 printf("%02X", rss_key[i]);
3007 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
3008 uint8_t hash_key_len)
3010 struct rte_eth_rss_conf rss_conf;
3014 rss_conf.rss_key = NULL;
3015 rss_conf.rss_key_len = hash_key_len;
3016 rss_conf.rss_hf = 0;
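	/* Translate the RSS type name (e.g. "ipv4-tcp") into its hash function flag using rss_type_table. */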
3017 for (i = 0; rss_type_table[i].str; i++) {
3018 if (!strcmp(rss_type_table[i].str, rss_type))
3019 rss_conf.rss_hf = rss_type_table[i].rss_type;
3021 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
3023 rss_conf.rss_key = hash_key;
3024 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
3031 fprintf(stderr, "port index %d invalid\n", port_id);
3034 fprintf(stderr, "operation not supported by device\n");
3037 fprintf(stderr, "operation failed - diag=%d\n", diag);
3043  * Set up the forwarding configuration for each logical core.
3046 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
3048 streamid_t nb_fs_per_lcore;
3056 nb_fs = cfg->nb_fwd_streams;
3057 nb_fc = cfg->nb_fwd_lcores;
3058 if (nb_fs <= nb_fc) {
3059 nb_fs_per_lcore = 1;
3062 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
3063 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
3066 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
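	/* The first nb_lc lcores get nb_fs_per_lcore streams each; the remaining nb_extra lcores get one extra stream each. */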
3068 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
3069 fwd_lcores[lc_id]->stream_idx = sm_id;
3070 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
3071 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
3075 * Assign extra remaining streams, if any.
3077 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
3078 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
3079 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
3080 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
3081 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
3086 fwd_topology_tx_port_get(portid_t rxp)
3088 static int warning_once = 1;
3090 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
3092 switch (port_topology) {
3094 case PORT_TOPOLOGY_PAIRED:
3095 if ((rxp & 0x1) == 0) {
3096 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
3100 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n");
3106 case PORT_TOPOLOGY_CHAINED:
3107 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
3108 case PORT_TOPOLOGY_LOOP:
3114 simple_fwd_config_setup(void)
3118 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
3119 cur_fwd_config.nb_fwd_streams =
3120 (streamid_t) cur_fwd_config.nb_fwd_ports;
3122 /* reinitialize forwarding streams */
3126 * In the simple forwarding test, the number of forwarding cores
3127	 * must be lower than or equal to the number of forwarding ports.
3129 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3130 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
3131 cur_fwd_config.nb_fwd_lcores =
3132 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
3133 setup_fwd_config_of_each_lcore(&cur_fwd_config);
3135 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
3136 fwd_streams[i]->rx_port = fwd_ports_ids[i];
3137 fwd_streams[i]->rx_queue = 0;
3138 fwd_streams[i]->tx_port =
3139 fwd_ports_ids[fwd_topology_tx_port_get(i)];
3140 fwd_streams[i]->tx_queue = 0;
3141 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
3142 fwd_streams[i]->retry_enabled = retry_enabled;
3147  * For the RSS forwarding test, all streams are distributed over the lcores. Each stream
3148  * is composed of an RX queue to poll on an RX port for input packets,
3149  * associated with a TX queue of a TX port to which forwarded packets are sent.
3152 rss_fwd_config_setup(void)
3165 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3166 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3167 cur_fwd_config.nb_fwd_streams =
3168 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
3170 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
3171 cur_fwd_config.nb_fwd_lcores =
3172 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
3174 /* reinitialize forwarding streams */
3177 setup_fwd_config_of_each_lcore(&cur_fwd_config);
3179 if (proc_id > 0 && nb_q % num_procs != 0)
3180 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n");
3183	 * In multi-process mode, all queues are allocated across the
3184	 * processes based on num_procs and proc_id. For example,
3185	 * with 4 queues (nb_q) and 2 processes (num_procs):
3186	 * queues 0-1 go to the primary process,
3187	 * queues 2-3 go to the secondary process.
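	 * In general (when nb_q is a multiple of num_procs), process proc_id
	 * polls queues [proc_id * nb_q / num_procs, (proc_id + 1) * nb_q / num_procs).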
3189 start = proc_id * nb_q / num_procs;
3190 end = start + nb_q / num_procs;
3193 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
3194 struct fwd_stream *fs;
3196 fs = fwd_streams[sm_id];
3197 txp = fwd_topology_tx_port_get(rxp);
3198 fs->rx_port = fwd_ports_ids[rxp];
3200 fs->tx_port = fwd_ports_ids[txp];
3202 fs->peer_addr = fs->tx_port;
3203 fs->retry_enabled = retry_enabled;
3205 if (rxp < nb_fwd_ports)
3215 get_fwd_port_total_tc_num(void)
3217 struct rte_eth_dcb_info dcb_info;
3218 uint16_t total_tc_num = 0;
3221 for (i = 0; i < nb_fwd_ports; i++) {
3222 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info);
3223 total_tc_num += dcb_info.nb_tcs;
3226 return total_tc_num;
3230  * For the DCB forwarding test, each core is assigned to one traffic class.
3232  * Each core is assigned a set of streams, each stream being composed of
3233  * an RX queue to poll on an RX port for input packets, associated with
3234  * a TX queue of a TX port to which forwarded packets are sent. All RX and
3235  * TX queues map to the same traffic class.
3236  * If VMDQ and DCB co-exist, each traffic class on the different pools shares
3240 dcb_fwd_config_setup(void)
3242 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
3243 portid_t txp, rxp = 0;
3244 queueid_t txq, rxq = 0;
3246 uint16_t nb_rx_queue, nb_tx_queue;
3247 uint16_t i, j, k, sm_id = 0;
3248 uint16_t total_tc_num;
3249 struct rte_port *port;
3255	 * fwd_config_setup() is called when the port is RTE_PORT_STARTED
3256	 * or RTE_PORT_STOPPED.
3258	 * Re-configure ports to get an updated mapping between TC and queue in
3259	 * case the queue number of the port has changed. Skip started ports,
3260	 * since modifying the queue number and calling dev_configure need to stop
3263 for (pid = 0; pid < nb_fwd_ports; pid++) {
3264 if (port_is_started(pid) == 1)
3268 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq,
3272 "Failed to re-configure port %d, ret = %d.\n",
3278 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3279 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3280 cur_fwd_config.nb_fwd_streams =
3281 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
3282 total_tc_num = get_fwd_port_total_tc_num();
3283 if (cur_fwd_config.nb_fwd_lcores > total_tc_num)
3284 cur_fwd_config.nb_fwd_lcores = total_tc_num;
3286 /* reinitialize forwarding streams */
3290 /* get the dcb info on the first RX and TX ports */
3291 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
3292 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
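	/* Each forwarding lcore takes the streams of one traffic class, spread over all VMDq pools of the current RX/TX port pair. */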
3294 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
3295 fwd_lcores[lc_id]->stream_nb = 0;
3296 fwd_lcores[lc_id]->stream_idx = sm_id;
3297 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
3298			/* If nb_queue is zero, it means this TC is
3299			 * not enabled on the pool.
3301 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
3303 k = fwd_lcores[lc_id]->stream_nb +
3304 fwd_lcores[lc_id]->stream_idx;
3305 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
3306 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
3307 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
3308 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
3309 for (j = 0; j < nb_rx_queue; j++) {
3310 struct fwd_stream *fs;
3312 fs = fwd_streams[k + j];
3313 fs->rx_port = fwd_ports_ids[rxp];
3314 fs->rx_queue = rxq + j;
3315 fs->tx_port = fwd_ports_ids[txp];
3316 fs->tx_queue = txq + j % nb_tx_queue;
3317 fs->peer_addr = fs->tx_port;
3318 fs->retry_enabled = retry_enabled;
3320 fwd_lcores[lc_id]->stream_nb +=
3321 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
3323 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
3326 if (tc < rxp_dcb_info.nb_tcs)
3328 /* Restart from TC 0 on next RX port */
3330 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
3332 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
3335 if (rxp >= nb_fwd_ports)
3337 /* get the dcb information on next RX and TX ports */
3338 if ((rxp & 0x1) == 0)
3339 txp = (portid_t) (rxp + 1);
3341 txp = (portid_t) (rxp - 1);
3342 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
3343 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
3348 icmp_echo_config_setup(void)
3355 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
3356 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
3357 (nb_txq * nb_fwd_ports);
3359 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3360 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3361 cur_fwd_config.nb_fwd_streams =
3362 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
3363 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
3364 cur_fwd_config.nb_fwd_lcores =
3365 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
3366 if (verbose_level > 0) {
3367 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
3369 cur_fwd_config.nb_fwd_lcores,
3370 cur_fwd_config.nb_fwd_ports,
3371 cur_fwd_config.nb_fwd_streams);
3374 /* reinitialize forwarding streams */
3376 setup_fwd_config_of_each_lcore(&cur_fwd_config);
3378 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
3379 if (verbose_level > 0)
3380 printf(" core=%d: \n", lc_id);
3381 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
3382 struct fwd_stream *fs;
3383 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
3384 fs->rx_port = fwd_ports_ids[rxp];
3386 fs->tx_port = fs->rx_port;
3388 fs->peer_addr = fs->tx_port;
3389 fs->retry_enabled = retry_enabled;
3390 if (verbose_level > 0)
3391 printf(" stream=%d port=%d rxq=%d txq=%d\n",
3392 sm_id, fs->rx_port, fs->rx_queue,
3394 rxq = (queueid_t) (rxq + 1);
3395 if (rxq == nb_rxq) {
3397 rxp = (portid_t) (rxp + 1);
3404 fwd_config_setup(void)
3406 struct rte_port *port;
3410 cur_fwd_config.fwd_eng = cur_fwd_eng;
3411 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
3412 icmp_echo_config_setup();
3416 	if ((nb_rxq > 1) && (nb_txq > 1)) {
3418 for (i = 0; i < nb_fwd_ports; i++) {
3419 pt_id = fwd_ports_ids[i];
3420 port = &ports[pt_id];
3421 if (!port->dcb_flag) {
3423 "In DCB mode, all forwarding ports must be configured in this mode.\n");
3427 if (nb_fwd_lcores == 1) {
3429 "In DCB mode,the nb forwarding cores should be larger than 1.\n");
3433 dcb_fwd_config_setup();
3435 rss_fwd_config_setup();
3438 simple_fwd_config_setup();
3442 mp_alloc_to_str(uint8_t mode)
3445 case MP_ALLOC_NATIVE:
3451 case MP_ALLOC_XMEM_HUGE:
3461 pkt_fwd_config_display(struct fwd_config *cfg)
3463 struct fwd_stream *fs;
3467 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
3468 "NUMA support %s, MP allocation mode: %s\n",
3469 cfg->fwd_eng->fwd_mode_name,
3470 retry_enabled == 0 ? "" : " with retry",
3471 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
3472 numa_support == 1 ? "enabled" : "disabled",
3473 mp_alloc_to_str(mp_alloc_type));
3476 printf("TX retry num: %u, delay between TX retries: %uus\n",
3477 burst_tx_retry_num, burst_tx_delay_time);
3478 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
3479 printf("Logical Core %u (socket %u) forwards packets on "
3481 fwd_lcores_cpuids[lc_id],
3482 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
3483 fwd_lcores[lc_id]->stream_nb);
3484 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
3485 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
3486 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
3487 "P=%d/Q=%d (socket %u) ",
3488 fs->rx_port, fs->rx_queue,
3489 ports[fs->rx_port].socket_id,
3490 fs->tx_port, fs->tx_queue,
3491 ports[fs->tx_port].socket_id);
3492 print_ethaddr("peer=",
3493 &peer_eth_addrs[fs->peer_addr]);
3501 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
3503 struct rte_ether_addr new_peer_addr;
3504 if (!rte_eth_dev_is_valid_port(port_id)) {
3505 fprintf(stderr, "Error: Invalid port number %i\n", port_id);
3508 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
3509 fprintf(stderr, "Error: Invalid ethernet address: %s\n",
3513 peer_eth_addrs[port_id] = new_peer_addr;
3517 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
3520 unsigned int lcore_cpuid;
3525 for (i = 0; i < nb_lc; i++) {
3526 lcore_cpuid = lcorelist[i];
3527 		if (!rte_lcore_is_enabled(lcore_cpuid)) {
3528 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid);
3531 if (lcore_cpuid == rte_get_main_lcore()) {
3533 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n",
3538 fwd_lcores_cpuids[i] = lcore_cpuid;
3540 if (record_now == 0) {
3544 nb_cfg_lcores = (lcoreid_t) nb_lc;
3545 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
3546 printf("previous number of forwarding cores %u - changed to "
3547 "number of configured cores %u\n",
3548 (unsigned int) nb_fwd_lcores, nb_lc);
3549 nb_fwd_lcores = (lcoreid_t) nb_lc;
3556 set_fwd_lcores_mask(uint64_t lcoremask)
3558 unsigned int lcorelist[64];
3562 if (lcoremask == 0) {
3563 fprintf(stderr, "Invalid NULL mask of cores\n");
3567 for (i = 0; i < 64; i++) {
3568 		if (!((uint64_t)(1ULL << i) & lcoremask))
3570 lcorelist[nb_lc++] = i;
3572 return set_fwd_lcores_list(lcorelist, nb_lc);
3576 set_fwd_lcores_number(uint16_t nb_lc)
3578 if (test_done == 0) {
3579 fprintf(stderr, "Please stop forwarding first\n");
3582 if (nb_lc > nb_cfg_lcores) {
3584 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n",
3585 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
3588 nb_fwd_lcores = (lcoreid_t) nb_lc;
3589 printf("Number of forwarding cores set to %u\n",
3590 (unsigned int) nb_fwd_lcores);
3594 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
3602 for (i = 0; i < nb_pt; i++) {
3603 port_id = (portid_t) portlist[i];
3604 if (port_id_is_invalid(port_id, ENABLED_WARN))
3607 fwd_ports_ids[i] = port_id;
3609 if (record_now == 0) {
3613 nb_cfg_ports = (portid_t) nb_pt;
3614 if (nb_fwd_ports != (portid_t) nb_pt) {
3615 printf("previous number of forwarding ports %u - changed to "
3616 "number of configured ports %u\n",
3617 (unsigned int) nb_fwd_ports, nb_pt);
3618 nb_fwd_ports = (portid_t) nb_pt;
3623 * Parse the user input and obtain the list of forwarding ports
3626  *    String containing the user input. The user can specify
3627  *    ports in formats such as 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
3628  *    For example, to use all 4 available
3629  *    ports in the system, the input can be 0-3 or 0,1,2,3.
3630  *    To use only ports 1 and 2, the input can be 1,2.
3632  *    The only valid separator characters are '-' and ','.
3633 * @param[out] values
3634 * This array will be filled with a list of port IDs
3635 * based on the user input
3636 * Note that duplicate entries are discarded and only the first
3637 * count entries in this array are port IDs and all the rest
3638 * will contain default values
3639 * @param[in] maxsize
3640  *    This parameter denotes two things:
3641 * 1) Number of elements in the values array
3642 * 2) Maximum value of each element in the values array
3644 * On success, returns total count of parsed port IDs
3645 * On failure, returns 0
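 * Example: parsing "0-2,5" with maxsize >= 6 fills values with {0, 1, 2, 5} and returns 4.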
3648 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
3650 unsigned int count = 0;
3654 unsigned int marked[maxsize];
3656 if (list == NULL || values == NULL)
3659 for (i = 0; i < (int)maxsize; i++)
3665 	/* Remove blank spaces, if any */
3666 while (isblank(*list))
3671 value = strtol(list, &end, 10);
3672 if (errno || end == NULL)
3674 if (value < 0 || value >= (int)maxsize)
3676 while (isblank(*end))
3678 if (*end == '-' && min == INT_MAX) {
3680 } else if ((*end == ',') || (*end == '\0')) {
3684 for (i = min; i <= max; i++) {
3685 if (count < maxsize) {
3697 } while (*end != '\0');
3703 parse_fwd_portlist(const char *portlist)
3705 unsigned int portcount;
3706 unsigned int portindex[RTE_MAX_ETHPORTS];
3707 unsigned int i, valid_port_count = 0;
3709 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
3711 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
3714 * Here we verify the validity of the ports
3715 * and thereby calculate the total number of
3718 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
3719 if (rte_eth_dev_is_valid_port(portindex[i])) {
3720 portindex[valid_port_count] = portindex[i];
3725 set_fwd_ports_list(portindex, valid_port_count);
3729 set_fwd_ports_mask(uint64_t portmask)
3731 unsigned int portlist[64];
3735 if (portmask == 0) {
3736 fprintf(stderr, "Invalid NULL mask of ports\n");
3740 RTE_ETH_FOREACH_DEV(i) {
3741 		if (!((uint64_t)(1ULL << i) & portmask))
3743 portlist[nb_pt++] = i;
3745 set_fwd_ports_list(portlist, nb_pt);
3749 set_fwd_ports_number(uint16_t nb_pt)
3751 if (nb_pt > nb_cfg_ports) {
3753 "nb fwd ports %u > %u (number of configured ports) - ignored\n",
3754 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
3757 nb_fwd_ports = (portid_t) nb_pt;
3758 printf("Number of forwarding ports set to %u\n",
3759 (unsigned int) nb_fwd_ports);
3763 port_is_forwarding(portid_t port_id)
3767 if (port_id_is_invalid(port_id, ENABLED_WARN))
3770 for (i = 0; i < nb_fwd_ports; i++) {
3771 if (fwd_ports_ids[i] == port_id)
3779 set_nb_pkt_per_burst(uint16_t nb)
3781 if (nb > MAX_PKT_BURST) {
3783 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n",
3784 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
3787 nb_pkt_per_burst = nb;
3788 printf("Number of packets per burst set to %u\n",
3789 (unsigned int) nb_pkt_per_burst);
3793 tx_split_get_name(enum tx_pkt_split split)
3797 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3798 if (tx_split_name[i].split == split)
3799 return tx_split_name[i].name;
3805 set_tx_pkt_split(const char *name)
3809 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3810 if (strcmp(tx_split_name[i].name, name) == 0) {
3811 tx_pkt_split = tx_split_name[i].split;
3815 fprintf(stderr, "unknown value: \"%s\"\n", name);
3819 parse_fec_mode(const char *name, uint32_t *fec_capa)
3823 for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
3824 if (strcmp(fec_mode_name[i].name, name) == 0) {
3826 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
3834 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa)
3838 printf("FEC capabilities:\n");
3840 for (i = 0; i < num; i++) {
3842 rte_eth_link_speed_to_str(speed_fec_capa[i].speed));
3844 for (j = 0; j < RTE_DIM(fec_mode_name); j++) {
3845 if (RTE_ETH_FEC_MODE_TO_CAPA(j) &
3846 speed_fec_capa[i].capa)
3847 printf("%s ", fec_mode_name[j].name);
3854 show_rx_pkt_offsets(void)
3859 printf("Number of offsets: %u\n", n);
3861 printf("Segment offsets: ");
3862 for (i = 0; i != n - 1; i++)
3863 printf("%hu,", rx_pkt_seg_offsets[i]);
3864 printf("%hu\n", rx_pkt_seg_lengths[i]);
3869 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
3873 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
3874 printf("nb segments per RX packets=%u >= "
3875 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
3880	 * No extra check here; the segment offsets will be checked by the PMD
3881	 * in the extended queue setup.
3883 for (i = 0; i < nb_offs; i++) {
3884 if (seg_offsets[i] >= UINT16_MAX) {
3885 printf("offset[%u]=%u > UINT16_MAX - give up\n",
3891 for (i = 0; i < nb_offs; i++)
3892 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];
3894 rx_pkt_nb_offs = (uint8_t) nb_offs;
3898 show_rx_pkt_segments(void)
3903 printf("Number of segments: %u\n", n);
3905 printf("Segment sizes: ");
3906 for (i = 0; i != n - 1; i++)
3907 printf("%hu,", rx_pkt_seg_lengths[i]);
3908 printf("%hu\n", rx_pkt_seg_lengths[i]);
3913 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
3917 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
3918 printf("nb segments per RX packets=%u >= "
3919 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
3924	 * No extra check here; the segment lengths will be checked by the PMD
3925	 * in the extended queue setup.
3927 for (i = 0; i < nb_segs; i++) {
3928 if (seg_lengths[i] >= UINT16_MAX) {
3929 printf("length[%u]=%u > UINT16_MAX - give up\n",
3935 for (i = 0; i < nb_segs; i++)
3936 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
3938 rx_pkt_nb_segs = (uint8_t) nb_segs;
3942 show_tx_pkt_segments(void)
3948 split = tx_split_get_name(tx_pkt_split);
3950 printf("Number of segments: %u\n", n);
3951 printf("Segment sizes: ");
3952 for (i = 0; i != n - 1; i++)
3953 printf("%hu,", tx_pkt_seg_lengths[i]);
3954 printf("%hu\n", tx_pkt_seg_lengths[i]);
3955 printf("Split packet: %s\n", split);
3959 nb_segs_is_invalid(unsigned int nb_segs)
3966 RTE_ETH_FOREACH_DEV(port_id) {
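		/* Reject the segment count if any configured TX queue ring is smaller than the requested number of segments. */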
3967 for (queue_id = 0; queue_id < nb_txq; queue_id++) {
3968 ret = get_tx_ring_size(port_id, queue_id, &ring_size);
3970				/* The port may not be initialized yet; we can't say
3971				 * it is invalid at this stage.
3975 if (ring_size < nb_segs) {
3976 printf("nb segments per TX packets=%u >= TX "
3977 "queue(%u) ring_size=%u - txpkts ignored\n",
3978 nb_segs, queue_id, ring_size);
3988 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
3990 uint16_t tx_pkt_len;
3994	 * For single-segment settings, a failed check is ignored.
3995	 * Sending single-segment packets is a very basic capability,
3996	 * so it is assumed to always be supported.
3998 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) {
4000 "Tx segment size(%u) is not supported - txpkts ignored\n",
4005 if (nb_segs > RTE_MAX_SEGS_PER_PKT) {
4007 "Tx segment size(%u) is bigger than max number of segment(%u)\n",
4008 nb_segs, RTE_MAX_SEGS_PER_PKT);
4013	 * Check that each segment length does not exceed
4014	 * the mbuf data size.
4015	 * Check also that the total packet length is at least the
4016	 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
4020 for (i = 0; i < nb_segs; i++) {
4021 if (seg_lengths[i] > mbuf_data_size[0]) {
4023 "length[%u]=%u > mbuf_data_size=%u - give up\n",
4024 i, seg_lengths[i], mbuf_data_size[0]);
4027 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
4029 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
4030 fprintf(stderr, "total packet length=%u < %d - give up\n",
4031 (unsigned) tx_pkt_len,
4032 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
4036 for (i = 0; i < nb_segs; i++)
4037 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
4039 tx_pkt_length = tx_pkt_len;
4040 tx_pkt_nb_segs = (uint8_t) nb_segs;
4044 show_tx_pkt_times(void)
4046 printf("Interburst gap: %u\n", tx_pkt_times_inter);
4047 printf("Intraburst gap: %u\n", tx_pkt_times_intra);
4051 set_tx_pkt_times(unsigned int *tx_times)
4053 tx_pkt_times_inter = tx_times[0];
4054 tx_pkt_times_intra = tx_times[1];
4058 setup_gro(const char *onoff, portid_t port_id)
4060 if (!rte_eth_dev_is_valid_port(port_id)) {
4061 fprintf(stderr, "invalid port id %u\n", port_id);
4064 if (test_done == 0) {
4066 "Before enable/disable GRO, please stop forwarding first\n");
4069 if (strcmp(onoff, "on") == 0) {
4070 if (gro_ports[port_id].enable != 0) {
4072 "Port %u has enabled GRO. Please disable GRO first\n",
4076 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
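			/* Default GRO parameters: TCP/IPv4 reassembly with the default flow table sizes. */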
4077 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
4078 gro_ports[port_id].param.max_flow_num =
4079 GRO_DEFAULT_FLOW_NUM;
4080 gro_ports[port_id].param.max_item_per_flow =
4081 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
4083 gro_ports[port_id].enable = 1;
4085 if (gro_ports[port_id].enable == 0) {
4086 fprintf(stderr, "Port %u has disabled GRO\n", port_id);
4089 gro_ports[port_id].enable = 0;
4094 setup_gro_flush_cycles(uint8_t cycles)
4096 if (test_done == 0) {
4098 "Before change flush interval for GRO, please stop forwarding first.\n");
4102 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
4103 GRO_DEFAULT_FLUSH_CYCLES) {
4105 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n",
4106 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES);
4107 cycles = GRO_DEFAULT_FLUSH_CYCLES;
4110 gro_flush_cycles = cycles;
4114 show_gro(portid_t port_id)
4116 struct rte_gro_param *param;
4117 uint32_t max_pkts_num;
4119 param = &gro_ports[port_id].param;
4121 if (!rte_eth_dev_is_valid_port(port_id)) {
4122 fprintf(stderr, "Invalid port id %u.\n", port_id);
4125 if (gro_ports[port_id].enable) {
4126 printf("GRO type: TCP/IPv4\n");
4127 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
4128 max_pkts_num = param->max_flow_num *
4129 param->max_item_per_flow;
4131 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
4132 printf("Max number of packets to perform GRO: %u\n",
4134 printf("Flushing cycles: %u\n", gro_flush_cycles);
4136 printf("Port %u doesn't enable GRO.\n", port_id);
4140 setup_gso(const char *mode, portid_t port_id)
4142 if (!rte_eth_dev_is_valid_port(port_id)) {
4143 fprintf(stderr, "invalid port id %u\n", port_id);
4146 if (strcmp(mode, "on") == 0) {
4147 if (test_done == 0) {
4149 "before enabling GSO, please stop forwarding first\n");
4152 gso_ports[port_id].enable = 1;
4153 } else if (strcmp(mode, "off") == 0) {
4154 if (test_done == 0) {
4156 "before disabling GSO, please stop forwarding first\n");
4159 gso_ports[port_id].enable = 0;
4164 list_pkt_forwarding_modes(void)
4166 static char fwd_modes[128] = "";
4167 const char *separator = "|";
4168 struct fwd_engine *fwd_eng;
4171 	if (strlen(fwd_modes) == 0) {
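		/* Build the '|'-separated list of forwarding engine names once and cache it in the static buffer. */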
4172 while ((fwd_eng = fwd_engines[i++]) != NULL) {
4173 strncat(fwd_modes, fwd_eng->fwd_mode_name,
4174 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
4175 strncat(fwd_modes, separator,
4176 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
4178 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
4185 list_pkt_forwarding_retry_modes(void)
4187 static char fwd_modes[128] = "";
4188 const char *separator = "|";
4189 struct fwd_engine *fwd_eng;
4192 if (strlen(fwd_modes) == 0) {
4193 while ((fwd_eng = fwd_engines[i++]) != NULL) {
4194 if (fwd_eng == &rx_only_engine)
4196 strncat(fwd_modes, fwd_eng->fwd_mode_name,
4198 strlen(fwd_modes) - 1);
4199 strncat(fwd_modes, separator,
4201 strlen(fwd_modes) - 1);
4203 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
4210 set_pkt_forwarding_mode(const char *fwd_mode_name)
4212 struct fwd_engine *fwd_eng;
4216 while ((fwd_eng = fwd_engines[i]) != NULL) {
4217 		if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
4218 printf("Set %s packet forwarding mode%s\n",
4220 retry_enabled == 0 ? "" : " with retry");
4221 cur_fwd_eng = fwd_eng;
4226 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name);
4230 add_rx_dump_callbacks(portid_t portid)
4232 struct rte_eth_dev_info dev_info;
4236 if (port_id_is_invalid(portid, ENABLED_WARN))
4239 ret = eth_dev_info_get_print_err(portid, &dev_info);
4243 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
4244 if (!ports[portid].rx_dump_cb[queue])
4245 ports[portid].rx_dump_cb[queue] =
4246 rte_eth_add_rx_callback(portid, queue,
4247 dump_rx_pkts, NULL);
4251 add_tx_dump_callbacks(portid_t portid)
4253 struct rte_eth_dev_info dev_info;
4257 if (port_id_is_invalid(portid, ENABLED_WARN))
4260 ret = eth_dev_info_get_print_err(portid, &dev_info);
4264 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
4265 if (!ports[portid].tx_dump_cb[queue])
4266 ports[portid].tx_dump_cb[queue] =
4267 rte_eth_add_tx_callback(portid, queue,
4268 dump_tx_pkts, NULL);
4272 remove_rx_dump_callbacks(portid_t portid)
4274 struct rte_eth_dev_info dev_info;
4278 if (port_id_is_invalid(portid, ENABLED_WARN))
4281 ret = eth_dev_info_get_print_err(portid, &dev_info);
4285 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
4286 if (ports[portid].rx_dump_cb[queue]) {
4287 rte_eth_remove_rx_callback(portid, queue,
4288 ports[portid].rx_dump_cb[queue]);
4289 ports[portid].rx_dump_cb[queue] = NULL;
4294 remove_tx_dump_callbacks(portid_t portid)
4296 struct rte_eth_dev_info dev_info;
4300 if (port_id_is_invalid(portid, ENABLED_WARN))
4303 ret = eth_dev_info_get_print_err(portid, &dev_info);
4307 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
4308 if (ports[portid].tx_dump_cb[queue]) {
4309 rte_eth_remove_tx_callback(portid, queue,
4310 ports[portid].tx_dump_cb[queue]);
4311 ports[portid].tx_dump_cb[queue] = NULL;
4316 configure_rxtx_dump_callbacks(uint16_t verbose)
4320 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4321 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
4325 RTE_ETH_FOREACH_DEV(portid)
4327 if (verbose == 1 || verbose > 2)
4328 add_rx_dump_callbacks(portid);
4330 remove_rx_dump_callbacks(portid);
4332 add_tx_dump_callbacks(portid);
4334 remove_tx_dump_callbacks(portid);
4339 set_verbose_level(uint16_t vb_level)
4341 printf("Change verbose level from %u to %u\n",
4342 (unsigned int) verbose_level, (unsigned int) vb_level);
4343 verbose_level = vb_level;
4344 configure_rxtx_dump_callbacks(verbose_level);
4348 vlan_extend_set(portid_t port_id, int on)
4352 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4354 if (port_id_is_invalid(port_id, ENABLED_WARN))
4357 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4360 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
4361 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
4363 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
4364 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
4367 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4370 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n",
4374 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4378 rx_vlan_strip_set(portid_t port_id, int on)
4382 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4384 if (port_id_is_invalid(port_id, ENABLED_WARN))
4387 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4390 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
4391 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
4393 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
4394 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4397 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4400 "%s(port_pi=%d, on=%d) failed diag=%d\n",
4401 __func__, port_id, on, diag);
4404 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4408 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
4412 if (port_id_is_invalid(port_id, ENABLED_WARN))
4415 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
4418 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n",
4419 __func__, port_id, queue_id, on, diag);
4423 rx_vlan_filter_set(portid_t port_id, int on)
4427 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4429 if (port_id_is_invalid(port_id, ENABLED_WARN))
4432 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4435 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
4436 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
4438 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
4439 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
4442 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4445 "%s(port_pi=%d, on=%d) failed diag=%d\n",
4446 __func__, port_id, on, diag);
4449 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4453 rx_vlan_qinq_strip_set(portid_t port_id, int on)
4457 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4459 if (port_id_is_invalid(port_id, ENABLED_WARN))
4462 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4465 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
4466 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
4468 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
4469 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
4472 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4474 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n",
4475 __func__, port_id, on, diag);
4478 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4482 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
4486 if (port_id_is_invalid(port_id, ENABLED_WARN))
4488 if (vlan_id_is_invalid(vlan_id))
4490 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
4494 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n",
4495 port_id, vlan_id, on, diag);
4500 rx_vlan_all_filter_set(portid_t port_id, int on)
4504 if (port_id_is_invalid(port_id, ENABLED_WARN))
4506 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
4507 if (rx_vft_set(port_id, vlan_id, on))
4513 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
4517 if (port_id_is_invalid(port_id, ENABLED_WARN))
4520 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
4525 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n",
4526 port_id, vlan_type, tp_id, diag);
4530 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
4532 struct rte_eth_dev_info dev_info;
4535 if (vlan_id_is_invalid(vlan_id))
4538 if (ports[port_id].dev_conf.txmode.offloads &
4539 DEV_TX_OFFLOAD_QINQ_INSERT) {
4540 fprintf(stderr, "Error, as QinQ has been enabled.\n");
4544 ret = eth_dev_info_get_print_err(port_id, &dev_info);
4548 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
4550 "Error: vlan insert is not supported by port %d\n",
4555 tx_vlan_reset(port_id);
4556 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
4557 ports[port_id].tx_vlan_id = vlan_id;
4561 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
4563 struct rte_eth_dev_info dev_info;
4566 if (vlan_id_is_invalid(vlan_id))
4568 if (vlan_id_is_invalid(vlan_id_outer))
4571 ret = eth_dev_info_get_print_err(port_id, &dev_info);
4575 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
4577 "Error: qinq insert not supported by port %d\n",
4582 tx_vlan_reset(port_id);
4583 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
4584 DEV_TX_OFFLOAD_QINQ_INSERT);
4585 ports[port_id].tx_vlan_id = vlan_id;
4586 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
4590 tx_vlan_reset(portid_t port_id)
4592 ports[port_id].dev_conf.txmode.offloads &=
4593 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
4594 DEV_TX_OFFLOAD_QINQ_INSERT);
4595 ports[port_id].tx_vlan_id = 0;
4596 ports[port_id].tx_vlan_id_outer = 0;
4600 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
4602 if (port_id_is_invalid(port_id, ENABLED_WARN))
4605 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
4609 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
4613 if (port_id_is_invalid(port_id, ENABLED_WARN))
4616 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
4619 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
4620 fprintf(stderr, "map_value not in required range 0..%d\n",
4621 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
4625 if (!is_rx) { /* tx */
4626 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
4630 "failed to set tx queue stats mapping.\n");
4634 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
4638 "failed to set rx queue stats mapping.\n");
4645 set_xstats_hide_zero(uint8_t on_off)
4647 xstats_hide_zero = on_off;
4651 set_record_core_cycles(uint8_t on_off)
4653 record_core_cycles = on_off;
4657 set_record_burst_stats(uint8_t on_off)
4659 record_burst_stats = on_off;
4663 flowtype_to_str(uint16_t flow_type)
4665 struct flow_type_info {
4671 static struct flow_type_info flowtype_str_table[] = {
4672 {"raw", RTE_ETH_FLOW_RAW},
4673 {"ipv4", RTE_ETH_FLOW_IPV4},
4674 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
4675 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
4676 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
4677 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
4678 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
4679 {"ipv6", RTE_ETH_FLOW_IPV6},
4680 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
4681 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
4682 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
4683 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
4684 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
4685 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
4686 {"port", RTE_ETH_FLOW_PORT},
4687 {"vxlan", RTE_ETH_FLOW_VXLAN},
4688 {"geneve", RTE_ETH_FLOW_GENEVE},
4689 {"nvgre", RTE_ETH_FLOW_NVGRE},
4690 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
4693 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
4694 if (flowtype_str_table[i].ftype == flow_type)
4695 return flowtype_str_table[i].str;
4701 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
4704 print_fdir_mask(struct rte_eth_fdir_masks *mask)
4706 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
4708 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4709 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
4710 " tunnel_id: 0x%08x",
4711 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
4712 rte_be_to_cpu_32(mask->tunnel_id_mask));
4713 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
4714 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
4715 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
4716 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
4718 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
4719 rte_be_to_cpu_16(mask->src_port_mask),
4720 rte_be_to_cpu_16(mask->dst_port_mask));
4722 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4723 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
4724 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
4725 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
4726 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
4728 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4729 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
4730 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
4731 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
4732 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
4739 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4741 struct rte_eth_flex_payload_cfg *cfg;
4744 for (i = 0; i < flex_conf->nb_payloads; i++) {
4745 cfg = &flex_conf->flex_set[i];
4746 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
4748 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
4749 printf("\n L2_PAYLOAD: ");
4750 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
4751 printf("\n L3_PAYLOAD: ");
4752 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
4753 printf("\n L4_PAYLOAD: ");
4755 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
4756 for (j = 0; j < num; j++)
4757 printf(" %-5u", cfg->src_offset[j]);
4763 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4765 struct rte_eth_fdir_flex_mask *mask;
4769 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
4770 mask = &flex_conf->flex_mask[i];
4771 p = flowtype_to_str(mask->flow_type);
4772 printf("\n %s:\t", p ? p : "unknown");
4773 for (j = 0; j < num; j++)
4774 printf(" %02x", mask->mask[j]);
4780 print_fdir_flow_type(uint32_t flow_types_mask)
4785 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
4786 if (!(flow_types_mask & (1 << i)))
4788 p = flowtype_to_str(i);
4798 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
4799 struct rte_eth_fdir_stats *fdir_stat)
4804 if (ret == -ENOTSUP) {
4805 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
4807 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
4810 #ifdef RTE_NET_IXGBE
4811 if (ret == -ENOTSUP) {
4812 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
4814 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
4821 fprintf(stderr, "\n FDIR is not supported on port %-2d\n",
4825 fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
4832 fdir_get_infos(portid_t port_id)
4834 struct rte_eth_fdir_stats fdir_stat;
4835 struct rte_eth_fdir_info fdir_info;
4837 static const char *fdir_stats_border = "########################";
4839 if (port_id_is_invalid(port_id, ENABLED_WARN))
4842 memset(&fdir_info, 0, sizeof(fdir_info));
4843 memset(&fdir_stat, 0, sizeof(fdir_stat));
4844 if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
4847 printf("\n %s FDIR infos for port %-2d %s\n",
4848 fdir_stats_border, port_id, fdir_stats_border);
4850 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
4851 printf(" PERFECT\n");
4852 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
4853 printf(" PERFECT-MAC-VLAN\n");
4854 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4855 printf(" PERFECT-TUNNEL\n");
4856 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
4857 printf(" SIGNATURE\n");
4859 printf(" DISABLE\n");
4860 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
4861 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
4862 printf(" SUPPORTED FLOW TYPE: ");
4863 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
4865 printf(" FLEX PAYLOAD INFO:\n");
4866 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
4867 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
4868 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
4869 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
4870 fdir_info.flex_payload_unit,
4871 fdir_info.max_flex_payload_segment_num,
4872 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
4874 print_fdir_mask(&fdir_info.mask);
4875 if (fdir_info.flex_conf.nb_payloads > 0) {
4876 printf(" FLEX PAYLOAD SRC OFFSET:");
4877 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4879 if (fdir_info.flex_conf.nb_flexmasks > 0) {
4880 printf(" FLEX MASK CFG:");
4881 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4883 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
4884 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
4885 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
4886 fdir_info.guarant_spc, fdir_info.best_spc);
4887 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
4888 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
4889 " add: %-10"PRIu64" remove: %"PRIu64"\n"
4890 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
4891 fdir_stat.collision, fdir_stat.free,
4892 fdir_stat.maxhash, fdir_stat.maxlen,
4893 fdir_stat.add, fdir_stat.remove,
4894 fdir_stat.f_add, fdir_stat.f_remove);
4895 printf(" %s############################%s\n",
4896 fdir_stats_border, fdir_stats_border);
4899 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */
4902 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
4904 struct rte_port *port;
4905 struct rte_eth_fdir_flex_conf *flex_conf;
4908 port = &ports[port_id];
4909 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4910 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
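		/* Reuse the existing flex mask entry for this flow type, if one is already configured. */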
4911 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
4916 if (i >= RTE_ETH_FLOW_MAX) {
4917 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
4918 idx = flex_conf->nb_flexmasks;
4919 flex_conf->nb_flexmasks++;
4922 "The flex mask table is full. Can not set flex mask for flow_type(%u).",
4927 rte_memcpy(&flex_conf->flex_mask[idx],
4929 sizeof(struct rte_eth_fdir_flex_mask));
4933 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
4935 struct rte_port *port;
4936 struct rte_eth_fdir_flex_conf *flex_conf;
4939 port = &ports[port_id];
4940 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4941 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
4942 if (cfg->type == flex_conf->flex_set[i].type) {
4947 if (i >= RTE_ETH_PAYLOAD_MAX) {
4948 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
4949 idx = flex_conf->nb_payloads;
4950 flex_conf->nb_payloads++;
4953 "The flex payload table is full. Can not set flex payload for type(%u).",
4958 rte_memcpy(&flex_conf->flex_set[idx],
4960 sizeof(struct rte_eth_flex_payload_cfg));
void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
#ifdef RTE_NET_IXGBE
	int diag;

	if (is_rx)
		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
	else
		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
	if (diag == 0)
		return;
	fprintf(stderr,
		"rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
		is_rx ? "rx" : "tx", port_id, diag);
#else
	fprintf(stderr, "VF %s setting not supported for port %d\n",
		is_rx ? "Rx" : "Tx", port_id);
	RTE_SET_USED(vf);
	RTE_SET_USED(on);
#endif
}

int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
{
	int diag;
	struct rte_eth_link link;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return 1;
	if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
	    rate > link.link_speed) {
		fprintf(stderr,
			"Invalid rate value:%u bigger than link speed: %u\n",
			rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
	if (diag == 0)
		return diag;
	fprintf(stderr,
		"rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
		port_id, diag);
	return diag;
}

int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int diag = -ENOTSUP;

	RTE_SET_USED(vf);
	RTE_SET_USED(rate);
	RTE_SET_USED(q_msk);

#ifdef RTE_NET_IXGBE
	if (diag == -ENOTSUP)
		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
						       q_msk);
#endif
#ifdef RTE_NET_BNXT
	if (diag == -ENOTSUP)
		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
#endif
	if (diag == 0)
		return diag;

	fprintf(stderr,
		"%s for port_id=%d failed diag=%d\n",
		__func__, port_id, diag);
	return diag;
}

/*
 * Functions to manage the set of filtered Multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
 * The address of the pool and the number of valid multicast MAC addresses
 * recorded in the pool are stored in the fields "mc_addr_pool" and
 * "mc_addr_nb" of the "rte_port" data structure.
 *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
 * the caller to supply a contiguous array of multicast MAC addresses.
 * To comply with this constraint, the set of multicast addresses recorded
 * in the pool is kept compacted at the beginning of the pool, so when a
 * multicast address is removed, all following addresses, if any, are
 * copied back to keep the set contiguous.
 */
#define MCAST_POOL_INC 32

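/*
 * A minimal illustration (not part of any testpmd code path) of how the
 * helpers below behave, assuming a freshly initialized entry of the global
 * "ports[]" array:
 *
 *	struct rte_port *port = &ports[0];
 *
 *	mcast_addr_pool_extend(port);    // mc_addr_nb 0 -> 1, realloc to 32 slots
 *	mcast_addr_pool_extend(port);    // mc_addr_nb 1 -> 2, no realloc needed
 *	mcast_addr_pool_remove(port, 0); // slot 1 is moved down into slot 0
 *
 * Removing the last remaining address frees the pool and resets mc_addr_pool
 * to NULL. mcast_addr_add() and mcast_addr_remove() below wrap these helpers
 * and push the resulting array to the PMD with rte_eth_dev_set_mc_addr_list().
 */
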
static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct rte_ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
							MCAST_POOL_INC);
	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
						    mc_pool_size);
	if (mc_pool == NULL) {
		fprintf(stderr,
			"allocation of pool of %u multicast addresses failed\n",
			port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}

static void
mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
{
	if (mcast_addr_pool_extend(port) != 0)
		return;
	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
}

static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
}

static int
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag < 0)
		fprintf(stderr,
			"rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
			port_id, port->mc_addr_nb, diag);

	return diag;
}

void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			fprintf(stderr,
				"multicast address already filtered by port\n");
			return;
		}
	}

	mcast_addr_pool_append(port, mc_addr);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, remove the address from the pool */
		mcast_addr_pool_remove(port, i);
}

void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		fprintf(stderr, "multicast address not filtered by port %d\n",
			port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, add the address back into the pool */
		mcast_addr_pool_append(port, mc_addr);
}

void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n",
			port_id);
		return;
	}
	printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf(" TC :       ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}

uint8_t *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	int ret = 0;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
		return buf;
	}

	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		fprintf(stderr, "%s: File operations failed\n", __func__);
		return buf;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		fprintf(stderr, "%s: File operations failed\n", __func__);
		return buf;
	}

	buf = (uint8_t *)malloc(pkg_size);
	if (!buf) {
		close(fd);
		fprintf(stderr, "%s: Failed to malloc memory\n", __func__);
		return buf;
	}

	ret = read(fd, buf, pkg_size);
	if (ret < 0) {
		close(fd);
		fprintf(stderr, "%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}

int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		fprintf(stderr, "%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);
	return 0;
}

int
close_file(uint8_t *buf)
{
	if (buf) {
		free(buf);
		return 0;
	}

	return -1;
}

void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_NET_I40E
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("no queue region has been set before");

	printf("\n %s All queue region info for port=%2d %s",
			queue_region_info_stats_border, port_id,
			queue_region_info_stats_border);
	printf("\n queue_region_number: %-14u\n",
			info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n region_id: %-14u queue_number: %-14u "
			"queue_start_index: %-14u\n",
			info->region[i].region_id,
			info->region[i].queue_num,
			info->region[i].queue_start_index);

		printf(" user_priority_num is %-14u :",
					info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n flowtype_num is %-14u :",
				info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}

void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	int32_t i, rc, num_macs = 0;

	if (eth_dev_info_get_print_err(port_id, &dev_info))
		return;

	struct rte_ether_addr addr[dev_info.max_mac_addrs];
	rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs);
	if (rc < 0)
		return;

	for (i = 0; i < rc; i++) {
		/* skip zero address */
		if (rte_is_zero_ether_addr(&addr[i]))
			continue;

		num_macs++;
	}

	printf("Number of MAC address added: %d\n", num_macs);

	for (i = 0; i < rc; i++) {
		/* skip zero address */
		if (rte_is_zero_ether_addr(&addr[i]))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
		printf("  %s\n", buf);
	}
}

void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	port = &ports[port_id];

	printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);