/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <sys/queue.h>
#include <sys/types.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#endif
#include <cmdline_parse_etheraddr.h>
#include <rte_config.h>
static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{ .split = TX_PKT_SPLIT_OFF, .name = "off" },
	{ .split = TX_PKT_SPLIT_ON, .name = "on" },
	{ .split = TX_PKT_SPLIT_RND, .name = "rand" },
};
const struct rss_type_info rss_type_table[] = {
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
	{ "udp", ETH_RSS_UDP },
	{ "tcp", ETH_RSS_TCP },
	{ "sctp", ETH_RSS_SCTP },
	{ "tunnel", ETH_RSS_TUNNEL },
print_ethaddr(const char *name, struct ether_addr *eth_addr)
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
nic_stats_display(portid_t port_id)
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
	uint64_t mpps_rx, mpps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {

	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
		nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
			stats.ipackets, stats.imissed, stats.ibytes);
		printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %-10"PRIu64"\n",
		printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
			stats.opackets, stats.oerrors, stats.obytes);
		printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
			" RX-bytes: %10"PRIu64"\n",
			stats.ipackets, stats.ierrors, stats.ibytes);
		printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %10"PRIu64"\n",
		printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
			" TX-bytes: %10"PRIu64"\n",
			stats.opackets, stats.oerrors, stats.obytes);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets: %10"PRIu64
				" RX-errors: %10"PRIu64
				" RX-bytes: %10"PRIu64"\n",
				i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets: %10"PRIu64
				" TX-bytes: %10"PRIu64"\n",
				i, stats.q_opackets[i], stats.q_obytes[i]);

	diff_cycles = prev_cycles[port_id];
	prev_cycles[port_id] = rte_rdtsc();
	diff_cycles = prev_cycles[port_id] - diff_cycles;

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_cycles > 0 ?
		diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mpps_tx = diff_cycles > 0 ?
		diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n",
	printf(" %s############################%s\n",
		nic_stats_border, nic_stats_border);
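	/*
	 * Worked example for the rate computation above (numbers assumed):
	 * with a 2 GHz TSC, 10,000,000 packets received over 1,000,000,000
	 * cycles (0.5 s) give diff_pkts_rx * rte_get_tsc_hz() / diff_cycles
	 * = 10e6 * 2e9 / 1e9 = 20,000,000. Despite the "mpps" variable
	 * names, the printed Rx-pps/Tx-pps values are plain packets per
	 * second.
	 */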
nic_stats_clear(portid_t port_id)
	if (port_id_is_invalid(port_id, ENABLED_WARN)) {

	rte_eth_stats_reset(port_id);
	printf("\n NIC statistics for port %d cleared\n", port_id);

nic_xstats_display(portid_t port_id)
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);

	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");

	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");

	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");

	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
		printf("%s: %"PRIu64"\n",
			xstats_names[idx_xstat].name,
			xstats[idx_xstat].value);
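	/*
	 * Note the two-step contract used above:
	 * rte_eth_xstats_get_names(port_id, NULL, 0) only returns the number
	 * of extended statistics, and a second call with a buffer of exactly
	 * that size fills in the names; rte_eth_xstats_get() follows the
	 * same pattern for the values. If the counts from the two passes
	 * disagree, the code bails out rather than read a short buffer.
	 */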
nic_xstats_clear(portid_t port_id)
	rte_eth_xstats_reset(port_id);

nic_stats_mapping_display(portid_t port_id)
	struct rte_port *port = &ports[port_id];

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
			" no queue statistic mapping set\n", port_id);

	printf("\n %s NIC statistics mapping for port %-2d %s\n",
		nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf(" RX-queue %2d mapped to Stats Reg %2d\n",
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf(" TX-queue %2d mapped to Stats Reg %2d\n",
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);

	printf(" %s####################################%s\n",
		nic_stats_mapping_border, nic_stats_mapping_border);
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
	struct rte_eth_rxq_info qinfo;

	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
		printf("Failed to retrieve information for port: %u, "
			"RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
		info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
		(qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
		(qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
		(qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
	struct rte_eth_txq_info qinfo;

	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
		printf("Failed to retrieve information for port: %u, "
			"TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
		info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
		(qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
port_infos_display(portid_t port_id)
	struct rte_port *port;
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;

	struct rte_mempool *mp;
	static const char *info_border = "*********************";

	char name[RTE_ETH_NAME_MAX_LEN];

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {

	port = &ports[port_id];
	rte_eth_link_get_nowait(port_id, &link);
	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	printf("\n%s Infos for port %-2d %s\n",
		info_border, port_id, info_border);
	rte_eth_macaddr_get(port_id, &mac_addr);
	print_ethaddr("MAC address: ", &mac_addr);
	rte_eth_dev_get_name_by_port(port_id, name);
	printf("\nDevice name: %s", name);
	printf("\nDriver name: %s", dev_info.driver_name);
	if (dev_info.device->devargs && dev_info.device->devargs->args)
		printf("\nDevargs: %s", dev_info.device->devargs->args);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
			printf("\nmemory allocation on the socket: %d",
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
		("full-duplex") : ("half-duplex"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
		rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
		rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
		(unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
		(unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload:\n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf(" strip on \n");
			printf(" strip off \n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf(" filter on \n");
			printf(" filter off \n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf(" qinq(extend) on \n");
			printf(" qinq(extend) off \n");

	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No RSS offload flow type is supported.\n");
		printf("Supported RSS offload flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1;
			i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
			p = flowtype_to_str(i);
				printf(" user defined %d\n", i);

	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
	printf("Maximum configurable length of RX packet: %u\n",
		dev_info.max_rx_pktlen);
	if (dev_info.max_vfs)
		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
	if (dev_info.max_vmdq_pools)
		printf("Maximum number of VMDq pools: %u\n",
			dev_info.max_vmdq_pools);

	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);

	/* Show switch info only if valid switch domain and port id is set */
	if (dev_info.switch_info.domain_id !=
			RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		if (dev_info.switch_info.name)
			printf("Switch name: %s\n", dev_info.switch_info.name);

		printf("Switch domain Id: %u\n",
			dev_info.switch_info.domain_id);
		printf("Switch Port Id: %u\n",
			dev_info.switch_info.port_id);
port_summary_header_display(void)
	uint16_t port_number;

	port_number = rte_eth_dev_count_avail();
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
		"Driver", "Status", "Link");

port_summary_display(portid_t port_id)
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {

	rte_eth_link_get_nowait(port_id, &link);
	rte_eth_dev_info_get(port_id, &dev_info);
	rte_eth_dev_get_name_by_port(port_id, name);
	rte_eth_macaddr_get(port_id, &mac_addr);

	printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
		port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
		dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
		(unsigned int) link.link_speed);
port_offload_cap_display(portid_t port_id)
	struct rte_eth_dev_info dev_info;
	static const char *info_border = "************";

	if (port_id_is_invalid(port_id, ENABLED_WARN))

	rte_eth_dev_info_get(port_id, &dev_info);

	printf("\n%s Port %d supported offload features: %s\n",
		info_border, port_id, info_border);

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
		printf("VLAN stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_STRIP)
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
		printf("Double VLANs stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_QINQ_STRIP)
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
		printf("RX IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_IPV4_CKSUM)

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
		printf("RX UDP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_UDP_CKSUM)

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
		printf("RX TCP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_TCP_CKSUM)

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
		printf("RX SCTP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_SCTP_CKSUM)

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("RX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
		printf("RX Outer UDP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
		printf("Large receive offload: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_TCP_LRO)

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
		printf("HW timestamp: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_TIMESTAMP)

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
		printf("Rx Keep CRC: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_KEEP_CRC)

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
		printf("RX offload security: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_SECURITY)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
		printf("VLAN insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_VLAN_INSERT)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Double VLANs insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_QINQ_INSERT)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
		printf("TX IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_IPV4_CKSUM)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
		printf("TX UDP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_UDP_CKSUM)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
		printf("TX TCP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_TCP_CKSUM)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
		printf("TX SCTP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_SCTP_CKSUM)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("TX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
		printf("TX TCP segmentation: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_TCP_TSO)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
		printf("TX UDP segmentation: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_UDP_TSO)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
		printf("TSO for VXLAN tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_VXLAN_TNL_TSO)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
		printf("TSO for GRE tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_GRE_TNL_TSO)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
		printf("TSO for IPIP tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_IPIP_TNL_TSO)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
		printf("TSO for GENEVE tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_GENEVE_TNL_TSO)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
		printf("IP tunnel TSO: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_IP_TNL_TSO)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
		printf("UDP tunnel TSO: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_UDP_TNL_TSO)

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
		printf("TX Outer UDP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
port_id_is_invalid(portid_t port_id, enum print_warning warning)
	if (port_id == (portid_t)RTE_PORT_ALL)

	RTE_ETH_FOREACH_DEV(pid)

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

void print_valid_ports(void)
	printf("The valid ports array is [");
	RTE_ETH_FOREACH_DEV(pid) {
vlan_id_is_invalid(uint16_t vlan_id)
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);

port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
	const struct rte_pci_device *pci_dev;
	const struct rte_bus *bus;

		printf("Port register offset 0x%X not aligned on a 4-byte "

	if (!ports[port_id].dev_info.device) {
		printf("Invalid device\n");

	bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
	if (bus && !strcmp(bus->name, "pci")) {
		pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
		printf("Not a PCI device\n");

	pci_len = pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
			"resource (length=%"PRIu64")\n",
			port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
reg_bit_pos_is_invalid(uint8_t bit_pos)
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);

port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
	if (port_id_is_invalid(port_id, ENABLED_WARN))
	if (port_reg_off_is_invalid(port_id, reg_off))
	if (reg_bit_pos_is_invalid(bit_x))
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));

port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
	uint8_t bit1_pos, uint8_t bit2_pos)
	if (port_id_is_invalid(port_id, ENABLED_WARN))
	if (port_reg_off_is_invalid(port_id, reg_off))
	if (reg_bit_pos_is_invalid(bit1_pos))
	if (reg_bit_pos_is_invalid(bit2_pos))
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
		((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
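	/*
	 * Example of the field extraction above (values assumed): for
	 * reg_v = 0xDEADBEEF, bit1_pos = 8 and bit2_pos = 15, l_bit/h_bit
	 * become 8/15; the register value is shifted right by l_bit and
	 * masked with ((1 << (h_bit - l_bit + 1)) - 1) = 0xFF, leaving
	 * 0xBE, printed as "bits[8, 15]=0xBE (190)".
	 */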
port_reg_display(portid_t port_id, uint32_t reg_off)
	if (port_id_is_invalid(port_id, ENABLED_WARN))
	if (port_reg_off_is_invalid(port_id, reg_off))
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);

port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
	if (port_id_is_invalid(port_id, ENABLED_WARN))
	if (port_reg_off_is_invalid(port_id, reg_off))
	if (reg_bit_pos_is_invalid(bit_pos))
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);

	reg_v = port_id_pci_reg_read(port_id, reg_off);
		reg_v &= ~(1 << bit_pos);
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);

port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
	uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
	if (port_id_is_invalid(port_id, ENABLED_WARN))
	if (port_reg_off_is_invalid(port_id, reg_off))
	if (reg_bit_pos_is_invalid(bit1_pos))
	if (reg_bit_pos_is_invalid(bit2_pos))
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be <= %u (0x%x)\n",
			(unsigned)value, (unsigned)value,
			(unsigned)max_v, (unsigned)max_v);
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
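	/*
	 * Usage sketch (register layout assumed, offset hypothetical):
	 * port_reg_bit_field_set(0, 0x10, 4, 7, 0x5) reads the 32-bit PCI
	 * register at offset 0x10, clears bits [4, 7] through
	 * reg_v &= ~(0xF << 4), ORs in 0x5 << 4 and writes the result
	 * back, so only the selected field changes.
	 */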
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
	if (port_id_is_invalid(port_id, ENABLED_WARN))
	if (port_reg_off_is_invalid(port_id, reg_off))
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);

port_mtu_set(portid_t port_id, uint16_t mtu)
	if (port_id_is_invalid(port_id, ENABLED_WARN))
	diag = rte_eth_dev_set_mtu(port_id, mtu);
		printf("Set MTU failed. diag=%d\n", diag);

/* Generic flow management functions. */

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	const struct rte_flow_item *pattern,
	const struct rte_flow_action *actions,
	struct rte_flow_error *error)
	const struct rte_flow_conv_rule rule = {
		.pattern_ro = pattern,
		.actions_ro = actions,
	struct port_flow *pf;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
		(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
/** Print a message out of a flow error. */
port_flow_complain(struct rte_flow_error *error)
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",

	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
			!errstrlist[error->type])
		errstr = "unknown type";
		errstr = errstrlist[error->type];
	printf("Caught error type %d (%s): %s%s: %s\n",
		error->type, errstr,
		error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
			error->cause), buf) : "",
		error->message ? error->message : "(no stated reason)",

/** Validate flow rule. */
port_flow_validate(portid_t port_id,
	const struct rte_flow_attr *attr,
	const struct rte_flow_item *pattern,
	const struct rte_flow_action *actions)
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	printf("Flow rule validated\n");
/** Create flow rule. */
port_flow_create(portid_t port_id,
	const struct rte_flow_attr *attr,
	const struct rte_flow_item *pattern,
	const struct rte_flow_action *actions)
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;

	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
		return port_flow_complain(&error);
	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			rte_flow_destroy(port_id, flow, NULL);
		id = port->flow_list->id + 1;

	pf = port_flow_new(attr, pattern, actions, &error);
		rte_flow_destroy(port_id, flow, NULL);
		return port_flow_complain(&error);
	pf->next = port->flow_list;

	port->flow_list = pf;
	printf("Flow rule #%u created\n", pf->id);
/** Destroy a number of flow rules. */
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
	struct rte_port *port;
	struct port_flow **tmp;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
			port_id == (portid_t)RTE_PORT_ALL)
	port = &ports[port_id];
	tmp = &port->flow_list;

	for (i = 0; i != n; ++i) {
		struct rte_flow_error error;
		struct port_flow *pf = *tmp;

		if (rule[i] != pf->id)
		/* Poisoning to make sure PMDs update it in case of error. */
		memset(&error, 0x33, sizeof(error));
		if (rte_flow_destroy(port_id, pf->flow, &error)) {
			ret = port_flow_complain(&error);
		printf("Flow rule #%u destroyed\n", pf->id);
		tmp = &(*tmp)->next;

/** Remove all flow rules. */
port_flow_flush(portid_t port_id)
	struct rte_flow_error error;
	struct rte_port *port;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		ret = port_flow_complain(&error);
		if (port_id_is_invalid(port_id, DISABLED_WARN) ||
				port_id == (portid_t)RTE_PORT_ALL)

	port = &ports[port_id];
	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
/** Query a flow rule. */
port_flow_query(portid_t port_id, uint32_t rule,
	const struct rte_flow_action *action)
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;

	struct rte_flow_query_count count;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
			port_id == (portid_t)RTE_PORT_ALL)
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		printf("Flow rule #%u not found\n", rule);

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
		&name, sizeof(name),
		(void *)(uintptr_t)action->type, &error);
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("Cannot query action type %d (%s)\n",
			action->type, name);

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		" hits: %" PRIu64 "\n"
		" bytes: %" PRIu64 "\n",
		query.count.hits_set,
		query.count.bytes_set,
		printf("Cannot display result for action type %d (%s)\n",
			action->type, name);
/** List flow rules. */
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
			port_id == (portid_t)RTE_PORT_ALL)
	port = &ports[port_id];
	if (!port->flow_list)

	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;
		const struct rte_flow_attr *curr = pf->rule.attr;

		/* Filter out unwanted groups. */
		for (i = 0; i != n; ++i)
			if (curr->group == group[i])

		for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
			const struct rte_flow_attr *comp = (*tmp)->rule.attr;

			if (curr->group > comp->group ||
					(curr->group == comp->group &&
					curr->priority > comp->priority) ||
					(curr->group == comp->group &&
					curr->priority == comp->priority &&
					pf->id > (*tmp)->id))

	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->rule.pattern;
		const struct rte_flow_action *action = pf->rule.actions;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
			pf->rule.attr->group,
			pf->rule.attr->priority,
			pf->rule.attr->ingress ? 'i' : '-',
			pf->rule.attr->egress ? 'e' : '-',
			pf->rule.attr->transfer ? 't' : '-');
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
					&name, sizeof(name),
					(void *)(uintptr_t)item->type,
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", name);
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
					&name, sizeof(name),
					(void *)(uintptr_t)action->type,
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", name);
/** Restrict ingress traffic to the defined flow rules. */
port_flow_isolate(portid_t port_id, int set)
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_isolate(port_id, set, &error))
		return port_flow_complain(&error);
	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
		set ? "now restricted" : "not restricted anymore");
/*
 * RX/TX ring descriptors display functions.
 */
rx_queue_id_is_invalid(queueid_t rxq_id)
	if (rxq_id < nb_rxq)
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
tx_queue_id_is_invalid(queueid_t txq_id)
	if (txq_id < nb_txq)
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
rx_desc_id_is_invalid(uint16_t rxdesc_id)
	if (rxdesc_id < nb_rxd)
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",

tx_desc_id_is_invalid(uint16_t txdesc_id)
	if (txdesc_id < nb_txd)
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
		port_id, q_id, ring_name);
	mz = rte_memzone_lookup(mz_name);
		printf("%s ring memory zone (port %d, queue %d) not found "
			"(zone name = %s)\n",
			ring_name, port_id, q_id, mz_name);
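	/*
	 * Example (port/queue assumed): for port 0, queue 2 and ring_name
	 * "rx_ring", the memzone looked up above is named
	 * "eth_p0_q2_rx_ring". PMDs that allocate their descriptor rings
	 * through rte_eth_dma_zone_reserve() are expected to follow this
	 * naming scheme, which is what makes the lookup work.
	 */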
union igb_ring_dword {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;

ring_rxd_display_dword(union igb_ring_dword dword)
	printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
		(unsigned)dword.words.hi);

ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	__rte_unused portid_t port_id,
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);

ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
		(unsigned)txd.lo_dword.words.lo,
		(unsigned)txd.lo_dword.words.hi,
		(unsigned)txd.hi_dword.words.lo,
		(unsigned)txd.hi_dword.words.hi);
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
	if (rx_queue_id_is_invalid(rxq_id))
	if (rx_desc_id_is_invalid(rxd_id))
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);

tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
	if (tx_queue_id_is_invalid(txq_id))
	if (tx_desc_id_is_invalid(txd_id))
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	ring_tx_descriptor_display(tx_mz, txd_id);

fwd_lcores_config_display(void)
	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
rxtx_config_display(void)
	printf(" %s packet forwarding%s packets/burst=%d\n",
		cur_fwd_eng->fwd_mode_name,
		retry_enabled == 0 ? "" : " with retry",

	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
		printf(" packet len=%u - nb packet segments=%d\n",
			(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
		nb_fwd_lcores, nb_fwd_ports);

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
		struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
		uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
		uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
		uint16_t nb_rx_desc_tmp;
		uint16_t nb_tx_desc_tmp;
		struct rte_eth_rxq_info rx_qinfo;
		struct rte_eth_txq_info tx_qinfo;

		/* per port config */
		printf(" port %d: RX queue number: %d Tx queue number: %d\n",
			(unsigned int)pid, nb_rxq, nb_txq);

		printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
			ports[pid].dev_conf.rxmode.offloads,
			ports[pid].dev_conf.txmode.offloads);

		/* per rx queue config only for first queue to be less verbose */
		for (qid = 0; qid < 1; qid++) {
			rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
				nb_rx_desc_tmp = nb_rx_desc[qid];
				nb_rx_desc_tmp = rx_qinfo.nb_desc;

			printf(" RX queue: %d\n", qid);
			printf(" RX desc=%d - RX free threshold=%d\n",
				nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
			printf(" RX threshold registers: pthresh=%d hthresh=%d "
				rx_conf[qid].rx_thresh.pthresh,
				rx_conf[qid].rx_thresh.hthresh,
				rx_conf[qid].rx_thresh.wthresh);
			printf(" RX Offloads=0x%"PRIx64"\n",
				rx_conf[qid].offloads);

		/* per tx queue config only for first queue to be less verbose */
		for (qid = 0; qid < 1; qid++) {
			rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
				nb_tx_desc_tmp = nb_tx_desc[qid];
				nb_tx_desc_tmp = tx_qinfo.nb_desc;

			printf(" TX queue: %d\n", qid);
			printf(" TX desc=%d - TX free threshold=%d\n",
				nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
			printf(" TX threshold registers: pthresh=%d hthresh=%d "
				tx_conf[qid].tx_thresh.pthresh,
				tx_conf[qid].tx_thresh.hthresh,
				tx_conf[qid].tx_thresh.wthresh);
			printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
				tx_conf[qid].offloads, tx_conf->tx_rs_thresh);
port_rss_reta_info(portid_t port_id,
	struct rte_eth_rss_reta_entry64 *reta_conf,
	uint16_t nb_entries)
	uint16_t i, idx, shift;

	if (port_id_is_invalid(port_id, ENABLED_WARN))

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
		printf("Failed to get RSS RETA info, return code = %d\n", ret);

	for (i = 0; i < nb_entries; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
			i, reta_conf[idx].reta[shift]);
/*
 * Displays the RSS hash functions of a port and, optionally, the RSS hash
 * key of the port.
 */
port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
	struct rte_eth_rss_conf rss_conf = {0};
	uint8_t rss_key[RSS_HASH_KEY_LENGTH];

	struct rte_eth_dev_info dev_info;
	uint8_t hash_key_size;

	if (port_id_is_invalid(port_id, ENABLED_WARN))

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.hash_key_size > 0 &&
			dev_info.hash_key_size <= sizeof(rss_key))
		hash_key_size = dev_info.hash_key_size;
		printf("dev_info did not provide a valid hash key size\n");

	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
	rss_conf.rss_key_len = hash_key_size;
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
			printf("port index %d invalid\n", port_id);
			printf("operation not supported by device\n");
			printf("operation failed - diag=%d\n", diag);

	rss_hf = rss_conf.rss_hf;
		printf("RSS disabled\n");

	printf("RSS functions:\n ");
	for (i = 0; rss_type_table[i].str; i++) {
		if (rss_hf & rss_type_table[i].rss_type)
			printf("%s ", rss_type_table[i].str);
	printf("RSS key:\n");
	for (i = 0; i < hash_key_size; i++)
		printf("%02X", rss_key[i]);
port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
	struct rte_eth_rss_conf rss_conf;

	rss_conf.rss_key = NULL;
	rss_conf.rss_key_len = hash_key_len;
	rss_conf.rss_hf = 0;
	for (i = 0; rss_type_table[i].str; i++) {
		if (!strcmp(rss_type_table[i].str, rss_type))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
		rss_conf.rss_key = hash_key;
		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
			printf("port index %d invalid\n", port_id);
			printf("operation not supported by device\n");
			printf("operation failed - diag=%d\n", diag);
/*
 * Setup forwarding configuration for each logical core.
 */
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
	streamid_t nb_fs_per_lcore;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);

	nb_lc = (lcoreid_t) (nb_fc - nb_extra);

	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
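	/*
	 * Distribution example for the loops above (numbers assumed): with
	 * nb_fwd_streams = 10 and nb_fwd_lcores = 4, nb_fs_per_lcore = 2
	 * and nb_extra = 2, so the first two lcores take 2 streams each and
	 * the last two take 3, covering all 10 streams with contiguous
	 * stream_idx ranges.
	 */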
fwd_topology_tx_port_get(portid_t rxp)
	static int warning_once = 1;

	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);

	switch (port_topology) {
	case PORT_TOPOLOGY_PAIRED:
		if ((rxp & 0x1) == 0) {
			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
			printf("\nWarning! port-topology=paired"
				" and an odd number of forwarding ports,"
				" the last port will pair with"
	case PORT_TOPOLOGY_CHAINED:
		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
	case PORT_TOPOLOGY_LOOP:
simple_fwd_config_setup(void)
	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower than or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		fwd_streams[i]->rx_port = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue = 0;
		fwd_streams[i]->tx_port =
			fwd_ports_ids[fwd_topology_tx_port_get(i)];
		fwd_streams[i]->tx_queue = 0;
		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
		fwd_streams[i]->retry_enabled = retry_enabled;
/*
 * For the RSS forwarding test, all streams are distributed over the lcores.
 * Each stream is composed of an RX queue to poll on an RX port for input
 * packets, associated with a TX queue of a TX port to which forwarded
 * packets are sent.
 */
rss_fwd_config_setup(void)
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */

	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[sm_id];
		txp = fwd_topology_tx_port_get(rxp);
		fs->rx_port = fwd_ports_ids[rxp];
		fs->tx_port = fwd_ports_ids[txp];
		fs->peer_addr = fs->tx_port;
		fs->retry_enabled = retry_enabled;

		if (rxp < nb_fwd_ports)
/*
 * For the DCB forwarding test, each core is assigned to one traffic class.
 * Each core is assigned a multi-stream, each stream being composed of
 * an RX queue to poll on an RX port for input packets, associated with
 * a TX queue of a TX port to which forwarded packets are sent. All RX and
 * TX queues map to the same traffic class.
 * If VMDQ and DCB co-exist, the traffic classes of the different pools
 * share the same core.
 */
dcb_fwd_config_setup(void)
	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
	portid_t txp, rxp = 0;
	queueid_t txq, rxq = 0;

	uint16_t nb_rx_queue, nb_tx_queue;
	uint16_t i, j, k, sm_id = 0;

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);

	/* reinitialize forwarding streams */

	/* get the dcb info on the first RX and TX ports */
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		fwd_lcores[lc_id]->stream_nb = 0;
		fwd_lcores[lc_id]->stream_idx = sm_id;
		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
			/* if nb_queue is zero, this tc is
			 * not enabled on the pool
			 */
			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
			k = fwd_lcores[lc_id]->stream_nb +
				fwd_lcores[lc_id]->stream_idx;
			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
			for (j = 0; j < nb_rx_queue; j++) {
				struct fwd_stream *fs;

				fs = fwd_streams[k + j];
				fs->rx_port = fwd_ports_ids[rxp];
				fs->rx_queue = rxq + j;
				fs->tx_port = fwd_ports_ids[txp];
				fs->tx_queue = txq + j % nb_tx_queue;
				fs->peer_addr = fs->tx_port;
				fs->retry_enabled = retry_enabled;
			fwd_lcores[lc_id]->stream_nb +=
				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

		if (tc < rxp_dcb_info.nb_tcs)
		/* Restart from TC 0 on next RX port */
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			(rxp + ((nb_ports >> 1) / nb_fwd_ports));

		if (rxp >= nb_fwd_ports)
		/* get the dcb information on next RX and TX ports */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
			txp = (portid_t) (rxp - 1);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
icmp_echo_config_setup(void)
	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
			(nb_txq * nb_fwd_ports);
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
	if (verbose_level > 0) {
		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
			cur_fwd_config.nb_fwd_lcores,
			cur_fwd_config.nb_fwd_ports,
			cur_fwd_config.nb_fwd_streams);

	/* reinitialize forwarding streams */
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		if (verbose_level > 0)
			printf(" core=%d: \n", lc_id);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			fs->rx_port = fwd_ports_ids[rxp];
			fs->tx_port = fs->rx_port;
			fs->peer_addr = fs->tx_port;
			fs->retry_enabled = retry_enabled;
			if (verbose_level > 0)
				printf(" stream=%d port=%d rxq=%d txq=%d\n",
					sm_id, fs->rx_port, fs->rx_queue,
			rxq = (queueid_t) (rxq + 1);
			if (rxq == nb_rxq) {
				rxp = (portid_t) (rxp + 1);
#if defined RTE_LIBRTE_PMD_SOFTNIC
softnic_fwd_config_setup(void)
	struct rte_port *port;
	portid_t pid, softnic_portid;

	uint8_t softnic_enable = 0;

	RTE_ETH_FOREACH_DEV(pid) {
		const char *driver = port->dev_info.driver_name;

		if (strcmp(driver, "net_softnic") == 0) {
			softnic_portid = pid;

	if (softnic_enable == 0) {
		printf("Softnic mode not configured (%s)!\n", __func__);

	cur_fwd_config.nb_fwd_ports = 1;
	cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;

	/* Re-initialize forwarding streams */

	/*
	 * In the softnic forwarding test, the number of forwarding cores
	 * is set to one and the remaining cores are used for softnic packet
	 * processing.
	 */
	cur_fwd_config.nb_fwd_lcores = 1;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
		fwd_streams[i]->rx_port = softnic_portid;
		fwd_streams[i]->rx_queue = i;
		fwd_streams[i]->tx_port = softnic_portid;
		fwd_streams[i]->tx_queue = i;
		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
		fwd_streams[i]->retry_enabled = retry_enabled;
fwd_config_setup(void)
	cur_fwd_config.fwd_eng = cur_fwd_eng;
	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
		icmp_echo_config_setup();

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		softnic_fwd_config_setup();

	if ((nb_rxq > 1) && (nb_txq > 1)) {
			dcb_fwd_config_setup();
			rss_fwd_config_setup();

		simple_fwd_config_setup();
mp_alloc_to_str(uint8_t mode)
	case MP_ALLOC_NATIVE:

	case MP_ALLOC_XMEM_HUGE:

pkt_fwd_config_display(struct fwd_config *cfg)
	struct fwd_stream *fs;

	printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
		"NUMA support %s, MP allocation mode: %s\n",
		cfg->fwd_eng->fwd_mode_name,
		retry_enabled == 0 ? "" : " with retry",
		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
		numa_support == 1 ? "enabled" : "disabled",
		mp_alloc_to_str(mp_alloc_type));

		printf("TX retry num: %u, delay between TX retries: %uus\n",
			burst_tx_retry_num, burst_tx_delay_time);
	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
		printf("Logical Core %u (socket %u) forwards packets on "
			fwd_lcores_cpuids[lc_id],
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
			fwd_lcores[lc_id]->stream_nb);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			printf("\n RX P=%d/Q=%d (socket %u) -> TX "
				"P=%d/Q=%d (socket %u) ",
				fs->rx_port, fs->rx_queue,
				ports[fs->rx_port].socket_id,
				fs->tx_port, fs->tx_queue,
				ports[fs->tx_port].socket_id);
			print_ethaddr("peer=",
				&peer_eth_addrs[fs->peer_addr]);
set_fwd_eth_peer(portid_t port_id, char *peer_addr)
	uint8_t c, new_peer_addr[6];

	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);

	if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr,
			sizeof(new_peer_addr)) < 0) {
		printf("Error: Invalid ethernet address: %s\n", peer_addr);

	for (c = 0; c < 6; c++)
		peer_eth_addrs[port_id].addr_bytes[c] =

set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
	unsigned int lcore_cpuid;

	for (i = 0; i < nb_lc; i++) {
		lcore_cpuid = lcorelist[i];
		if (!rte_lcore_is_enabled(lcore_cpuid)) {
			printf("lcore %u not enabled\n", lcore_cpuid);
		if (lcore_cpuid == rte_get_master_lcore()) {
			printf("lcore %u cannot be used for packet forwarding:"
				" it is the master lcore, reserved for"
				" command line parsing only\n",
			fwd_lcores_cpuids[i] = lcore_cpuid;
	if (record_now == 0) {

	nb_cfg_lcores = (lcoreid_t) nb_lc;
	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
		printf("previous number of forwarding cores %u - changed to "
			"number of configured cores %u\n",
			(unsigned int) nb_fwd_lcores, nb_lc);
		nb_fwd_lcores = (lcoreid_t) nb_lc;
set_fwd_lcores_mask(uint64_t lcoremask)
	unsigned int lcorelist[64];

	if (lcoremask == 0) {
		printf("Invalid NULL mask of cores\n");

	for (i = 0; i < 64; i++) {
		if (!((uint64_t)(1ULL << i) & lcoremask))
		lcorelist[nb_lc++] = i;
	return set_fwd_lcores_list(lcorelist, nb_lc);
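/*
 * Example (mask assumed): set_fwd_lcores_mask(0xe) selects bit positions
 * 1-3 and is therefore equivalent to
 * set_fwd_lcores_list((unsigned int[]){1, 2, 3}, 3).
 */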
2346 set_fwd_lcores_number(uint16_t nb_lc)
2348 if (nb_lc > nb_cfg_lcores) {
2349 printf("nb fwd cores %u > %u (max. number of configured "
2350 "lcores) - ignored\n",
2351 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2354 nb_fwd_lcores = (lcoreid_t) nb_lc;
2355 printf("Number of forwarding cores set to %u\n",
2356 (unsigned int) nb_fwd_lcores);
2360 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2368 for (i = 0; i < nb_pt; i++) {
2369 port_id = (portid_t) portlist[i];
2370 if (port_id_is_invalid(port_id, ENABLED_WARN))
2373 fwd_ports_ids[i] = port_id;
2375 if (record_now == 0) {
2379 nb_cfg_ports = (portid_t) nb_pt;
2380 if (nb_fwd_ports != (portid_t) nb_pt) {
2381 printf("previous number of forwarding ports %u - changed to "
2382 "number of configured ports %u\n",
2383 (unsigned int) nb_fwd_ports, nb_pt);
2384 nb_fwd_ports = (portid_t) nb_pt;
2389 set_fwd_ports_mask(uint64_t portmask)
2391 unsigned int portlist[64];
2395 if (portmask == 0) {
2396 printf("Invalid all-zero port mask\n");
2400 RTE_ETH_FOREACH_DEV(i) {
2401 if (!((uint64_t)(1ULL << i) & portmask))
2403 portlist[nb_pt++] = i;
2405 set_fwd_ports_list(portlist, nb_pt);
2409 set_fwd_ports_number(uint16_t nb_pt)
2411 if (nb_pt > nb_cfg_ports) {
2412 printf("nb fwd ports %u > %u (number of configured "
2413 "ports) - ignored\n",
2414 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2417 nb_fwd_ports = (portid_t) nb_pt;
2418 printf("Number of forwarding ports set to %u\n",
2419 (unsigned int) nb_fwd_ports);
2423 port_is_forwarding(portid_t port_id)
2427 if (port_id_is_invalid(port_id, ENABLED_WARN))
2430 for (i = 0; i < nb_fwd_ports; i++) {
2431 if (fwd_ports_ids[i] == port_id)
2439 set_nb_pkt_per_burst(uint16_t nb)
2441 if (nb > MAX_PKT_BURST) {
2442 printf("nb pkt per burst: %u > %u (maximum packets per burst) "
2444 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2447 nb_pkt_per_burst = nb;
2448 printf("Number of packets per burst set to %u\n",
2449 (unsigned int) nb_pkt_per_burst);
2453 tx_split_get_name(enum tx_pkt_split split)
2457 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2458 if (tx_split_name[i].split == split)
2459 return tx_split_name[i].name;
2465 set_tx_pkt_split(const char *name)
2469 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2470 if (strcmp(tx_split_name[i].name, name) == 0) {
2471 tx_pkt_split = tx_split_name[i].split;
2475 printf("unknown value: \"%s\"\n", name);
2479 show_tx_pkt_segments(void)
2485 split = tx_split_get_name(tx_pkt_split);
2487 printf("Number of segments: %u\n", n);
2488 printf("Segment sizes: ");
2489 for (i = 0; i != n - 1; i++)
2490 printf("%hu,", tx_pkt_seg_lengths[i]);
2491 printf("%hu\n", tx_pkt_seg_lengths[i]);
2492 printf("Split packet: %s\n", split);
2496 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2498 uint16_t tx_pkt_len;
2501 if (nb_segs >= (unsigned) nb_txd) {
2502 printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n",
2503 nb_segs, (unsigned int) nb_txd);
2508 * Check that each segment length does not exceed
2509 * the mbuf data size.
2510 * Check also that the total packet length is at least the size of an
2511 * empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
2514 for (i = 0; i < nb_segs; i++) {
2515 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2516 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2517 i, seg_lengths[i], (unsigned) mbuf_data_size);
2520 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2522 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
2523 printf("total packet length=%u < %d - give up\n",
2524 (unsigned) tx_pkt_len,
2525 (int)(sizeof(struct ether_hdr) + 20 + 8));
2529 for (i = 0; i < nb_segs; i++)
2530 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2532 tx_pkt_length = tx_pkt_len;
2533 tx_pkt_nb_segs = (uint8_t) nb_segs;
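/*
 * Worked example (illustrative, assuming a 2048-byte mbuf data size and
 * nb_txd = 512): set_tx_pkt_segments() with lengths {64, 128} is
 * accepted, since each segment fits in one mbuf and the 192-byte total
 * exceeds the 42-byte minimum (14-byte Ethernet + 20-byte IPv4 +
 * 8-byte UDP headers).
 */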
2537 setup_gro(const char *onoff, portid_t port_id)
2539 if (!rte_eth_dev_is_valid_port(port_id)) {
2540 printf("invalid port id %u\n", port_id);
2543 if (test_done == 0) {
2544 printf("Before enable/disable GRO,"
2545 " please stop forwarding first\n");
2548 if (strcmp(onoff, "on") == 0) {
2549 if (gro_ports[port_id].enable != 0) {
2550 printf("Port %u has enabled GRO. Please"
2551 " disable GRO first\n", port_id);
2554 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2555 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
2556 gro_ports[port_id].param.max_flow_num =
2557 GRO_DEFAULT_FLOW_NUM;
2558 gro_ports[port_id].param.max_item_per_flow =
2559 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
2561 gro_ports[port_id].enable = 1;
2563 if (gro_ports[port_id].enable == 0) {
2564 printf("Port %u has disabled GRO\n", port_id);
2567 gro_ports[port_id].enable = 0;
2572 setup_gro_flush_cycles(uint8_t cycles)
2574 if (test_done == 0) {
2575 printf("Before change flush interval for GRO,"
2576 " please stop forwarding first.\n");
2580 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
2581 GRO_DEFAULT_FLUSH_CYCLES) {
2582 printf("The flushing cycle be in the range"
2583 " of 1 to %u. Revert to the default"
2585 GRO_MAX_FLUSH_CYCLES,
2586 GRO_DEFAULT_FLUSH_CYCLES);
2587 cycles = GRO_DEFAULT_FLUSH_CYCLES;
2590 gro_flush_cycles = cycles;
2594 show_gro(portid_t port_id)
2596 struct rte_gro_param *param;
2597 uint32_t max_pkts_num;
2599 param = &gro_ports[port_id].param;
2601 if (!rte_eth_dev_is_valid_port(port_id)) {
2602 printf("Invalid port id %u.\n", port_id);
2605 if (gro_ports[port_id].enable) {
2606 printf("GRO type: TCP/IPv4\n");
2607 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2608 max_pkts_num = param->max_flow_num *
2609 param->max_item_per_flow;
2611 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
2612 printf("Max number of packets to perform GRO: %u\n",
2614 printf("Flushing cycles: %u\n", gro_flush_cycles);
2616 printf("Port %u doesn't enable GRO.\n", port_id);
2620 setup_gso(const char *mode, portid_t port_id)
2622 if (!rte_eth_dev_is_valid_port(port_id)) {
2623 printf("invalid port id %u\n", port_id);
2626 if (strcmp(mode, "on") == 0) {
2627 if (test_done == 0) {
2628 printf("before enabling GSO,"
2629 " please stop forwarding first\n");
2632 gso_ports[port_id].enable = 1;
2633 } else if (strcmp(mode, "off") == 0) {
2634 if (test_done == 0) {
2635 printf("before disabling GSO,"
2636 " please stop forwarding first\n");
2639 gso_ports[port_id].enable = 0;
2644 list_pkt_forwarding_modes(void)
2646 static char fwd_modes[128] = "";
2647 const char *separator = "|";
2648 struct fwd_engine *fwd_eng;
2651 if (strlen(fwd_modes) == 0) {
2652 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2653 strncat(fwd_modes, fwd_eng->fwd_mode_name,
2654 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2655 strncat(fwd_modes, separator,
2656 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2658 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
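/*
 * The loop above lazily builds a "name1|name2|...|nameN" string and
 * caches it in the static buffer; the trailing separator left by the
 * final strncat() is trimmed by overwriting it with the '\0' terminator.
 */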
2665 list_pkt_forwarding_retry_modes(void)
2667 static char fwd_modes[128] = "";
2668 const char *separator = "|";
2669 struct fwd_engine *fwd_eng;
2672 if (strlen(fwd_modes) == 0) {
2673 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2674 if (fwd_eng == &rx_only_engine)
2676 strncat(fwd_modes, fwd_eng->fwd_mode_name,
2678 strlen(fwd_modes) - 1);
2679 strncat(fwd_modes, separator,
2681 strlen(fwd_modes) - 1);
2683 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2690 set_pkt_forwarding_mode(const char *fwd_mode_name)
2692 struct fwd_engine *fwd_eng;
2696 while ((fwd_eng = fwd_engines[i]) != NULL) {
2697 if (strcmp(fwd_eng->fwd_mode_name, fwd_mode_name) == 0) {
2698 printf("Set %s packet forwarding mode%s\n",
2700 retry_enabled == 0 ? "" : " with retry");
2701 cur_fwd_eng = fwd_eng;
2706 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
2710 add_rx_dump_callbacks(portid_t portid)
2712 struct rte_eth_dev_info dev_info;
2715 if (port_id_is_invalid(portid, ENABLED_WARN))
2718 rte_eth_dev_info_get(portid, &dev_info);
2719 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
2720 if (!ports[portid].rx_dump_cb[queue])
2721 ports[portid].rx_dump_cb[queue] =
2722 rte_eth_add_rx_callback(portid, queue,
2723 dump_rx_pkts, NULL);
2727 add_tx_dump_callbacks(portid_t portid)
2729 struct rte_eth_dev_info dev_info;
2732 if (port_id_is_invalid(portid, ENABLED_WARN))
2734 rte_eth_dev_info_get(portid, &dev_info);
2735 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
2736 if (!ports[portid].tx_dump_cb[queue])
2737 ports[portid].tx_dump_cb[queue] =
2738 rte_eth_add_tx_callback(portid, queue,
2739 dump_tx_pkts, NULL);
2743 remove_rx_dump_callbacks(portid_t portid)
2745 struct rte_eth_dev_info dev_info;
2748 if (port_id_is_invalid(portid, ENABLED_WARN))
2750 rte_eth_dev_info_get(portid, &dev_info);
2751 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
2752 if (ports[portid].rx_dump_cb[queue]) {
2753 rte_eth_remove_rx_callback(portid, queue,
2754 ports[portid].rx_dump_cb[queue]);
2755 ports[portid].rx_dump_cb[queue] = NULL;
2760 remove_tx_dump_callbacks(portid_t portid)
2762 struct rte_eth_dev_info dev_info;
2765 if (port_id_is_invalid(portid, ENABLED_WARN))
2767 rte_eth_dev_info_get(portid, &dev_info);
2768 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
2769 if (ports[portid].tx_dump_cb[queue]) {
2770 rte_eth_remove_tx_callback(portid, queue,
2771 ports[portid].tx_dump_cb[queue]);
2772 ports[portid].tx_dump_cb[queue] = NULL;
2777 configure_rxtx_dump_callbacks(uint16_t verbose)
2781 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2782 TESTPMD_LOG(ERR, "Rx/Tx dump callbacks require RTE_ETHDEV_RXTX_CALLBACKS to be enabled\n");
2786 RTE_ETH_FOREACH_DEV(portid)
2788 if (verbose == 1 || verbose > 2)
2789 add_rx_dump_callbacks(portid);
2791 remove_rx_dump_callbacks(portid);
2793 add_tx_dump_callbacks(portid);
2795 remove_tx_dump_callbacks(portid);
2800 set_verbose_level(uint16_t vb_level)
2802 printf("Change verbose level from %u to %u\n",
2803 (unsigned int) verbose_level, (unsigned int) vb_level);
2804 verbose_level = vb_level;
2805 configure_rxtx_dump_callbacks(verbose_level);
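/*
 * Verbosity summary for configure_rxtx_dump_callbacks() above: level 0
 * removes all dump callbacks, level 1 dumps received packets only,
 * level 2 dumps transmitted packets only, and any level above 2 dumps
 * both directions.
 */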
2809 vlan_extend_set(portid_t port_id, int on)
2813 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2815 if (port_id_is_invalid(port_id, ENABLED_WARN))
2818 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2821 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
2822 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2824 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
2825 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2828 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2830 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
2831 "diag=%d\n", port_id, on, diag);
2832 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2836 rx_vlan_strip_set(portid_t port_id, int on)
2840 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2842 if (port_id_is_invalid(port_id, ENABLED_WARN))
2845 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2848 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
2849 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2851 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
2852 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2855 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2857 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
2858 "diag=%d\n", port_id, on, diag);
2859 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2863 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
2867 if (port_id_is_invalid(port_id, ENABLED_WARN))
2870 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
2872 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
2873 "diag=%d\n", port_id, queue_id, on, diag);
2877 rx_vlan_filter_set(portid_t port_id, int on)
2881 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2883 if (port_id_is_invalid(port_id, ENABLED_WARN))
2886 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2889 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
2890 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2892 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
2893 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
2896 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2898 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
2899 "diag=%d\n", port_id, on, diag);
2900 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2904 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
2908 if (port_id_is_invalid(port_id, ENABLED_WARN))
2910 if (vlan_id_is_invalid(vlan_id))
2912 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2915 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
2917 port_id, vlan_id, on, diag);
2922 rx_vlan_all_filter_set(portid_t port_id, int on)
2926 if (port_id_is_invalid(port_id, ENABLED_WARN))
2928 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
2929 if (rx_vft_set(port_id, vlan_id, on))
2935 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
2939 if (port_id_is_invalid(port_id, ENABLED_WARN))
2942 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
2946 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed "
2948 port_id, vlan_type, tp_id, diag);
2952 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
2955 struct rte_eth_dev_info dev_info;
2957 if (port_id_is_invalid(port_id, ENABLED_WARN))
2959 if (vlan_id_is_invalid(vlan_id))
2962 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2963 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
2964 printf("Error, as QinQ has been enabled.\n");
2967 rte_eth_dev_info_get(port_id, &dev_info);
2968 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
2969 printf("Error: vlan insert is not supported by port %d\n",
2974 tx_vlan_reset(port_id);
2975 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
2976 ports[port_id].tx_vlan_id = vlan_id;
2980 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
2983 struct rte_eth_dev_info dev_info;
2985 if (port_id_is_invalid(port_id, ENABLED_WARN))
2987 if (vlan_id_is_invalid(vlan_id))
2989 if (vlan_id_is_invalid(vlan_id_outer))
2992 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2993 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
2994 printf("Error, as QinQ hasn't been enabled.\n");
2997 rte_eth_dev_info_get(port_id, &dev_info);
2998 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
2999 printf("Error: qinq insert not supported by port %d\n",
3004 tx_vlan_reset(port_id);
3005 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT;
3006 ports[port_id].tx_vlan_id = vlan_id;
3007 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
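/*
 * Note: tx_qinq_set() deliberately mirrors tx_vlan_set(): VLAN extend
 * (ETH_VLAN_EXTEND_OFFLOAD) must already be on and the port must
 * advertise DEV_TX_OFFLOAD_QINQ_INSERT; the inner and outer tags are
 * then latched into the port configuration for subsequent transmits.
 */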
3011 tx_vlan_reset(portid_t port_id)
3013 if (port_id_is_invalid(port_id, ENABLED_WARN))
3015 ports[port_id].dev_conf.txmode.offloads &=
3016 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
3017 DEV_TX_OFFLOAD_QINQ_INSERT);
3018 ports[port_id].tx_vlan_id = 0;
3019 ports[port_id].tx_vlan_id_outer = 0;
3023 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
3025 if (port_id_is_invalid(port_id, ENABLED_WARN))
3028 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
3032 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
3035 uint8_t existing_mapping_found = 0;
3037 if (port_id_is_invalid(port_id, ENABLED_WARN))
3040 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
3043 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
3044 printf("map_value not in required range 0..%d\n",
3045 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
3049 if (!is_rx) { /* then TX */
3050 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3051 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3052 (tx_queue_stats_mappings[i].queue_id == queue_id)) {
3053 tx_queue_stats_mappings[i].stats_counter_id = map_value;
3054 existing_mapping_found = 1;
3058 if (!existing_mapping_found) { /* A new additional mapping... */
3059 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
3060 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
3061 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
3062 nb_tx_queue_stats_mappings++;
3066 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3067 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3068 (rx_queue_stats_mappings[i].queue_id == queue_id)) {
3069 rx_queue_stats_mappings[i].stats_counter_id = map_value;
3070 existing_mapping_found = 1;
3074 if (!existing_mapping_found) { /* A new additional mapping... */
3075 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
3076 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
3077 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
3078 nb_rx_queue_stats_mappings++;
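/*
 * Usage sketch (illustrative, not part of the original command set):
 * map RX queue 3 of port 0 onto per-queue stats counter 5 (is_rx = 1),
 * then TX queue 1 of the same port onto counter 2 (is_rx = 0):
 *
 *	set_qmap(0, 1, 3, 5);
 *	set_qmap(0, 0, 1, 2);
 *
 * Both map_value arguments must be below RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */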
3084 set_xstats_hide_zero(uint8_t on_off)
3086 xstats_hide_zero = on_off;
3090 print_fdir_mask(struct rte_eth_fdir_masks *mask)
3092 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
3094 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3095 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
3096 " tunnel_id: 0x%08x",
3097 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
3098 rte_be_to_cpu_32(mask->tunnel_id_mask));
3099 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3100 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
3101 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
3102 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
3104 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
3105 rte_be_to_cpu_16(mask->src_port_mask),
3106 rte_be_to_cpu_16(mask->dst_port_mask));
3108 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3109 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
3110 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
3111 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
3112 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
3114 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3115 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
3116 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
3117 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
3118 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
3125 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3127 struct rte_eth_flex_payload_cfg *cfg;
3130 for (i = 0; i < flex_conf->nb_payloads; i++) {
3131 cfg = &flex_conf->flex_set[i];
3132 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
3134 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
3135 printf("\n L2_PAYLOAD: ");
3136 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
3137 printf("\n L3_PAYLOAD: ");
3138 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
3139 printf("\n L4_PAYLOAD: ");
3141 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
3142 for (j = 0; j < num; j++)
3143 printf(" %-5u", cfg->src_offset[j]);
3149 flowtype_to_str(uint16_t flow_type)
3151 struct flow_type_info {
3157 static struct flow_type_info flowtype_str_table[] = {
3158 {"raw", RTE_ETH_FLOW_RAW},
3159 {"ipv4", RTE_ETH_FLOW_IPV4},
3160 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
3161 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
3162 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
3163 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
3164 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
3165 {"ipv6", RTE_ETH_FLOW_IPV6},
3166 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
3167 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
3168 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
3169 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
3170 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
3171 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
3172 {"port", RTE_ETH_FLOW_PORT},
3173 {"vxlan", RTE_ETH_FLOW_VXLAN},
3174 {"geneve", RTE_ETH_FLOW_GENEVE},
3175 {"nvgre", RTE_ETH_FLOW_NVGRE},
3176 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
3179 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
3180 if (flowtype_str_table[i].ftype == flow_type)
3181 return flowtype_str_table[i].str;
3188 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3190 struct rte_eth_fdir_flex_mask *mask;
3194 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
3195 mask = &flex_conf->flex_mask[i];
3196 p = flowtype_to_str(mask->flow_type);
3197 printf("\n %s:\t", p ? p : "unknown");
3198 for (j = 0; j < num; j++)
3199 printf(" %02x", mask->mask[j]);
3205 print_fdir_flow_type(uint32_t flow_types_mask)
3210 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3211 if (!(flow_types_mask & (1 << i)))
3213 p = flowtype_to_str(i);
3223 fdir_get_infos(portid_t port_id)
3225 struct rte_eth_fdir_stats fdir_stat;
3226 struct rte_eth_fdir_info fdir_info;
3229 static const char *fdir_stats_border = "########################";
3231 if (port_id_is_invalid(port_id, ENABLED_WARN))
3233 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
3235 printf("\n FDIR is not supported on port %-2d\n",
3240 memset(&fdir_info, 0, sizeof(fdir_info));
3241 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3242 RTE_ETH_FILTER_INFO, &fdir_info);
3243 memset(&fdir_stat, 0, sizeof(fdir_stat));
3244 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3245 RTE_ETH_FILTER_STATS, &fdir_stat);
3246 printf("\n %s FDIR infos for port %-2d %s\n",
3247 fdir_stats_border, port_id, fdir_stats_border);
3249 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
3250 printf(" PERFECT\n");
3251 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
3252 printf(" PERFECT-MAC-VLAN\n");
3253 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3254 printf(" PERFECT-TUNNEL\n");
3255 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
3256 printf(" SIGNATURE\n");
3258 printf(" DISABLE\n");
3259 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
3260 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
3261 printf(" SUPPORTED FLOW TYPE: ");
3262 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
3264 printf(" FLEX PAYLOAD INFO:\n");
3265 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
3266 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
3267 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
3268 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
3269 fdir_info.flex_payload_unit,
3270 fdir_info.max_flex_payload_segment_num,
3271 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
3273 print_fdir_mask(&fdir_info.mask);
3274 if (fdir_info.flex_conf.nb_payloads > 0) {
3275 printf(" FLEX PAYLOAD SRC OFFSET:");
3276 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3278 if (fdir_info.flex_conf.nb_flexmasks > 0) {
3279 printf(" FLEX MASK CFG:");
3280 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3282 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
3283 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
3284 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
3285 fdir_info.guarant_spc, fdir_info.best_spc);
3286 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
3287 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
3288 " add: %-10"PRIu64" remove: %"PRIu64"\n"
3289 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
3290 fdir_stat.collision, fdir_stat.free,
3291 fdir_stat.maxhash, fdir_stat.maxlen,
3292 fdir_stat.add, fdir_stat.remove,
3293 fdir_stat.f_add, fdir_stat.f_remove);
3294 printf(" %s############################%s\n",
3295 fdir_stats_border, fdir_stats_border);
3299 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3301 struct rte_port *port;
3302 struct rte_eth_fdir_flex_conf *flex_conf;
3305 port = &ports[port_id];
3306 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3307 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3308 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3313 if (i >= RTE_ETH_FLOW_MAX) {
3314 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3315 idx = flex_conf->nb_flexmasks;
3316 flex_conf->nb_flexmasks++;
3318 printf("The flex mask table is full. Can not set flex"
3319 " mask for flow_type(%u).", cfg->flow_type);
3323 rte_memcpy(&flex_conf->flex_mask[idx],
3325 sizeof(struct rte_eth_fdir_flex_mask));
3329 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3331 struct rte_port *port;
3332 struct rte_eth_fdir_flex_conf *flex_conf;
3335 port = &ports[port_id];
3336 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3337 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3338 if (cfg->type == flex_conf->flex_set[i].type) {
3343 if (i >= RTE_ETH_PAYLOAD_MAX) {
3344 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3345 idx = flex_conf->nb_payloads;
3346 flex_conf->nb_payloads++;
3348 printf("The flex payload table is full. Can not set"
3349 " flex payload for type(%u).", cfg->type);
3353 rte_memcpy(&flex_conf->flex_set[idx],
3355 sizeof(struct rte_eth_flex_payload_cfg));
3360 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3362 #ifdef RTE_LIBRTE_IXGBE_PMD
3366 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3368 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3372 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3373 is_rx ? "rx" : "tx", port_id, diag);
3376 printf("VF %s setting not supported for port %d\n",
3377 is_rx ? "Rx" : "Tx", port_id);
3383 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3386 struct rte_eth_link link;
3388 if (port_id_is_invalid(port_id, ENABLED_WARN))
3390 rte_eth_link_get_nowait(port_id, &link);
3391 if (rate > link.link_speed) {
3392 printf("Invalid rate value:%u bigger than link speed: %u\n",
3393 rate, link.link_speed);
3396 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3399 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3405 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
3407 int diag = -ENOTSUP;
3411 RTE_SET_USED(q_msk);
3413 #ifdef RTE_LIBRTE_IXGBE_PMD
3414 if (diag == -ENOTSUP)
3415 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
3418 #ifdef RTE_LIBRTE_BNXT_PMD
3419 if (diag == -ENOTSUP)
3420 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
3425 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
3431 * Functions to manage the set of filtered Multicast MAC addresses.
3433 * A pool of filtered multicast MAC addresses is associated with each port.
3434 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3435 * The address of the pool and the number of valid multicast MAC addresses
3436 * recorded in the pool are stored in the fields "mc_addr_pool" and
3437 * "mc_addr_nb" of the "rte_port" data structure.
3439 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
3440 * to be supplied a contiguous array of multicast MAC addresses.
3441 * To comply with this constraint, the set of multicast addresses recorded
3442 * into the pool are systematically compacted at the beginning of the pool.
3443 * Hence, when a multicast address is removed from the pool, all following
3444 * addresses, if any, are copied back to keep the set contiguous.
3446 #define MCAST_POOL_INC 32
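/*
 * Growth example (illustrative): with MCAST_POOL_INC = 32, adding the
 * 33rd address finds mc_addr_nb % MCAST_POOL_INC == 0 and realloc()s
 * the pool from 32 to 64 entries of sizeof(struct ether_addr) bytes
 * each; additions 34 through 64 then reuse already-allocated slots.
 */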
3449 mcast_addr_pool_extend(struct rte_port *port)
3451 struct ether_addr *mc_pool;
3452 size_t mc_pool_size;
3455 * If a free entry is available at the end of the pool, just
3456 * increment the number of recorded multicast addresses.
3458 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3464 * [re]allocate a pool with MCAST_POOL_INC more entries.
3465 * The previous test guarantees that port->mc_addr_nb is a multiple
3466 * of MCAST_POOL_INC.
3468 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
3470 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
3472 if (mc_pool == NULL) {
3473 printf("allocation of pool of %u multicast addresses failed\n",
3474 port->mc_addr_nb + MCAST_POOL_INC);
3478 port->mc_addr_pool = mc_pool;
3485 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
3488 if (addr_idx == port->mc_addr_nb) {
3489 /* No need to recompact the set of multicast addresses. */
3490 if (port->mc_addr_nb == 0) {
3491 /* free the pool of multicast addresses. */
3492 free(port->mc_addr_pool);
3493 port->mc_addr_pool = NULL;
3497 memmove(&port->mc_addr_pool[addr_idx],
3498 &port->mc_addr_pool[addr_idx + 1],
3499 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
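/*
 * Compaction example (illustrative): with four recorded addresses
 * {A, B, C, D}, removing index 1 (B) leaves mc_addr_nb at 3 and
 * memmove()s {C, D} down one slot, preserving the contiguous set
 * {A, C, D} that rte_eth_dev_set_mc_addr_list() expects.
 */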
3503 eth_port_multicast_addr_list_set(portid_t port_id)
3505 struct rte_port *port;
3508 port = &ports[port_id];
3509 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3513 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
3514 port_id, port->mc_addr_nb, -diag);
3518 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr)
3520 struct rte_port *port;
3523 if (port_id_is_invalid(port_id, ENABLED_WARN))
3526 port = &ports[port_id];
3529 * Check that the added multicast MAC address is not already recorded
3530 * in the pool of multicast addresses.
3532 for (i = 0; i < port->mc_addr_nb; i++) {
3533 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3534 printf("multicast address already filtered by port\n");
3539 if (mcast_addr_pool_extend(port) != 0)
3541 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
3542 eth_port_multicast_addr_list_set(port_id);
3546 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr)
3548 struct rte_port *port;
3551 if (port_id_is_invalid(port_id, ENABLED_WARN))
3554 port = &ports[port_id];
3557 * Search the pool of multicast MAC addresses for the removed address.
3559 for (i = 0; i < port->mc_addr_nb; i++) {
3560 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
3563 if (i == port->mc_addr_nb) {
3564 printf("multicast address not filtered by port %d\n", port_id);
3568 mcast_addr_pool_remove(port, i);
3569 eth_port_multicast_addr_list_set(port_id);
3573 port_dcb_info_display(portid_t port_id)
3575 struct rte_eth_dcb_info dcb_info;
3578 static const char *border = "================";
3580 if (port_id_is_invalid(port_id, ENABLED_WARN))
3583 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
3585 printf("\n Failed to get dcb infos on port %-2d\n",
3589 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
3590 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
3592 for (i = 0; i < dcb_info.nb_tcs; i++)
3594 printf("\n Priority : ");
3595 for (i = 0; i < dcb_info.nb_tcs; i++)
3596 printf("\t%4d", dcb_info.prio_tc[i]);
3597 printf("\n BW percent :");
3598 for (i = 0; i < dcb_info.nb_tcs; i++)
3599 printf("\t%4d%%", dcb_info.tc_bws[i]);
3600 printf("\n RXQ base : ");
3601 for (i = 0; i < dcb_info.nb_tcs; i++)
3602 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
3603 printf("\n RXQ number :");
3604 for (i = 0; i < dcb_info.nb_tcs; i++)
3605 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
3606 printf("\n TXQ base : ");
3607 for (i = 0; i < dcb_info.nb_tcs; i++)
3608 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
3609 printf("\n TXQ number :");
3610 for (i = 0; i < dcb_info.nb_tcs; i++)
3611 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
3616 open_file(const char *file_path, uint32_t *size)
3618 int fd = open(file_path, O_RDONLY);
3620 uint8_t *buf = NULL;
3628 printf("%s: Failed to open %s\n", __func__, file_path);
3632 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
3634 printf("%s: File operations failed\n", __func__);
3638 pkg_size = st_buf.st_size;
3641 printf("%s: File operations failed\n", __func__);
3645 buf = (uint8_t *)malloc(pkg_size);
3648 printf("%s: Failed to malloc memory\n", __func__);
3652 ret = read(fd, buf, pkg_size);
3655 printf("%s: File read operation failed\n", __func__);
3669 save_file(const char *file_path, uint8_t *buf, uint32_t size)
3671 FILE *fh = fopen(file_path, "wb");
3674 printf("%s: Failed to open %s\n", __func__, file_path);
3678 if (fwrite(buf, 1, size, fh) != size) {
3680 printf("%s: File write operation failed\n", __func__);
3690 close_file(uint8_t *buf)
3701 port_queue_region_info_display(portid_t port_id, void *buf)
3703 #ifdef RTE_LIBRTE_I40E_PMD
3705 struct rte_pmd_i40e_queue_regions *info =
3706 (struct rte_pmd_i40e_queue_regions *)buf;
3707 static const char *queue_region_info_stats_border = "-------";
3709 if (!info->queue_region_number)
3710 printf("there is no region has been set before");
3712 printf("\n %s All queue region info for port=%2d %s",
3713 queue_region_info_stats_border, port_id,
3714 queue_region_info_stats_border);
3715 printf("\n queue_region_number: %-14u \n",
3716 info->queue_region_number);
3718 for (i = 0; i < info->queue_region_number; i++) {
3719 printf("\n region_id: %-14u queue_number: %-14u "
3720 "queue_start_index: %-14u \n",
3721 info->region[i].region_id,
3722 info->region[i].queue_num,
3723 info->region[i].queue_start_index);
3725 printf(" user_priority_num is %-14u :",
3726 info->region[i].user_priority_num);
3727 for (j = 0; j < info->region[i].user_priority_num; j++)
3728 printf(" %-14u ", info->region[i].user_priority[j]);
3730 printf("\n flowtype_num is %-14u :",
3731 info->region[i].flowtype_num);
3732 for (j = 0; j < info->region[i].flowtype_num; j++)
3733 printf(" %-14u ", info->region[i].hw_flowtype[j]);
3736 RTE_SET_USED(port_id);