1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2013-2014 6WIND S.A.
13 #include <sys/queue.h>
14 #include <sys/types.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
34 #include <rte_interrupts.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
41 #include <rte_errno.h>
42 #ifdef RTE_LIBRTE_IXGBE_PMD
43 #include <rte_pmd_ixgbe.h>
45 #ifdef RTE_LIBRTE_I40E_PMD
46 #include <rte_pmd_i40e.h>
48 #ifdef RTE_LIBRTE_BNXT_PMD
49 #include <rte_pmd_bnxt.h>
52 #include <cmdline_parse_etheraddr.h>
56 static char *flowtype_to_str(uint16_t flow_type);
59 enum tx_pkt_split split;
63 .split = TX_PKT_SPLIT_OFF,
67 .split = TX_PKT_SPLIT_ON,
71 .split = TX_PKT_SPLIT_RND,
76 const struct rss_type_info rss_type_table[] = {
77 { "ipv4", ETH_RSS_IPV4 },
78 { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
79 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
80 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
81 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
82 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
83 { "ipv6", ETH_RSS_IPV6 },
84 { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
85 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
86 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
87 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
88 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
89 { "l2-payload", ETH_RSS_L2_PAYLOAD },
90 { "ipv6-ex", ETH_RSS_IPV6_EX },
91 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
92 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
93 { "port", ETH_RSS_PORT },
94 { "vxlan", ETH_RSS_VXLAN },
95 { "geneve", ETH_RSS_GENEVE },
96 { "nvgre", ETH_RSS_NVGRE },
98 { "udp", ETH_RSS_UDP },
99 { "tcp", ETH_RSS_TCP },
100 { "sctp", ETH_RSS_SCTP },
101 { "tunnel", ETH_RSS_TUNNEL },
106 print_ethaddr(const char *name, struct ether_addr *eth_addr)
108 char buf[ETHER_ADDR_FMT_SIZE];
109 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
110 printf("%s%s", name, buf);
114 nic_stats_display(portid_t port_id)
116 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
117 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
118 static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
119 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
120 uint64_t mpps_rx, mpps_tx;
121 struct rte_eth_stats stats;
122 struct rte_port *port = &ports[port_id];
125 static const char *nic_stats_border = "########################";
127 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
131 rte_eth_stats_get(port_id, &stats);
132 printf("\n %s NIC statistics for port %-2d %s\n",
133 nic_stats_border, port_id, nic_stats_border);
135 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
136 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
138 stats.ipackets, stats.imissed, stats.ibytes);
139 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
140 printf(" RX-nombuf: %-10"PRIu64"\n",
142 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
144 stats.opackets, stats.oerrors, stats.obytes);
147 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
148 " RX-bytes: %10"PRIu64"\n",
149 stats.ipackets, stats.ierrors, stats.ibytes);
150 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
151 printf(" RX-nombuf: %10"PRIu64"\n",
153 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
154 " TX-bytes: %10"PRIu64"\n",
155 stats.opackets, stats.oerrors, stats.obytes);
158 if (port->rx_queue_stats_mapping_enabled) {
160 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
161 printf(" Stats reg %2d RX-packets: %10"PRIu64
162 " RX-errors: %10"PRIu64
163 " RX-bytes: %10"PRIu64"\n",
164 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
167 if (port->tx_queue_stats_mapping_enabled) {
169 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
170 printf(" Stats reg %2d TX-packets: %10"PRIu64
171 " TX-bytes: %10"PRIu64"\n",
172 i, stats.q_opackets[i], stats.q_obytes[i]);
176 diff_cycles = prev_cycles[port_id];
177 prev_cycles[port_id] = rte_rdtsc();
179 diff_cycles = prev_cycles[port_id] - diff_cycles;
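/*
 * The throughput printed below is derived from the deltas accumulated since
 * the previous call: packets per second = packet delta * TSC frequency / cycle delta.
 */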
181 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
182 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
183 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
184 (stats.opackets - prev_pkts_tx[port_id]) : 0;
185 prev_pkts_rx[port_id] = stats.ipackets;
186 prev_pkts_tx[port_id] = stats.opackets;
187 mpps_rx = diff_cycles > 0 ?
188 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
189 mpps_tx = diff_cycles > 0 ?
190 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
191 printf("\n Throughput (since last show)\n");
192 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n",
195 printf(" %s############################%s\n",
196 nic_stats_border, nic_stats_border);
200 nic_stats_clear(portid_t port_id)
202 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
206 rte_eth_stats_reset(port_id);
207 printf("\n NIC statistics for port %d cleared\n", port_id);
211 nic_xstats_display(portid_t port_id)
213 struct rte_eth_xstat *xstats;
214 int cnt_xstats, idx_xstat;
215 struct rte_eth_xstat_name *xstats_names;
217 printf("###### NIC extended statistics for port %-2d\n", port_id);
218 if (!rte_eth_dev_is_valid_port(port_id)) {
219 printf("Error: Invalid port number %i\n", port_id);
224 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
225 if (cnt_xstats < 0) {
226 printf("Error: Cannot get count of xstats\n");
230 /* Get id-name lookup table */
231 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
232 if (xstats_names == NULL) {
233 printf("Cannot allocate memory for xstats lookup\n");
236 if (cnt_xstats != rte_eth_xstats_get_names(
237 port_id, xstats_names, cnt_xstats)) {
238 printf("Error: Cannot get xstats lookup\n");
243 /* Get stats themselves */
244 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
245 if (xstats == NULL) {
246 printf("Cannot allocate memory for xstats\n");
250 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
251 printf("Error: Unable to get xstats\n");
258 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
259 if (xstats_hide_zero && !xstats[idx_xstat].value)
261 printf("%s: %"PRIu64"\n",
262 xstats_names[idx_xstat].name,
263 xstats[idx_xstat].value);
270 nic_xstats_clear(portid_t port_id)
272 rte_eth_xstats_reset(port_id);
276 nic_stats_mapping_display(portid_t port_id)
278 struct rte_port *port = &ports[port_id];
281 static const char *nic_stats_mapping_border = "########################";
283 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
288 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
289 printf("Port id %d - either does not support queue statistic mapping or"
290 " no queue statistic mapping set\n", port_id);
294 printf("\n %s NIC statistics mapping for port %-2d %s\n",
295 nic_stats_mapping_border, port_id, nic_stats_mapping_border);
297 if (port->rx_queue_stats_mapping_enabled) {
298 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
299 if (rx_queue_stats_mappings[i].port_id == port_id) {
300 printf(" RX-queue %2d mapped to Stats Reg %2d\n",
301 rx_queue_stats_mappings[i].queue_id,
302 rx_queue_stats_mappings[i].stats_counter_id);
309 if (port->tx_queue_stats_mapping_enabled) {
310 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
311 if (tx_queue_stats_mappings[i].port_id == port_id) {
312 printf(" TX-queue %2d mapped to Stats Reg %2d\n",
313 tx_queue_stats_mappings[i].queue_id,
314 tx_queue_stats_mappings[i].stats_counter_id);
319 printf(" %s####################################%s\n",
320 nic_stats_mapping_border, nic_stats_mapping_border);
324 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
326 struct rte_eth_rxq_info qinfo;
328 static const char *info_border = "*********************";
330 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
332 printf("Failed to retrieve information for port: %u, "
333 "RX queue: %hu\nerror desc: %s(%d)\n",
334 port_id, queue_id, strerror(-rc), rc);
338 printf("\n%s Infos for port %-2u, RX queue %-2u %s",
339 info_border, port_id, queue_id, info_border);
341 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
342 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
343 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
344 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
345 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
346 printf("\nRX drop packets: %s",
347 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
348 printf("\nRX deferred start: %s",
349 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
350 printf("\nRX scattered packets: %s",
351 (qinfo.scattered_rx != 0) ? "on" : "off");
352 printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
357 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
359 struct rte_eth_txq_info qinfo;
361 static const char *info_border = "*********************";
363 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
365 printf("Failed to retrieve information for port: %u, "
366 "TX queue: %hu\nerror desc: %s(%d)\n",
367 port_id, queue_id, strerror(-rc), rc);
371 printf("\n%s Infos for port %-2u, TX queue %-2u %s",
372 info_border, port_id, queue_id, info_border);
374 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
375 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
376 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
377 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
378 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
379 printf("\nTX deferred start: %s",
380 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
381 printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
386 port_infos_display(portid_t port_id)
388 struct rte_port *port;
389 struct ether_addr mac_addr;
390 struct rte_eth_link link;
391 struct rte_eth_dev_info dev_info;
393 struct rte_mempool * mp;
394 static const char *info_border = "*********************";
396 char name[RTE_ETH_NAME_MAX_LEN];
398 if (port_id_is_invalid(port_id, ENABLED_WARN)) {
402 port = &ports[port_id];
403 rte_eth_link_get_nowait(port_id, &link);
404 memset(&dev_info, 0, sizeof(dev_info));
405 rte_eth_dev_info_get(port_id, &dev_info);
406 printf("\n%s Infos for port %-2d %s\n",
407 info_border, port_id, info_border);
408 rte_eth_macaddr_get(port_id, &mac_addr);
409 print_ethaddr("MAC address: ", &mac_addr);
410 rte_eth_dev_get_name_by_port(port_id, name);
411 printf("\nDevice name: %s", name);
412 printf("\nDriver name: %s", dev_info.driver_name);
413 printf("\nConnect to socket: %u", port->socket_id);
415 if (port_numa[port_id] != NUMA_NO_CONFIG) {
416 mp = mbuf_pool_find(port_numa[port_id]);
418 printf("\nmemory allocation on the socket: %d",
421 printf("\nmemory allocation on the socket: %u",port->socket_id);
423 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
424 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
425 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
426 ("full-duplex") : ("half-duplex"));
428 if (!rte_eth_dev_get_mtu(port_id, &mtu))
429 printf("MTU: %u\n", mtu);
431 printf("Promiscuous mode: %s\n",
432 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
433 printf("Allmulticast mode: %s\n",
434 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
435 printf("Maximum number of MAC addresses: %u\n",
436 (unsigned int)(port->dev_info.max_mac_addrs));
437 printf("Maximum number of MAC addresses of hash filtering: %u\n",
438 (unsigned int)(port->dev_info.max_hash_mac_addrs));
440 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
441 if (vlan_offload >= 0) {
442 printf("VLAN offload:\n");
443 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
444 printf(" strip on \n");
446 printf(" strip off \n");
448 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
449 printf(" filter on \n");
451 printf(" filter off \n");
453 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
454 printf(" qinq(extend) on \n");
456 printf(" qinq(extend) off \n");
459 if (dev_info.hash_key_size > 0)
460 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
461 if (dev_info.reta_size > 0)
462 printf("Redirection table size: %u\n", dev_info.reta_size);
463 if (!dev_info.flow_type_rss_offloads)
464 printf("No RSS offload flow type is supported.\n");
469 printf("Supported RSS offload flow types:\n");
470 for (i = RTE_ETH_FLOW_UNKNOWN + 1;
471 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
472 if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
474 p = flowtype_to_str(i);
478 printf(" user defined %d\n", i);
482 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
483 printf("Maximum configurable length of RX packet: %u\n",
484 dev_info.max_rx_pktlen);
485 if (dev_info.max_vfs)
486 printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
487 if (dev_info.max_vmdq_pools)
488 printf("Maximum number of VMDq pools: %u\n",
489 dev_info.max_vmdq_pools);
491 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
492 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
493 printf("Max possible number of RXDs per queue: %hu\n",
494 dev_info.rx_desc_lim.nb_max);
495 printf("Min possible number of RXDs per queue: %hu\n",
496 dev_info.rx_desc_lim.nb_min);
497 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
499 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
500 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
501 printf("Max possible number of TXDs per queue: %hu\n",
502 dev_info.tx_desc_lim.nb_max);
503 printf("Min possible number of TXDs per queue: %hu\n",
504 dev_info.tx_desc_lim.nb_min);
505 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
507 /* Show switch info only if valid switch domain and port id is set */
508 if (dev_info.switch_info.domain_id !=
509 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
510 if (dev_info.switch_info.name)
511 printf("Switch name: %s\n", dev_info.switch_info.name);
513 printf("Switch domain Id: %u\n",
514 dev_info.switch_info.domain_id);
515 printf("Switch Port Id: %u\n",
516 dev_info.switch_info.port_id);
521 port_offload_cap_display(portid_t port_id)
523 struct rte_eth_dev_info dev_info;
524 static const char *info_border = "************";
526 if (port_id_is_invalid(port_id, ENABLED_WARN))
529 rte_eth_dev_info_get(port_id, &dev_info);
531 printf("\n%s Port %d supported offload features: %s\n",
532 info_border, port_id, info_border);
534 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
535 printf("VLAN stripped: ");
536 if (ports[port_id].dev_conf.rxmode.offloads &
537 DEV_RX_OFFLOAD_VLAN_STRIP)
543 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
544 printf("Double VLANs stripped: ");
545 if (ports[port_id].dev_conf.rxmode.offloads &
546 DEV_RX_OFFLOAD_QINQ_STRIP)
552 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
553 printf("RX IPv4 checksum: ");
554 if (ports[port_id].dev_conf.rxmode.offloads &
555 DEV_RX_OFFLOAD_IPV4_CKSUM)
561 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
562 printf("RX UDP checksum: ");
563 if (ports[port_id].dev_conf.rxmode.offloads &
564 DEV_RX_OFFLOAD_UDP_CKSUM)
570 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
571 printf("RX TCP checksum: ");
572 if (ports[port_id].dev_conf.rxmode.offloads &
573 DEV_RX_OFFLOAD_TCP_CKSUM)
579 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
580 printf("RX SCTP checksum: ");
581 if (ports[port_id].dev_conf.rxmode.offloads &
582 DEV_RX_OFFLOAD_SCTP_CKSUM)
588 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
589 printf("RX Outer IPv4 checksum: ");
590 if (ports[port_id].dev_conf.rxmode.offloads &
591 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
597 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
598 printf("RX Outer UDP checksum: ");
599 if (ports[port_id].dev_conf.rxmode.offloads &
600 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
606 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
607 printf("Large receive offload: ");
608 if (ports[port_id].dev_conf.rxmode.offloads &
609 DEV_RX_OFFLOAD_TCP_LRO)
615 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
616 printf("HW timestamp: ");
617 if (ports[port_id].dev_conf.rxmode.offloads &
618 DEV_RX_OFFLOAD_TIMESTAMP)
624 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
625 printf("Rx Keep CRC: ");
626 if (ports[port_id].dev_conf.rxmode.offloads &
627 DEV_RX_OFFLOAD_KEEP_CRC)
633 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
634 printf("RX offload security: ");
635 if (ports[port_id].dev_conf.rxmode.offloads &
636 DEV_RX_OFFLOAD_SECURITY)
642 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
643 printf("VLAN insert: ");
644 if (ports[port_id].dev_conf.txmode.offloads &
645 DEV_TX_OFFLOAD_VLAN_INSERT)
651 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
652 printf("Double VLANs insert: ");
653 if (ports[port_id].dev_conf.txmode.offloads &
654 DEV_TX_OFFLOAD_QINQ_INSERT)
660 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
661 printf("TX IPv4 checksum: ");
662 if (ports[port_id].dev_conf.txmode.offloads &
663 DEV_TX_OFFLOAD_IPV4_CKSUM)
669 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
670 printf("TX UDP checksum: ");
671 if (ports[port_id].dev_conf.txmode.offloads &
672 DEV_TX_OFFLOAD_UDP_CKSUM)
678 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
679 printf("TX TCP checksum: ");
680 if (ports[port_id].dev_conf.txmode.offloads &
681 DEV_TX_OFFLOAD_TCP_CKSUM)
687 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
688 printf("TX SCTP checksum: ");
689 if (ports[port_id].dev_conf.txmode.offloads &
690 DEV_TX_OFFLOAD_SCTP_CKSUM)
696 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
697 printf("TX Outer IPv4 checksum: ");
698 if (ports[port_id].dev_conf.txmode.offloads &
699 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
705 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
706 printf("TX TCP segmentation: ");
707 if (ports[port_id].dev_conf.txmode.offloads &
708 DEV_TX_OFFLOAD_TCP_TSO)
714 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
715 printf("TX UDP segmentation: ");
716 if (ports[port_id].dev_conf.txmode.offloads &
717 DEV_TX_OFFLOAD_UDP_TSO)
723 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
724 printf("TSO for VXLAN tunnel packet: ");
725 if (ports[port_id].dev_conf.txmode.offloads &
726 DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
732 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
733 printf("TSO for GRE tunnel packet: ");
734 if (ports[port_id].dev_conf.txmode.offloads &
735 DEV_TX_OFFLOAD_GRE_TNL_TSO)
741 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
742 printf("TSO for IPIP tunnel packet: ");
743 if (ports[port_id].dev_conf.txmode.offloads &
744 DEV_TX_OFFLOAD_IPIP_TNL_TSO)
750 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
751 printf("TSO for GENEVE tunnel packet: ");
752 if (ports[port_id].dev_conf.txmode.offloads &
753 DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
759 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
760 printf("IP tunnel TSO: ");
761 if (ports[port_id].dev_conf.txmode.offloads &
762 DEV_TX_OFFLOAD_IP_TNL_TSO)
768 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
769 printf("UDP tunnel TSO: ");
770 if (ports[port_id].dev_conf.txmode.offloads &
771 DEV_TX_OFFLOAD_UDP_TNL_TSO)
777 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
778 printf("TX Outer UDP checksum: ");
779 if (ports[port_id].dev_conf.txmode.offloads &
780 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
789 port_id_is_invalid(portid_t port_id, enum print_warning warning)
793 if (port_id == (portid_t)RTE_PORT_ALL)
796 RTE_ETH_FOREACH_DEV(pid)
800 if (warning == ENABLED_WARN)
801 printf("Invalid port %d\n", port_id);
806 void print_valid_ports(void)
810 printf("The valid ports array is [");
811 RTE_ETH_FOREACH_DEV(pid) {
818 vlan_id_is_invalid(uint16_t vlan_id)
822 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
827 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
829 const struct rte_pci_device *pci_dev;
830 const struct rte_bus *bus;
834 printf("Port register offset 0x%X not aligned on a 4-byte "
840 if (!ports[port_id].dev_info.device) {
841 printf("Invalid device\n");
845 bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
846 if (bus && !strcmp(bus->name, "pci")) {
847 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
849 printf("Not a PCI device\n");
853 pci_len = pci_dev->mem_resource[0].len;
854 if (reg_off >= pci_len) {
855 printf("Port %d: register offset %u (0x%X) out of port PCI "
856 "resource (length=%"PRIu64")\n",
857 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
864 reg_bit_pos_is_invalid(uint8_t bit_pos)
868 printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
872 #define display_port_and_reg_off(port_id, reg_off) \
873 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
876 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
878 display_port_and_reg_off(port_id, (unsigned)reg_off);
879 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
883 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
888 if (port_id_is_invalid(port_id, ENABLED_WARN))
890 if (port_reg_off_is_invalid(port_id, reg_off))
892 if (reg_bit_pos_is_invalid(bit_x))
894 reg_v = port_id_pci_reg_read(port_id, reg_off);
895 display_port_and_reg_off(port_id, (unsigned)reg_off);
896 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
900 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
901 uint8_t bit1_pos, uint8_t bit2_pos)
907 if (port_id_is_invalid(port_id, ENABLED_WARN))
909 if (port_reg_off_is_invalid(port_id, reg_off))
911 if (reg_bit_pos_is_invalid(bit1_pos))
913 if (reg_bit_pos_is_invalid(bit2_pos))
915 if (bit1_pos > bit2_pos)
916 l_bit = bit2_pos, h_bit = bit1_pos;
918 l_bit = bit1_pos, h_bit = bit2_pos;
920 reg_v = port_id_pci_reg_read(port_id, reg_off);
923 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
924 display_port_and_reg_off(port_id, (unsigned)reg_off);
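/* One hex digit is printed per 4 bits of the extracted bit field. */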
925 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
926 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
930 port_reg_display(portid_t port_id, uint32_t reg_off)
934 if (port_id_is_invalid(port_id, ENABLED_WARN))
936 if (port_reg_off_is_invalid(port_id, reg_off))
938 reg_v = port_id_pci_reg_read(port_id, reg_off);
939 display_port_reg_value(port_id, reg_off, reg_v);
943 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
948 if (port_id_is_invalid(port_id, ENABLED_WARN))
950 if (port_reg_off_is_invalid(port_id, reg_off))
952 if (reg_bit_pos_is_invalid(bit_pos))
955 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
958 reg_v = port_id_pci_reg_read(port_id, reg_off);
960 reg_v &= ~(1 << bit_pos);
962 reg_v |= (1 << bit_pos);
963 port_id_pci_reg_write(port_id, reg_off, reg_v);
964 display_port_reg_value(port_id, reg_off, reg_v);
968 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
969 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
976 if (port_id_is_invalid(port_id, ENABLED_WARN))
978 if (port_reg_off_is_invalid(port_id, reg_off))
980 if (reg_bit_pos_is_invalid(bit1_pos))
982 if (reg_bit_pos_is_invalid(bit2_pos))
984 if (bit1_pos > bit2_pos)
985 l_bit = bit2_pos, h_bit = bit1_pos;
987 l_bit = bit1_pos, h_bit = bit2_pos;
989 if ((h_bit - l_bit) < 31)
990 max_v = (1 << (h_bit - l_bit + 1)) - 1;
995 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
996 (unsigned)value, (unsigned)value,
997 (unsigned)max_v, (unsigned)max_v);
1000 reg_v = port_id_pci_reg_read(port_id, reg_off);
1001 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1002 reg_v |= (value << l_bit); /* Set changed bits */
1003 port_id_pci_reg_write(port_id, reg_off, reg_v);
1004 display_port_reg_value(port_id, reg_off, reg_v);
1008 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1010 if (port_id_is_invalid(port_id, ENABLED_WARN))
1012 if (port_reg_off_is_invalid(port_id, reg_off))
1014 port_id_pci_reg_write(port_id, reg_off, reg_v);
1015 display_port_reg_value(port_id, reg_off, reg_v);
1019 port_mtu_set(portid_t port_id, uint16_t mtu)
1023 if (port_id_is_invalid(port_id, ENABLED_WARN))
1025 diag = rte_eth_dev_set_mtu(port_id, mtu);
1028 printf("Set MTU failed. diag=%d\n", diag);
1031 /* Generic flow management functions. */
1033 /** Generate a port_flow entry from attributes/pattern/actions. */
1034 static struct port_flow *
1035 port_flow_new(const struct rte_flow_attr *attr,
1036 const struct rte_flow_item *pattern,
1037 const struct rte_flow_action *actions,
1038 struct rte_flow_error *error)
1040 const struct rte_flow_conv_rule rule = {
1042 .pattern_ro = pattern,
1043 .actions_ro = actions,
1045 struct port_flow *pf;
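/*
 * rte_flow_conv() is used twice: the first call, with a NULL destination,
 * only returns the number of bytes needed to store the rule; the second
 * call below copies attr/pattern/actions into the freshly allocated entry.
 */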
1048 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1051 pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1054 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1058 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1065 /** Print a message out of a flow error. */
1067 port_flow_complain(struct rte_flow_error *error)
1069 static const char *const errstrlist[] = {
1070 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1071 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1072 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1073 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1074 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1075 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1076 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1077 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1078 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1079 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1080 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1081 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1082 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1083 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1084 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1085 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1086 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1090 int err = rte_errno;
1092 if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1093 !errstrlist[error->type])
1094 errstr = "unknown type";
1096 errstr = errstrlist[error->type];
1097 printf("Caught error type %d (%s): %s%s: %s\n",
1098 error->type, errstr,
1099 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1100 error->cause), buf) : "",
1101 error->message ? error->message : "(no stated reason)",
1106 /** Validate flow rule. */
1108 port_flow_validate(portid_t port_id,
1109 const struct rte_flow_attr *attr,
1110 const struct rte_flow_item *pattern,
1111 const struct rte_flow_action *actions)
1113 struct rte_flow_error error;
1115 /* Poisoning to make sure PMDs update it in case of error. */
1116 memset(&error, 0x11, sizeof(error));
1117 if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1118 return port_flow_complain(&error);
1119 printf("Flow rule validated\n");
1123 /** Create flow rule. */
1125 port_flow_create(portid_t port_id,
1126 const struct rte_flow_attr *attr,
1127 const struct rte_flow_item *pattern,
1128 const struct rte_flow_action *actions)
1130 struct rte_flow *flow;
1131 struct rte_port *port;
1132 struct port_flow *pf;
1134 struct rte_flow_error error;
1136 /* Poisoning to make sure PMDs update it in case of error. */
1137 memset(&error, 0x22, sizeof(error));
1138 flow = rte_flow_create(port_id, attr, pattern, actions, &error);
1140 return port_flow_complain(&error);
1141 port = &ports[port_id];
1142 if (port->flow_list) {
1143 if (port->flow_list->id == UINT32_MAX) {
1144 printf("Highest rule ID is already assigned, delete"
1146 rte_flow_destroy(port_id, flow, NULL);
1149 id = port->flow_list->id + 1;
1152 pf = port_flow_new(attr, pattern, actions, &error);
1154 rte_flow_destroy(port_id, flow, NULL);
1155 return port_flow_complain(&error);
1157 pf->next = port->flow_list;
1160 port->flow_list = pf;
1161 printf("Flow rule #%u created\n", pf->id);
1165 /** Destroy a number of flow rules. */
1167 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
1169 struct rte_port *port;
1170 struct port_flow **tmp;
1174 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1175 port_id == (portid_t)RTE_PORT_ALL)
1177 port = &ports[port_id];
1178 tmp = &port->flow_list;
1182 for (i = 0; i != n; ++i) {
1183 struct rte_flow_error error;
1184 struct port_flow *pf = *tmp;
1186 if (rule[i] != pf->id)
1189 * Poisoning to make sure PMDs update it in case
1192 memset(&error, 0x33, sizeof(error));
1193 if (rte_flow_destroy(port_id, pf->flow, &error)) {
1194 ret = port_flow_complain(&error);
1197 printf("Flow rule #%u destroyed\n", pf->id);
1203 tmp = &(*tmp)->next;
1209 /** Remove all flow rules. */
1211 port_flow_flush(portid_t port_id)
1213 struct rte_flow_error error;
1214 struct rte_port *port;
1217 /* Poisoning to make sure PMDs update it in case of error. */
1218 memset(&error, 0x44, sizeof(error));
1219 if (rte_flow_flush(port_id, &error)) {
1220 ret = port_flow_complain(&error);
1221 if (port_id_is_invalid(port_id, DISABLED_WARN) ||
1222 port_id == (portid_t)RTE_PORT_ALL)
1225 port = &ports[port_id];
1226 while (port->flow_list) {
1227 struct port_flow *pf = port->flow_list->next;
1229 free(port->flow_list);
1230 port->flow_list = pf;
1235 /** Query a flow rule. */
1237 port_flow_query(portid_t port_id, uint32_t rule,
1238 const struct rte_flow_action *action)
1240 struct rte_flow_error error;
1241 struct rte_port *port;
1242 struct port_flow *pf;
1245 struct rte_flow_query_count count;
1249 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1250 port_id == (portid_t)RTE_PORT_ALL)
1252 port = &ports[port_id];
1253 for (pf = port->flow_list; pf; pf = pf->next)
1257 printf("Flow rule #%u not found\n", rule);
1260 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1261 &name, sizeof(name),
1262 (void *)(uintptr_t)action->type, &error);
1264 return port_flow_complain(&error);
1265 switch (action->type) {
1266 case RTE_FLOW_ACTION_TYPE_COUNT:
1269 printf("Cannot query action type %d (%s)\n",
1270 action->type, name);
1273 /* Poisoning to make sure PMDs update it in case of error. */
1274 memset(&error, 0x55, sizeof(error));
1275 memset(&query, 0, sizeof(query));
1276 if (rte_flow_query(port_id, pf->flow, action, &query, &error))
1277 return port_flow_complain(&error);
1278 switch (action->type) {
1279 case RTE_FLOW_ACTION_TYPE_COUNT:
1283 " hits: %" PRIu64 "\n"
1284 " bytes: %" PRIu64 "\n",
1286 query.count.hits_set,
1287 query.count.bytes_set,
1292 printf("Cannot display result for action type %d (%s)\n",
1293 action->type, name);
1299 /** List flow rules. */
1301 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
1303 struct rte_port *port;
1304 struct port_flow *pf;
1305 struct port_flow *list = NULL;
1308 if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1309 port_id == (portid_t)RTE_PORT_ALL)
1311 port = &ports[port_id];
1312 if (!port->flow_list)
1314 /* Sort flows by group, priority and ID. */
1315 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
1316 struct port_flow **tmp;
1317 const struct rte_flow_attr *curr = pf->rule.attr;
1320 /* Filter out unwanted groups. */
1321 for (i = 0; i != n; ++i)
1322 if (curr->group == group[i])
1327 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
1328 const struct rte_flow_attr *comp = (*tmp)->rule.attr;
1330 if (curr->group > comp->group ||
1331 (curr->group == comp->group &&
1332 curr->priority > comp->priority) ||
1333 (curr->group == comp->group &&
1334 curr->priority == comp->priority &&
1335 pf->id > (*tmp)->id))
1342 printf("ID\tGroup\tPrio\tAttr\tRule\n");
1343 for (pf = list; pf != NULL; pf = pf->tmp) {
1344 const struct rte_flow_item *item = pf->rule.pattern;
1345 const struct rte_flow_action *action = pf->rule.actions;
1348 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
1350 pf->rule.attr->group,
1351 pf->rule.attr->priority,
1352 pf->rule.attr->ingress ? 'i' : '-',
1353 pf->rule.attr->egress ? 'e' : '-',
1354 pf->rule.attr->transfer ? 't' : '-');
1355 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1356 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
1357 &name, sizeof(name),
1358 (void *)(uintptr_t)item->type,
1361 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
1362 printf("%s ", name);
1366 while (action->type != RTE_FLOW_ACTION_TYPE_END) {
1367 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1368 &name, sizeof(name),
1369 (void *)(uintptr_t)action->type,
1372 if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
1373 printf(" %s", name);
1380 /** Restrict ingress traffic to the defined flow rules. */
1382 port_flow_isolate(portid_t port_id, int set)
1384 struct rte_flow_error error;
1386 /* Poisoning to make sure PMDs update it in case of error. */
1387 memset(&error, 0x66, sizeof(error));
1388 if (rte_flow_isolate(port_id, set, &error))
1389 return port_flow_complain(&error);
1390 printf("Ingress traffic on port %u is %s to the defined flow rules\n",
1392 set ? "now restricted" : "not restricted anymore");
1397 * RX/TX ring descriptors display functions.
1400 rx_queue_id_is_invalid(queueid_t rxq_id)
1402 if (rxq_id < nb_rxq)
1404 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
1409 tx_queue_id_is_invalid(queueid_t txq_id)
1411 if (txq_id < nb_txq)
1413 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq);
1418 rx_desc_id_is_invalid(uint16_t rxdesc_id)
1420 if (rxdesc_id < nb_rxd)
1422 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
1428 tx_desc_id_is_invalid(uint16_t txdesc_id)
1430 if (txdesc_id < nb_txd)
1432 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
1437 static const struct rte_memzone *
1438 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
1440 char mz_name[RTE_MEMZONE_NAMESIZE];
1441 const struct rte_memzone *mz;
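/*
 * The lookup assumes the PMD registered its descriptor ring memzone
 * under the "<driver>_<ring>_<port>_<queue>" name built below.
 */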
1443 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
1444 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
1445 mz = rte_memzone_lookup(mz_name);
1447 printf("%s ring memory zoneof (port %d, queue %d) not"
1448 "found (zone name = %s\n",
1449 ring_name, port_id, q_id, mz_name);
1453 union igb_ring_dword {
1456 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1466 struct igb_ring_desc_32_bytes {
1467 union igb_ring_dword lo_dword;
1468 union igb_ring_dword hi_dword;
1469 union igb_ring_dword resv1;
1470 union igb_ring_dword resv2;
1473 struct igb_ring_desc_16_bytes {
1474 union igb_ring_dword lo_dword;
1475 union igb_ring_dword hi_dword;
1479 ring_rxd_display_dword(union igb_ring_dword dword)
1481 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
1482 (unsigned)dword.words.hi);
1486 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
1487 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1490 __rte_unused portid_t port_id,
1494 struct igb_ring_desc_16_bytes *ring =
1495 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1496 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1497 struct rte_eth_dev_info dev_info;
1499 memset(&dev_info, 0, sizeof(dev_info));
1500 rte_eth_dev_info_get(port_id, &dev_info);
1501 if (strstr(dev_info.driver_name, "i40e") != NULL) {
1502 /* 32 bytes RX descriptor, i40e only */
1503 struct igb_ring_desc_32_bytes *ring =
1504 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
1505 ring[desc_id].lo_dword.dword =
1506 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1507 ring_rxd_display_dword(ring[desc_id].lo_dword);
1508 ring[desc_id].hi_dword.dword =
1509 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1510 ring_rxd_display_dword(ring[desc_id].hi_dword);
1511 ring[desc_id].resv1.dword =
1512 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
1513 ring_rxd_display_dword(ring[desc_id].resv1);
1514 ring[desc_id].resv2.dword =
1515 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
1516 ring_rxd_display_dword(ring[desc_id].resv2);
1521 /* 16 bytes RX descriptor */
1522 ring[desc_id].lo_dword.dword =
1523 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1524 ring_rxd_display_dword(ring[desc_id].lo_dword);
1525 ring[desc_id].hi_dword.dword =
1526 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1527 ring_rxd_display_dword(ring[desc_id].hi_dword);
1531 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
1533 struct igb_ring_desc_16_bytes *ring;
1534 struct igb_ring_desc_16_bytes txd;
1536 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1537 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1538 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1539 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
1540 (unsigned)txd.lo_dword.words.lo,
1541 (unsigned)txd.lo_dword.words.hi,
1542 (unsigned)txd.hi_dword.words.lo,
1543 (unsigned)txd.hi_dword.words.hi);
1547 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
1549 const struct rte_memzone *rx_mz;
1551 if (port_id_is_invalid(port_id, ENABLED_WARN))
1553 if (rx_queue_id_is_invalid(rxq_id))
1555 if (rx_desc_id_is_invalid(rxd_id))
1557 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
1560 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
1564 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
1566 const struct rte_memzone *tx_mz;
1568 if (port_id_is_invalid(port_id, ENABLED_WARN))
1570 if (tx_queue_id_is_invalid(txq_id))
1572 if (tx_desc_id_is_invalid(txd_id))
1574 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
1577 ring_tx_descriptor_display(tx_mz, txd_id);
1581 fwd_lcores_config_display(void)
1585 printf("List of forwarding lcores:");
1586 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
1587 printf(" %2u", fwd_lcores_cpuids[lc_id]);
1591 rxtx_config_display(void)
1596 printf(" %s packet forwarding%s packets/burst=%d\n",
1597 cur_fwd_eng->fwd_mode_name,
1598 retry_enabled == 0 ? "" : " with retry",
1601 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
1602 printf(" packet len=%u - nb packet segments=%d\n",
1603 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
1605 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
1606 nb_fwd_lcores, nb_fwd_ports);
1608 RTE_ETH_FOREACH_DEV(pid) {
1609 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
1610 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
1611 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
1612 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
1613 uint16_t nb_rx_desc_tmp;
1614 uint16_t nb_tx_desc_tmp;
1615 struct rte_eth_rxq_info rx_qinfo;
1616 struct rte_eth_txq_info tx_qinfo;
1619 /* per port config */
1620 printf(" port %d: RX queue number: %d Tx queue number: %d\n",
1621 (unsigned int)pid, nb_rxq, nb_txq);
1623 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
1624 ports[pid].dev_conf.rxmode.offloads,
1625 ports[pid].dev_conf.txmode.offloads);
1627 /* per rx queue config only for first queue to be less verbose */
1628 for (qid = 0; qid < 1; qid++) {
1629 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
1631 nb_rx_desc_tmp = nb_rx_desc[qid];
1633 nb_rx_desc_tmp = rx_qinfo.nb_desc;
1635 printf(" RX queue: %d\n", qid);
1636 printf(" RX desc=%d - RX free threshold=%d\n",
1637 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
1638 printf(" RX threshold registers: pthresh=%d hthresh=%d "
1640 rx_conf[qid].rx_thresh.pthresh,
1641 rx_conf[qid].rx_thresh.hthresh,
1642 rx_conf[qid].rx_thresh.wthresh);
1643 printf(" RX Offloads=0x%"PRIx64"\n",
1644 rx_conf[qid].offloads);
1647 /* per tx queue config only for first queue to be less verbose */
1648 for (qid = 0; qid < 1; qid++) {
1649 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
1651 nb_tx_desc_tmp = nb_tx_desc[qid];
1653 nb_tx_desc_tmp = tx_qinfo.nb_desc;
1655 printf(" TX queue: %d\n", qid);
1656 printf(" TX desc=%d - TX free threshold=%d\n",
1657 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
1658 printf(" TX threshold registers: pthresh=%d hthresh=%d "
1660 tx_conf[qid].tx_thresh.pthresh,
1661 tx_conf[qid].tx_thresh.hthresh,
1662 tx_conf[qid].tx_thresh.wthresh);
1663 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
1664 tx_conf[qid].offloads, tx_conf->tx_rs_thresh);
1670 port_rss_reta_info(portid_t port_id,
1671 struct rte_eth_rss_reta_entry64 *reta_conf,
1672 uint16_t nb_entries)
1674 uint16_t i, idx, shift;
1677 if (port_id_is_invalid(port_id, ENABLED_WARN))
1680 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
1682 printf("Failed to get RSS RETA info, return code = %d\n", ret);
1686 for (i = 0; i < nb_entries; i++) {
1687 idx = i / RTE_RETA_GROUP_SIZE;
1688 shift = i % RTE_RETA_GROUP_SIZE;
1689 if (!(reta_conf[idx].mask & (1ULL << shift)))
1691 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
1692 i, reta_conf[idx].reta[shift]);
1697 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
1701 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
1703 struct rte_eth_rss_conf rss_conf = {0};
1704 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
1708 struct rte_eth_dev_info dev_info;
1709 uint8_t hash_key_size;
1711 if (port_id_is_invalid(port_id, ENABLED_WARN))
1714 rte_eth_dev_info_get(port_id, &dev_info);
1715 if (dev_info.hash_key_size > 0 &&
1716 dev_info.hash_key_size <= sizeof(rss_key))
1717 hash_key_size = dev_info.hash_key_size;
1719 printf("dev_info did not provide a valid hash key size\n");
1723 /* Get RSS hash key if asked to display it */
1724 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
1725 rss_conf.rss_key_len = hash_key_size;
1726 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1730 printf("port index %d invalid\n", port_id);
1733 printf("operation not supported by device\n");
1736 printf("operation failed - diag=%d\n", diag);
1741 rss_hf = rss_conf.rss_hf;
1743 printf("RSS disabled\n");
1746 printf("RSS functions:\n ");
1747 for (i = 0; rss_type_table[i].str; i++) {
1748 if (rss_hf & rss_type_table[i].rss_type)
1749 printf("%s ", rss_type_table[i].str);
1754 printf("RSS key:\n");
1755 for (i = 0; i < hash_key_size; i++)
1756 printf("%02X", rss_key[i]);
1761 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
1764 struct rte_eth_rss_conf rss_conf;
1768 rss_conf.rss_key = NULL;
1769 rss_conf.rss_key_len = hash_key_len;
1770 rss_conf.rss_hf = 0;
1771 for (i = 0; rss_type_table[i].str; i++) {
1772 if (!strcmp(rss_type_table[i].str, rss_type))
1773 rss_conf.rss_hf = rss_type_table[i].rss_type;
1775 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1777 rss_conf.rss_key = hash_key;
1778 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
1785 printf("port index %d invalid\n", port_id);
1788 printf("operation not supported by device\n");
1791 printf("operation failed - diag=%d\n", diag);
1797 * Setup forwarding configuration for each logical core.
1800 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
1802 streamid_t nb_fs_per_lcore;
1810 nb_fs = cfg->nb_fwd_streams;
1811 nb_fc = cfg->nb_fwd_lcores;
1812 if (nb_fs <= nb_fc) {
1813 nb_fs_per_lcore = 1;
1816 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
1817 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
1820 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
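/*
 * Example: 10 streams on 4 lcores gives nb_fs_per_lcore = 2 and
 * nb_extra = 2, so the first two lcores get 2 streams each and the
 * remaining two lcores get 3 streams each in the loops below.
 */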
1822 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
1823 fwd_lcores[lc_id]->stream_idx = sm_id;
1824 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
1825 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1829 * Assign extra remaining streams, if any.
1831 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
1832 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
1833 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
1834 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
1835 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1840 fwd_topology_tx_port_get(portid_t rxp)
1842 static int warning_once = 1;
1844 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
1846 switch (port_topology) {
1848 case PORT_TOPOLOGY_PAIRED:
1849 if ((rxp & 0x1) == 0) {
1850 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
1853 printf("\nWarning! port-topology=paired"
1854 " and odd forward ports number,"
1855 " the last port will pair with"
1862 case PORT_TOPOLOGY_CHAINED:
1863 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
1864 case PORT_TOPOLOGY_LOOP:
1870 simple_fwd_config_setup(void)
1874 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
1875 cur_fwd_config.nb_fwd_streams =
1876 (streamid_t) cur_fwd_config.nb_fwd_ports;
1878 /* reinitialize forwarding streams */
1882 * In the simple forwarding test, the number of forwarding cores
1883 * must be lower than or equal to the number of forwarding ports.
1885 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1886 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
1887 cur_fwd_config.nb_fwd_lcores =
1888 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
1889 setup_fwd_config_of_each_lcore(&cur_fwd_config);
1891 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1892 fwd_streams[i]->rx_port = fwd_ports_ids[i];
1893 fwd_streams[i]->rx_queue = 0;
1894 fwd_streams[i]->tx_port =
1895 fwd_ports_ids[fwd_topology_tx_port_get(i)];
1896 fwd_streams[i]->tx_queue = 0;
1897 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
1898 fwd_streams[i]->retry_enabled = retry_enabled;
1903 * For the RSS forwarding test, all streams are distributed over the lcores. Each
1904 * stream is composed of an RX queue to poll on an RX port for input packets,
1905 * associated with a TX queue of a TX port to which forwarded packets are sent.
1908 rss_fwd_config_setup(void)
1919 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1920 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1921 cur_fwd_config.nb_fwd_streams =
1922 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
1924 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
1925 cur_fwd_config.nb_fwd_lcores =
1926 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
1928 /* reinitialize forwarding streams */
1931 setup_fwd_config_of_each_lcore(&cur_fwd_config);
1933 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1934 struct fwd_stream *fs;
1936 fs = fwd_streams[sm_id];
1937 txp = fwd_topology_tx_port_get(rxp);
1938 fs->rx_port = fwd_ports_ids[rxp];
1940 fs->tx_port = fwd_ports_ids[txp];
1942 fs->peer_addr = fs->tx_port;
1943 fs->retry_enabled = retry_enabled;
1945 if (rxp < nb_fwd_ports)
1953 * For the DCB forwarding test, each core is assigned to one traffic class.
1955 * Each core is assigned a multi-stream, each stream being composed of
1956 * an RX queue to poll on an RX port for input packets, associated with
1957 * a TX queue of a TX port to which forwarded packets are sent. All RX and
1958 * TX queues map to the same traffic class.
1959 * If VMDQ and DCB co-exist, each traffic class on different pools shares
1963 dcb_fwd_config_setup(void)
1965 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
1966 portid_t txp, rxp = 0;
1967 queueid_t txq, rxq = 0;
1969 uint16_t nb_rx_queue, nb_tx_queue;
1970 uint16_t i, j, k, sm_id = 0;
1973 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1974 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1975 cur_fwd_config.nb_fwd_streams =
1976 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
1978 /* reinitialize forwarding streams */
1982 /* get the dcb info on the first RX and TX ports */
1983 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
1984 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
1986 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
1987 fwd_lcores[lc_id]->stream_nb = 0;
1988 fwd_lcores[lc_id]->stream_idx = sm_id;
1989 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
1990 /* if the nb_queue is zero, it means this TC is
1991 * not enabled on the pool
1993 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
1995 k = fwd_lcores[lc_id]->stream_nb +
1996 fwd_lcores[lc_id]->stream_idx;
1997 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
1998 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
1999 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2000 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2001 for (j = 0; j < nb_rx_queue; j++) {
2002 struct fwd_stream *fs;
2004 fs = fwd_streams[k + j];
2005 fs->rx_port = fwd_ports_ids[rxp];
2006 fs->rx_queue = rxq + j;
2007 fs->tx_port = fwd_ports_ids[txp];
2008 fs->tx_queue = txq + j % nb_tx_queue;
2009 fs->peer_addr = fs->tx_port;
2010 fs->retry_enabled = retry_enabled;
2012 fwd_lcores[lc_id]->stream_nb +=
2013 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2015 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2018 if (tc < rxp_dcb_info.nb_tcs)
2020 /* Restart from TC 0 on next RX port */
2022 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2024 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2027 if (rxp >= nb_fwd_ports)
2029 /* get the dcb information on next RX and TX ports */
2030 if ((rxp & 0x1) == 0)
2031 txp = (portid_t) (rxp + 1);
2033 txp = (portid_t) (rxp - 1);
2034 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2035 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2040 icmp_echo_config_setup(void)
2047 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2048 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2049 (nb_txq * nb_fwd_ports);
2051 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2052 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2053 cur_fwd_config.nb_fwd_streams =
2054 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2055 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2056 cur_fwd_config.nb_fwd_lcores =
2057 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2058 if (verbose_level > 0) {
2059 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2061 cur_fwd_config.nb_fwd_lcores,
2062 cur_fwd_config.nb_fwd_ports,
2063 cur_fwd_config.nb_fwd_streams);
2066 /* reinitialize forwarding streams */
2068 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2070 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2071 if (verbose_level > 0)
2072 printf(" core=%d: \n", lc_id);
2073 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2074 struct fwd_stream *fs;
2075 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2076 fs->rx_port = fwd_ports_ids[rxp];
2078 fs->tx_port = fs->rx_port;
2080 fs->peer_addr = fs->tx_port;
2081 fs->retry_enabled = retry_enabled;
2082 if (verbose_level > 0)
2083 printf(" stream=%d port=%d rxq=%d txq=%d\n",
2084 sm_id, fs->rx_port, fs->rx_queue,
2086 rxq = (queueid_t) (rxq + 1);
2087 if (rxq == nb_rxq) {
2089 rxp = (portid_t) (rxp + 1);
2095 #if defined RTE_LIBRTE_PMD_SOFTNIC
2097 softnic_fwd_config_setup(void)
2099 struct rte_port *port;
2100 portid_t pid, softnic_portid;
2102 uint8_t softnic_enable = 0;
2104 RTE_ETH_FOREACH_DEV(pid) {
2106 const char *driver = port->dev_info.driver_name;
2108 if (strcmp(driver, "net_softnic") == 0) {
2109 softnic_portid = pid;
2115 if (softnic_enable == 0) {
2116 printf("Softnic mode not configured(%s)!\n", __func__);
2120 cur_fwd_config.nb_fwd_ports = 1;
2121 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;
2123 /* Re-initialize forwarding streams */
2127 * In the softnic forwarding test, the number of forwarding cores
2128 * is set to one and the remaining cores are used for softnic packet processing.
2130 cur_fwd_config.nb_fwd_lcores = 1;
2131 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2133 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
2134 fwd_streams[i]->rx_port = softnic_portid;
2135 fwd_streams[i]->rx_queue = i;
2136 fwd_streams[i]->tx_port = softnic_portid;
2137 fwd_streams[i]->tx_queue = i;
2138 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2139 fwd_streams[i]->retry_enabled = retry_enabled;
2145 fwd_config_setup(void)
2147 cur_fwd_config.fwd_eng = cur_fwd_eng;
2148 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2149 icmp_echo_config_setup();
2153 #if defined RTE_LIBRTE_PMD_SOFTNIC
2154 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
2155 softnic_fwd_config_setup();
2160 if ((nb_rxq > 1) && (nb_txq > 1)) {
2162 dcb_fwd_config_setup();
2164 rss_fwd_config_setup();
2167 simple_fwd_config_setup();
2171 mp_alloc_to_str(uint8_t mode)
2174 case MP_ALLOC_NATIVE:
2180 case MP_ALLOC_XMEM_HUGE:
2188 pkt_fwd_config_display(struct fwd_config *cfg)
2190 struct fwd_stream *fs;
2194 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2195 "NUMA support %s, MP allocation mode: %s\n",
2196 cfg->fwd_eng->fwd_mode_name,
2197 retry_enabled == 0 ? "" : " with retry",
2198 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2199 numa_support == 1 ? "enabled" : "disabled",
2200 mp_alloc_to_str(mp_alloc_type));
2203 printf("TX retry num: %u, delay between TX retries: %uus\n",
2204 burst_tx_retry_num, burst_tx_delay_time);
2205 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2206 printf("Logical Core %u (socket %u) forwards packets on "
2208 fwd_lcores_cpuids[lc_id],
2209 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2210 fwd_lcores[lc_id]->stream_nb);
2211 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2212 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2213 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
2214 "P=%d/Q=%d (socket %u) ",
2215 fs->rx_port, fs->rx_queue,
2216 ports[fs->rx_port].socket_id,
2217 fs->tx_port, fs->tx_queue,
2218 ports[fs->tx_port].socket_id);
2219 print_ethaddr("peer=",
2220 &peer_eth_addrs[fs->peer_addr]);
2228 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2230 uint8_t c, new_peer_addr[6];
2231 if (!rte_eth_dev_is_valid_port(port_id)) {
2232 printf("Error: Invalid port number %i\n", port_id);
2235 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr,
2236 sizeof(new_peer_addr)) < 0) {
2237 printf("Error: Invalid ethernet address: %s\n", peer_addr);
2240 for (c = 0; c < 6; c++)
2241 peer_eth_addrs[port_id].addr_bytes[c] =
2246 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2249 unsigned int lcore_cpuid;
2254 for (i = 0; i < nb_lc; i++) {
2255 lcore_cpuid = lcorelist[i];
2256 if (! rte_lcore_is_enabled(lcore_cpuid)) {
2257 printf("lcore %u not enabled\n", lcore_cpuid);
2260 if (lcore_cpuid == rte_get_master_lcore()) {
2261 printf("lcore %u cannot be masked on for running "
2262 "packet forwarding, which is the master lcore "
2263 "and reserved for command line parsing only\n",
2268 fwd_lcores_cpuids[i] = lcore_cpuid;
2270 if (record_now == 0) {
2274 nb_cfg_lcores = (lcoreid_t) nb_lc;
2275 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2276 printf("previous number of forwarding cores %u - changed to "
2277 "number of configured cores %u\n",
2278 (unsigned int) nb_fwd_lcores, nb_lc);
2279 nb_fwd_lcores = (lcoreid_t) nb_lc;
2286 set_fwd_lcores_mask(uint64_t lcoremask)
2288 unsigned int lcorelist[64];
2292 if (lcoremask == 0) {
2293 printf("Invalid NULL mask of cores\n");
2297 for (i = 0; i < 64; i++) {
2298 if (! ((uint64_t)(1ULL << i) & lcoremask))
2300 lcorelist[nb_lc++] = i;
2302 return set_fwd_lcores_list(lcorelist, nb_lc);
2306 set_fwd_lcores_number(uint16_t nb_lc)
2308 if (nb_lc > nb_cfg_lcores) {
2309 printf("nb fwd cores %u > %u (max. number of configured "
2310 "lcores) - ignored\n",
2311 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2314 nb_fwd_lcores = (lcoreid_t) nb_lc;
2315 printf("Number of forwarding cores set to %u\n",
2316 (unsigned int) nb_fwd_lcores);
2320 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2328 for (i = 0; i < nb_pt; i++) {
2329 port_id = (portid_t) portlist[i];
2330 if (port_id_is_invalid(port_id, ENABLED_WARN))
2333 fwd_ports_ids[i] = port_id;
2335 if (record_now == 0) {
2339 nb_cfg_ports = (portid_t) nb_pt;
2340 if (nb_fwd_ports != (portid_t) nb_pt) {
2341 printf("previous number of forwarding ports %u - changed to "
2342 "number of configured ports %u\n",
2343 (unsigned int) nb_fwd_ports, nb_pt);
2344 nb_fwd_ports = (portid_t) nb_pt;
2349 set_fwd_ports_mask(uint64_t portmask)
2351 unsigned int portlist[64];
2355 if (portmask == 0) {
2356 printf("Invalid NULL mask of ports\n");
2360 RTE_ETH_FOREACH_DEV(i) {
2361 if (! ((uint64_t)(1ULL << i) & portmask))
2363 portlist[nb_pt++] = i;
2365 set_fwd_ports_list(portlist, nb_pt);
2369 set_fwd_ports_number(uint16_t nb_pt)
2371 if (nb_pt > nb_cfg_ports) {
2372 printf("nb fwd ports %u > %u (number of configured "
2373 "ports) - ignored\n",
2374 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2377 nb_fwd_ports = (portid_t) nb_pt;
2378 printf("Number of forwarding ports set to %u\n",
2379 (unsigned int) nb_fwd_ports);
2383 port_is_forwarding(portid_t port_id)
2387 if (port_id_is_invalid(port_id, ENABLED_WARN))
2390 for (i = 0; i < nb_fwd_ports; i++) {
2391 if (fwd_ports_ids[i] == port_id)
2399 set_nb_pkt_per_burst(uint16_t nb)
2401 if (nb > MAX_PKT_BURST) {
2402 printf("nb pkt per burst: %u > %u (maximum packets per burst) "
2404 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2407 nb_pkt_per_burst = nb;
2408 printf("Number of packets per burst set to %u\n",
2409 (unsigned int) nb_pkt_per_burst);
2413 tx_split_get_name(enum tx_pkt_split split)
2417 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2418 if (tx_split_name[i].split == split)
2419 return tx_split_name[i].name;
2425 set_tx_pkt_split(const char *name)
2429 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2430 if (strcmp(tx_split_name[i].name, name) == 0) {
2431 tx_pkt_split = tx_split_name[i].split;
2435 printf("unknown value: \"%s\"\n", name);
2439 show_tx_pkt_segments(void)
2445 split = tx_split_get_name(tx_pkt_split);
2447 printf("Number of segments: %u\n", n);
2448 printf("Segment sizes: ");
2449 for (i = 0; i != n - 1; i++)
2450 printf("%hu,", tx_pkt_seg_lengths[i]);
2451 printf("%hu\n", tx_pkt_seg_lengths[i]);
2452 printf("Split packet: %s\n", split);
2456 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2458 uint16_t tx_pkt_len;
2461 if (nb_segs >= (unsigned) nb_txd) {
2462 printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n",
2463 nb_segs, (unsigned int) nb_txd);
2468 * Check that each segment length does not exceed the mbuf data size.
2469 * Check also that the total packet length is greater than or equal to
2470 * the size of an empty UDP/IPv4 packet
2471 * (sizeof(struct ether_hdr) + 20 + 8 = 42 bytes).
2474 for (i = 0; i < nb_segs; i++) {
2475 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2476 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2477 i, seg_lengths[i], (unsigned) mbuf_data_size);
2480 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2482 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
2483 printf("total packet length=%u < %d - give up\n",
2484 (unsigned) tx_pkt_len,
2485 (int)(sizeof(struct ether_hdr) + 20 + 8));
2489 for (i = 0; i < nb_segs; i++)
2490 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2492 tx_pkt_length = tx_pkt_len;
2493 tx_pkt_nb_segs = (uint8_t) nb_segs;
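/*
 * Worked example for the checks above: the smallest accepted TX packet
 * is an empty UDP/IPv4 frame of sizeof(struct ether_hdr) + 20 + 8 =
 * 14 + 20 + 8 = 42 bytes, so a segment list such as {42} or {14, 20, 8}
 * passes, while {20, 8} is rejected as too short (assuming every segment
 * also fits within the mbuf data size).
 */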
2497 setup_gro(const char *onoff, portid_t port_id)
2499 if (!rte_eth_dev_is_valid_port(port_id)) {
2500 printf("invalid port id %u\n", port_id);
2503 if (test_done == 0) {
2504 printf("Before enabling/disabling GRO,"
2505 " please stop forwarding first\n");
2508 if (strcmp(onoff, "on") == 0) {
2509 if (gro_ports[port_id].enable != 0) {
2510 printf("GRO is already enabled on port %u. Please"
2511 " disable it first\n", port_id);
2514 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2515 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
2516 gro_ports[port_id].param.max_flow_num =
2517 GRO_DEFAULT_FLOW_NUM;
2518 gro_ports[port_id].param.max_item_per_flow =
2519 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
2521 gro_ports[port_id].enable = 1;
2523 if (gro_ports[port_id].enable == 0) {
2524 printf("GRO is already disabled on port %u\n", port_id);
2527 gro_ports[port_id].enable = 0;
2532 setup_gro_flush_cycles(uint8_t cycles)
2534 if (test_done == 0) {
2535 printf("Before changing the GRO flush interval,"
2536 " please stop forwarding first.\n");
2540 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
2541 GRO_DEFAULT_FLUSH_CYCLES) {
2542 printf("The flushing cycle must be in the range"
2543 " of 1 to %u. Reverting to the default"
2545 GRO_MAX_FLUSH_CYCLES,
2546 GRO_DEFAULT_FLUSH_CYCLES);
2547 cycles = GRO_DEFAULT_FLUSH_CYCLES;
2550 gro_flush_cycles = cycles;
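/*
 * Display the GRO configuration of a port: the GRO type, the maximum
 * number of packets that can be merged per flush and the configured
 * number of flushing cycles.
 */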
2554 show_gro(portid_t port_id)
2556 struct rte_gro_param *param;
2557 uint32_t max_pkts_num;
2559 param = &gro_ports[port_id].param;
2561 if (!rte_eth_dev_is_valid_port(port_id)) {
2562 printf("Invalid port id %u.\n", port_id);
2565 if (gro_ports[port_id].enable) {
2566 printf("GRO type: TCP/IPv4\n");
2567 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2568 max_pkts_num = param->max_flow_num *
2569 param->max_item_per_flow;
2571 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
2572 printf("Max number of packets to perform GRO: %u\n",
2574 printf("Flushing cycles: %u\n", gro_flush_cycles);
2576 printf("GRO is not enabled on port %u.\n", port_id);
2580 setup_gso(const char *mode, portid_t port_id)
2582 if (!rte_eth_dev_is_valid_port(port_id)) {
2583 printf("invalid port id %u\n", port_id);
2586 if (strcmp(mode, "on") == 0) {
2587 if (test_done == 0) {
2588 printf("before enabling GSO,"
2589 " please stop forwarding first\n");
2592 gso_ports[port_id].enable = 1;
2593 } else if (strcmp(mode, "off") == 0) {
2594 if (test_done == 0) {
2595 printf("before disabling GSO,"
2596 " please stop forwarding first\n");
2599 gso_ports[port_id].enable = 0;
2604 list_pkt_forwarding_modes(void)
2606 static char fwd_modes[128] = "";
2607 const char *separator = "|";
2608 struct fwd_engine *fwd_eng;
2611 if (strlen(fwd_modes) == 0) {
2612 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2613 strncat(fwd_modes, fwd_eng->fwd_mode_name,
2614 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2615 strncat(fwd_modes, separator,
2616 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2618 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2625 list_pkt_forwarding_retry_modes(void)
2627 static char fwd_modes[128] = "";
2628 const char *separator = "|";
2629 struct fwd_engine *fwd_eng;
2632 if (strlen(fwd_modes) == 0) {
2633 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2634 if (fwd_eng == &rx_only_engine)
2636 strncat(fwd_modes, fwd_eng->fwd_mode_name,
2638 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2639 strncat(fwd_modes, separator,
2641 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2643 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2650 set_pkt_forwarding_mode(const char *fwd_mode_name)
2652 struct fwd_engine *fwd_eng;
2656 while ((fwd_eng = fwd_engines[i]) != NULL) {
2657 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
2658 printf("Set %s packet forwarding mode%s\n",
2660 retry_enabled == 0 ? "" : " with retry");
2661 cur_fwd_eng = fwd_eng;
2666 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
2670 set_verbose_level(uint16_t vb_level)
2672 printf("Change verbose level from %u to %u\n",
2673 (unsigned int) verbose_level, (unsigned int) vb_level);
2674 verbose_level = vb_level;
2678 vlan_extend_set(portid_t port_id, int on)
2682 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2684 if (port_id_is_invalid(port_id, ENABLED_WARN))
2687 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2690 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
2691 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2693 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
2694 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2697 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2699 printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
2700 "diag=%d\n", port_id, on, diag);
2701 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2705 rx_vlan_strip_set(portid_t port_id, int on)
2709 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2711 if (port_id_is_invalid(port_id, ENABLED_WARN))
2714 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2717 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
2718 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2720 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
2721 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2724 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2726 printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
2727 "diag=%d\n", port_id, on, diag);
2728 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2732 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
2736 if (port_id_is_invalid(port_id, ENABLED_WARN))
2739 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
2741 printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
2742 "diag=%d\n", port_id, queue_id, on, diag);
2746 rx_vlan_filter_set(portid_t port_id, int on)
2750 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2752 if (port_id_is_invalid(port_id, ENABLED_WARN))
2755 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2758 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
2759 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2761 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
2762 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
2765 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2767 printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
2768 "diag=%d\n", port_id, on, diag);
2769 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2773 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
2777 if (port_id_is_invalid(port_id, ENABLED_WARN))
2779 if (vlan_id_is_invalid(vlan_id))
2781 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2784 printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
2786 port_id, vlan_id, on, diag);
2791 rx_vlan_all_filter_set(portid_t port_id, int on)
2795 if (port_id_is_invalid(port_id, ENABLED_WARN))
2797 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
2798 if (rx_vft_set(port_id, vlan_id, on))
2804 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
2808 if (port_id_is_invalid(port_id, ENABLED_WARN))
2811 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
2815 printf("tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
2817 port_id, vlan_type, tp_id, diag);
2821 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
2824 struct rte_eth_dev_info dev_info;
2826 if (port_id_is_invalid(port_id, ENABLED_WARN))
2828 if (vlan_id_is_invalid(vlan_id))
2831 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2832 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
2833 printf("Error: cannot set VLAN insertion while QinQ is enabled.\n");
2836 rte_eth_dev_info_get(port_id, &dev_info);
2837 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
2838 printf("Error: vlan insert is not supported by port %d\n",
2843 tx_vlan_reset(port_id);
2844 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
2845 ports[port_id].tx_vlan_id = vlan_id;
2849 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
2852 struct rte_eth_dev_info dev_info;
2854 if (port_id_is_invalid(port_id, ENABLED_WARN))
2856 if (vlan_id_is_invalid(vlan_id))
2858 if (vlan_id_is_invalid(vlan_id_outer))
2861 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2862 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
2863 printf("Error: QinQ has not been enabled.\n");
2866 rte_eth_dev_info_get(port_id, &dev_info);
2867 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
2868 printf("Error: qinq insert not supported by port %d\n",
2873 tx_vlan_reset(port_id);
2874 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT;
2875 ports[port_id].tx_vlan_id = vlan_id;
2876 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
2880 tx_vlan_reset(portid_t port_id)
2882 if (port_id_is_invalid(port_id, ENABLED_WARN))
2884 ports[port_id].dev_conf.txmode.offloads &=
2885 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
2886 DEV_TX_OFFLOAD_QINQ_INSERT);
2887 ports[port_id].tx_vlan_id = 0;
2888 ports[port_id].tx_vlan_id_outer = 0;
2892 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
2894 if (port_id_is_invalid(port_id, ENABLED_WARN))
2897 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
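/*
 * Record an RX or TX queue -> statistics counter mapping for a port.
 * An existing entry for the same (port, queue) pair is updated in place;
 * otherwise a new entry is appended to the corresponding mapping table.
 * map_value must be below RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */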
2901 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
2904 uint8_t existing_mapping_found = 0;
2906 if (port_id_is_invalid(port_id, ENABLED_WARN))
2909 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
2912 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
2913 printf("map_value not in required range 0..%d\n",
2914 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
2918 if (!is_rx) { /* then TX */
2919 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2920 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2921 (tx_queue_stats_mappings[i].queue_id == queue_id)) {
2922 tx_queue_stats_mappings[i].stats_counter_id = map_value;
2923 existing_mapping_found = 1;
2927 if (!existing_mapping_found) { /* A new additional mapping... */
2928 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
2929 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
2930 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
2931 nb_tx_queue_stats_mappings++;
2935 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2936 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2937 (rx_queue_stats_mappings[i].queue_id == queue_id)) {
2938 rx_queue_stats_mappings[i].stats_counter_id = map_value;
2939 existing_mapping_found = 1;
2943 if (!existing_mapping_found) { /* A new additional mapping... */
2944 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
2945 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
2946 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
2947 nb_rx_queue_stats_mappings++;
2953 set_xstats_hide_zero(uint8_t on_off)
2955 xstats_hide_zero = on_off;
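/*
 * Flow Director (FDIR) display helpers. The functions below pretty-print
 * the masks, flexible payload configuration, flow types and statistics
 * reported through rte_eth_dev_filter_ctrl() for the RTE_ETH_FILTER_FDIR
 * filter type.
 */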
2959 print_fdir_mask(struct rte_eth_fdir_masks *mask)
2961 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
2963 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2964 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
2965 " tunnel_id: 0x%08x",
2966 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
2967 rte_be_to_cpu_32(mask->tunnel_id_mask));
2968 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
2969 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
2970 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
2971 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
2973 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
2974 rte_be_to_cpu_16(mask->src_port_mask),
2975 rte_be_to_cpu_16(mask->dst_port_mask));
2977 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
2978 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
2979 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
2980 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
2981 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
2983 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
2984 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
2985 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
2986 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
2987 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
2994 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
2996 struct rte_eth_flex_payload_cfg *cfg;
2999 for (i = 0; i < flex_conf->nb_payloads; i++) {
3000 cfg = &flex_conf->flex_set[i];
3001 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
3003 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
3004 printf("\n L2_PAYLOAD: ");
3005 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
3006 printf("\n L3_PAYLOAD: ");
3007 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
3008 printf("\n L4_PAYLOAD: ");
3010 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
3011 for (j = 0; j < num; j++)
3012 printf(" %-5u", cfg->src_offset[j]);
3018 flowtype_to_str(uint16_t flow_type)
3020 struct flow_type_info {
3026 static struct flow_type_info flowtype_str_table[] = {
3027 {"raw", RTE_ETH_FLOW_RAW},
3028 {"ipv4", RTE_ETH_FLOW_IPV4},
3029 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
3030 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
3031 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
3032 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
3033 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
3034 {"ipv6", RTE_ETH_FLOW_IPV6},
3035 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
3036 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
3037 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
3038 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
3039 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
3040 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
3041 {"port", RTE_ETH_FLOW_PORT},
3042 {"vxlan", RTE_ETH_FLOW_VXLAN},
3043 {"geneve", RTE_ETH_FLOW_GENEVE},
3044 {"nvgre", RTE_ETH_FLOW_NVGRE},
3045 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
3048 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
3049 if (flowtype_str_table[i].ftype == flow_type)
3050 return flowtype_str_table[i].str;
3057 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3059 struct rte_eth_fdir_flex_mask *mask;
3063 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
3064 mask = &flex_conf->flex_mask[i];
3065 p = flowtype_to_str(mask->flow_type);
3066 printf("\n %s:\t", p ? p : "unknown");
3067 for (j = 0; j < num; j++)
3068 printf(" %02x", mask->mask[j]);
3074 print_fdir_flow_type(uint32_t flow_types_mask)
3079 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3080 if (!(flow_types_mask & (1 << i)))
3082 p = flowtype_to_str(i);
3092 fdir_get_infos(portid_t port_id)
3094 struct rte_eth_fdir_stats fdir_stat;
3095 struct rte_eth_fdir_info fdir_info;
3098 static const char *fdir_stats_border = "########################";
3100 if (port_id_is_invalid(port_id, ENABLED_WARN))
3102 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
3104 printf("\n FDIR is not supported on port %-2d\n",
3109 memset(&fdir_info, 0, sizeof(fdir_info));
3110 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3111 RTE_ETH_FILTER_INFO, &fdir_info);
3112 memset(&fdir_stat, 0, sizeof(fdir_stat));
3113 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3114 RTE_ETH_FILTER_STATS, &fdir_stat);
3115 printf("\n %s FDIR info for port %-2d %s\n",
3116 fdir_stats_border, port_id, fdir_stats_border);
3118 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
3119 printf(" PERFECT\n");
3120 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
3121 printf(" PERFECT-MAC-VLAN\n");
3122 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3123 printf(" PERFECT-TUNNEL\n");
3124 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
3125 printf(" SIGNATURE\n");
3127 printf(" DISABLE\n");
3128 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
3129 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
3130 printf(" SUPPORTED FLOW TYPE: ");
3131 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
3133 printf(" FLEX PAYLOAD INFO:\n");
3134 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
3135 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
3136 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
3137 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
3138 fdir_info.flex_payload_unit,
3139 fdir_info.max_flex_payload_segment_num,
3140 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
3142 print_fdir_mask(&fdir_info.mask);
3143 if (fdir_info.flex_conf.nb_payloads > 0) {
3144 printf(" FLEX PAYLOAD SRC OFFSET:");
3145 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3147 if (fdir_info.flex_conf.nb_flexmasks > 0) {
3148 printf(" FLEX MASK CFG:");
3149 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3151 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
3152 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
3153 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
3154 fdir_info.guarant_spc, fdir_info.best_spc);
3155 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
3156 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
3157 " add: %-10"PRIu64" remove: %"PRIu64"\n"
3158 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
3159 fdir_stat.collision, fdir_stat.free,
3160 fdir_stat.maxhash, fdir_stat.maxlen,
3161 fdir_stat.add, fdir_stat.remove,
3162 fdir_stat.f_add, fdir_stat.f_remove);
3163 printf(" %s############################%s\n",
3164 fdir_stats_border, fdir_stats_border);
3168 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3170 struct rte_port *port;
3171 struct rte_eth_fdir_flex_conf *flex_conf;
3174 port = &ports[port_id];
3175 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3176 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3177 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3182 if (i >= RTE_ETH_FLOW_MAX) {
3183 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3184 idx = flex_conf->nb_flexmasks;
3185 flex_conf->nb_flexmasks++;
3187 printf("The flex mask table is full. Cannot set the flex"
3188 " mask for flow_type %u.\n", cfg->flow_type);
3192 rte_memcpy(&flex_conf->flex_mask[idx],
3194 sizeof(struct rte_eth_fdir_flex_mask));
3198 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3200 struct rte_port *port;
3201 struct rte_eth_fdir_flex_conf *flex_conf;
3204 port = &ports[port_id];
3205 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3206 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3207 if (cfg->type == flex_conf->flex_set[i].type) {
3212 if (i >= RTE_ETH_PAYLOAD_MAX) {
3213 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3214 idx = flex_conf->nb_payloads;
3215 flex_conf->nb_payloads++;
3217 printf("The flex payload table is full. Cannot set the"
3218 " flex payload for type %u.\n", cfg->type);
3222 rte_memcpy(&flex_conf->flex_set[idx],
3224 sizeof(struct rte_eth_flex_payload_cfg));
3229 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3231 #ifdef RTE_LIBRTE_IXGBE_PMD
3235 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3237 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3241 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3242 is_rx ? "rx" : "tx", port_id, diag);
3245 printf("VF %s setting not supported for port %d\n",
3246 is_rx ? "Rx" : "Tx", port_id);
3252 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3255 struct rte_eth_link link;
3257 if (port_id_is_invalid(port_id, ENABLED_WARN))
3259 rte_eth_link_get_nowait(port_id, &link);
3260 if (rate > link.link_speed) {
3261 printf("Invalid rate value %u: bigger than link speed %u\n",
3262 rate, link.link_speed);
3265 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3268 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3274 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
3276 int diag = -ENOTSUP;
3280 RTE_SET_USED(q_msk);
3282 #ifdef RTE_LIBRTE_IXGBE_PMD
3283 if (diag == -ENOTSUP)
3284 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
3287 #ifdef RTE_LIBRTE_BNXT_PMD
3288 if (diag == -ENOTSUP)
3289 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
3294 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
3300 * Functions to manage the set of filtered Multicast MAC addresses.
3302 * A pool of filtered multicast MAC addresses is associated with each port.
3303 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3304 * The address of the pool and the number of valid multicast MAC addresses
3305 * recorded in the pool are stored in the fields "mc_addr_pool" and
3306 * "mc_addr_nb" of the "rte_port" data structure.
3308 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
3309 * that a contiguous array of multicast MAC addresses be supplied.
3310 * To comply with this constraint, the set of multicast addresses recorded
3311 * in the pool is kept compacted at the beginning of the pool.
3312 * Hence, when a multicast address is removed from the pool, all following
3313 * addresses, if any, are copied back to keep the set contiguous.
3315 #define MCAST_POOL_INC 32
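/*
 * A minimal usage sketch, assuming port 0 and the IPv4 all-hosts
 * multicast MAC address chosen purely for illustration:
 *
 *	struct ether_addr maddr = {
 *		.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }
 *	};
 *
 *	mcast_addr_add(0, &maddr);	// grows the pool if needed, then
 *					// pushes the full list to the PMD
 *	mcast_addr_remove(0, &maddr);	// compacts the pool, pushes again
 */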
3318 mcast_addr_pool_extend(struct rte_port *port)
3320 struct ether_addr *mc_pool;
3321 size_t mc_pool_size;
3324 * If a free entry is available at the end of the pool, just
3325 * increment the number of recorded multicast addresses.
3327 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3333 * [re]allocate a pool with MCAST_POOL_INC more entries.
3334 * The previous test guarantees that port->mc_addr_nb is a multiple
3335 * of MCAST_POOL_INC.
3337 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
3339 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
3341 if (mc_pool == NULL) {
3342 printf("allocation of pool of %u multicast addresses failed\n",
3343 port->mc_addr_nb + MCAST_POOL_INC);
3347 port->mc_addr_pool = mc_pool;
3354 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
3357 if (addr_idx == port->mc_addr_nb) {
3358 /* No need to recompact the set of multicast addresses. */
3359 if (port->mc_addr_nb == 0) {
3360 /* free the pool of multicast addresses. */
3361 free(port->mc_addr_pool);
3362 port->mc_addr_pool = NULL;
3366 memmove(&port->mc_addr_pool[addr_idx],
3367 &port->mc_addr_pool[addr_idx + 1],
3368 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
3372 eth_port_multicast_addr_list_set(portid_t port_id)
3374 struct rte_port *port;
3377 port = &ports[port_id];
3378 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3382 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
3383 port_id, port->mc_addr_nb, -diag);
3387 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr)
3389 struct rte_port *port;
3392 if (port_id_is_invalid(port_id, ENABLED_WARN))
3395 port = &ports[port_id];
3398 * Check that the added multicast MAC address is not already recorded
3399 * in the pool of multicast addresses.
3401 for (i = 0; i < port->mc_addr_nb; i++) {
3402 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3403 printf("multicast address already filtered by port\n");
3408 if (mcast_addr_pool_extend(port) != 0)
3410 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
3411 eth_port_multicast_addr_list_set(port_id);
3415 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr)
3417 struct rte_port *port;
3420 if (port_id_is_invalid(port_id, ENABLED_WARN))
3423 port = &ports[port_id];
3426 * Search the pool of multicast MAC addresses for the removed address.
3428 for (i = 0; i < port->mc_addr_nb; i++) {
3429 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
3432 if (i == port->mc_addr_nb) {
3433 printf("multicast address not filtered by port %d\n", port_id);
3437 mcast_addr_pool_remove(port, i);
3438 eth_port_multicast_addr_list_set(port_id);
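/*
 * Display the DCB configuration of a port: number of traffic classes,
 * priority-to-TC mapping, per-TC bandwidth percentages and the RX/TX
 * queue ranges assigned to each TC.
 */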
3442 port_dcb_info_display(portid_t port_id)
3444 struct rte_eth_dcb_info dcb_info;
3447 static const char *border = "================";
3449 if (port_id_is_invalid(port_id, ENABLED_WARN))
3452 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
3454 printf("\n Failed to get DCB info on port %-2d\n",
3458 printf("\n %s DCB info for port %-2d %s\n", border, port_id, border);
3459 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
3461 for (i = 0; i < dcb_info.nb_tcs; i++)
3463 printf("\n Priority : ");
3464 for (i = 0; i < dcb_info.nb_tcs; i++)
3465 printf("\t%4d", dcb_info.prio_tc[i]);
3466 printf("\n BW percent :");
3467 for (i = 0; i < dcb_info.nb_tcs; i++)
3468 printf("\t%4d%%", dcb_info.tc_bws[i]);
3469 printf("\n RXQ base : ");
3470 for (i = 0; i < dcb_info.nb_tcs; i++)
3471 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
3472 printf("\n RXQ number :");
3473 for (i = 0; i < dcb_info.nb_tcs; i++)
3474 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
3475 printf("\n TXQ base : ");
3476 for (i = 0; i < dcb_info.nb_tcs; i++)
3477 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
3478 printf("\n TXQ number :");
3479 for (i = 0; i < dcb_info.nb_tcs; i++)
3480 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
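/*
 * Read the regular file at file_path into a newly allocated buffer and
 * return it to the caller together with its size (companion helpers
 * save_file() and close_file() follow below).
 */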
3485 open_file(const char *file_path, uint32_t *size)
3487 int fd = open(file_path, O_RDONLY);
3489 uint8_t *buf = NULL;
3497 printf("%s: Failed to open %s\n", __func__, file_path);
3501 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
3503 printf("%s: File operations failed\n", __func__);
3507 pkg_size = st_buf.st_size;
3510 printf("%s: File operations failed\n", __func__);
3514 buf = (uint8_t *)malloc(pkg_size);
3517 printf("%s: Failed to malloc memory\n", __func__);
3521 ret = read(fd, buf, pkg_size);
3524 printf("%s: File read operation failed\n", __func__);
3538 save_file(const char *file_path, uint8_t *buf, uint32_t size)
3540 FILE *fh = fopen(file_path, "wb");
3543 printf("%s: Failed to open %s\n", __func__, file_path);
3547 if (fwrite(buf, 1, size, fh) != size) {
3549 printf("%s: File write operation failed\n", __func__);
3559 close_file(uint8_t *buf)
3570 port_queue_region_info_display(portid_t port_id, void *buf)
3572 #ifdef RTE_LIBRTE_I40E_PMD
3574 struct rte_pmd_i40e_queue_regions *info =
3575 (struct rte_pmd_i40e_queue_regions *)buf;
3576 static const char *queue_region_info_stats_border = "-------";
3578 if (!info->queue_region_number)
3579 printf("no queue region has been set before\n");
3581 printf("\n %s All queue region info for port=%2d %s",
3582 queue_region_info_stats_border, port_id,
3583 queue_region_info_stats_border);
3584 printf("\n queue_region_number: %-14u \n",
3585 info->queue_region_number);
3587 for (i = 0; i < info->queue_region_number; i++) {
3588 printf("\n region_id: %-14u queue_number: %-14u "
3589 "queue_start_index: %-14u \n",
3590 info->region[i].region_id,
3591 info->region[i].queue_num,
3592 info->region[i].queue_start_index);
3594 printf(" user_priority_num is %-14u :",
3595 info->region[i].user_priority_num);
3596 for (j = 0; j < info->region[i].user_priority_num; j++)
3597 printf(" %-14u ", info->region[i].user_priority[j]);
3599 printf("\n flowtype_num is %-14u :",
3600 info->region[i].flowtype_num);
3601 for (j = 0; j < info->region[i].flowtype_num; j++)
3602 printf(" %-14u ", info->region[i].hw_flowtype[j]);
3605 RTE_SET_USED(port_id);