/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*   BSD LICENSE
 *
 *   Copyright 2013-2014 6WIND S.A.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <inttypes.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "testpmd.h"
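/*
 * Display and configuration helpers behind the testpmd command-line
 * interface: port/queue statistics, register access, forwarding-stream
 * setup, RSS, VLAN, flow-director and multicast filtering.
 */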
static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};
struct rss_type_info {
	char str[32];
	uint64_t rss_type;
};

static const struct rss_type_info rss_type_table[] = {
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
};
void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];

	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
void
nic_stats_display(portid_t port_id)
{
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;
	portid_t pid;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n  %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
		       "%-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf("  RX-badcrc:  %-10"PRIu64" RX-badlen: %-10"PRIu64" RX-errors: "
		       "%-"PRIu64"\n",
		       stats.ibadcrc, stats.ibadlen, stats.ierrors);
		printf("  RX-nombuf:  %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
		       "%-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}
	else {
		printf("  RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
		       " RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf("  RX-badcrc:  %10"PRIu64" RX-badlen: %10"PRIu64
		       " RX-errors: %10"PRIu64"\n",
		       stats.ibadcrc, stats.ibadlen, stats.ierrors);
		printf("  RX-nombuf:  %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
		       " TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:   %-10"PRIu64" Fdirmatch: %-10"PRIu64"\n",
		       stats.fdirmiss, stats.fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets: %10"PRIu64
			       " RX-errors: %10"PRIu64
			       " RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets: %10"PRIu64
			       " TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats.tx_pause_xon | stats.rx_pause_xon |
	     stats.tx_pause_xoff | stats.rx_pause_xoff) > 0) {
		printf("  RX-XOFF:    %-10"PRIu64" RX-XON:    %-10"PRIu64"\n",
		       stats.rx_pause_xoff, stats.rx_pause_xon);
		printf("  TX-XOFF:    %-10"PRIu64" TX-XON:    %-10"PRIu64"\n",
		       stats.tx_pause_xoff, stats.tx_pause_xon);
	}
	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}
void
nic_stats_clear(portid_t port_id)
{
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n  NIC statistics for port %d cleared\n", port_id);
}
void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstats *xstats;
	int len, ret, i;

	printf("###### NIC extended statistics for port %-2d\n", port_id);

	len = rte_eth_xstats_get(port_id, NULL, 0);
	if (len < 0) {
		printf("Cannot get xstats count\n");
		return;
	}
	xstats = malloc(sizeof(xstats[0]) * len);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		return;
	}
	ret = rte_eth_xstats_get(port_id, xstats, len);
	if (ret < 0 || ret > len) {
		printf("Cannot get xstats\n");
		free(xstats);
		return;
	}
	for (i = 0; i < len; i++)
		printf("%s: %"PRIu64"\n", xstats[i].name, xstats[i].value);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	rte_eth_xstats_reset(port_id);
}
void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;
	portid_t pid;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf("  %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}
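/*
 * Query the configuration of one RX (respectively TX) queue of a port and
 * display its mempool, thresholds and descriptor count.
 */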
void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %hhu, "
			"RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %hhu, "
			"TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX flags: %#x", qinfo.conf.txq_flags);
	printf("\nTX deferred start: %s",
	       (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\n");
}
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool * mp;
	static const char *info_border = "*********************";
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	port = &ports[port_id];
	rte_eth_link_get_nowait(port_id, &link);
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	rte_eth_macaddr_get(port_id, &mac_addr);
	print_ethaddr("MAC address: ", &mac_addr);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload:\n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on\n");
		else
			printf("  strip off\n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("  filter on\n");
		else
			printf("  filter off\n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("  qinq(extend) on\n");
		else
			printf("  qinq(extend) off\n");
	}

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		printf("Supported flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX;
		     i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			printf("  %s\n", (p ? p : "unknown"));
		}
	}

	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
}
int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	if (port_id < RTE_MAX_ETHPORTS && ports[port_id].enabled)
		return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}
	pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}
#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}
void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}
void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag == 0)
		return;
	printf("Set MTU failed. diag=%d\n", diag);
}

/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}
static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t hi;
		uint32_t lo;
#else
		uint32_t lo;
		uint32_t hi;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}
static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   uint8_t port_id,
#else
			   __rte_unused uint8_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
	       (unsigned)txd.lo_dword.words.lo,
	       (unsigned)txd.lo_dword.words.hi,
	       (unsigned)txd.hi_dword.words.lo,
	       (unsigned)txd.hi_dword.words.hi);
}
void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}
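/* Display the list of lcores configured for packet forwarding. */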
void
fwd_lcores_config_display(void)
{
	lcoreid_t lc_id;

	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
	printf("\n");
}
void
rxtx_config_display(void)
{
	printf("  %s packet forwarding - CRC stripping %s - "
	       "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
	       rx_mode.hw_strip_crc ? "enabled" : "disabled",
	       nb_pkt_per_burst);

	if (cur_fwd_eng == &tx_only_engine)
		printf("  packet len=%u - nb packet segments=%d\n",
		       (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf;
	struct rte_eth_txconf *tx_conf = &ports[0].tx_conf;

	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);
	printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
	       nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
	printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh,
	       rx_conf->rx_thresh.wthresh);
	printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
	       nb_txq, nb_txd, tx_conf->tx_free_thresh);
	printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh,
	       tx_conf->tx_thresh.wthresh);
	printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
	       tx_conf->tx_rs_thresh, tx_conf->txq_flags);
}
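/*
 * Query the RSS redirection table of a port and display the entries
 * selected by the mask bits of reta_conf.
 */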
void
port_rss_reta_info(portid_t port_id,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t nb_entries)
{
	uint16_t i, idx, shift;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
	if (ret != 0) {
		printf("Failed to get RSS RETA info, return code = %d\n", ret);
		return;
	}

	for (i = 0; i < nb_entries; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;
		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
		       i, reta_conf[idx].reta[shift]);
	}
}
/*
 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
 * key of the port.
 */
void
port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
{
	struct rte_eth_rss_conf rss_conf;
	uint8_t rss_key[10 * 4] = "";
	uint64_t rss_hf;
	uint8_t i;
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rss_conf.rss_hf = 0;
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (!strcmp(rss_info, rss_type_table[i].str))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}

	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
	rss_conf.rss_key_len = sizeof(rss_key);
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag != 0) {
		switch (diag) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		default:
			printf("operation failed - diag=%d\n", diag);
			break;
		}
		return;
	}
	rss_hf = rss_conf.rss_hf;
	if (rss_hf == 0) {
		printf("RSS disabled\n");
		return;
	}
	printf("RSS functions:\n ");
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (rss_hf & rss_type_table[i].rss_type)
			printf("%s ", rss_type_table[i].str);
	}
	printf("\n");
	if (!show_rss_key)
		return;
	printf("RSS key:\n");
	for (i = 0; i < sizeof(rss_key); i++)
		printf("%02X", rss_key[i]);
	printf("\n");
}
void
port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
			 uint8_t hash_key_len)
{
	struct rte_eth_rss_conf rss_conf;
	int diag;
	unsigned int i;

	rss_conf.rss_key = NULL;
	rss_conf.rss_key_len = hash_key_len;
	rss_conf.rss_hf = 0;
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (!strcmp(rss_type_table[i].str, rss_type))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag == 0) {
		rss_conf.rss_key = hash_key;
		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}
	if (diag == 0)
		return;

	switch (diag) {
	case -ENODEV:
		printf("port index %d invalid\n", port_id);
		break;
	case -ENOTSUP:
		printf("operation not supported by device\n");
		break;
	default:
		printf("operation failed - diag=%d\n", diag);
		break;
	}
}
/*
 * Setup forwarding configuration for each logical core.
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t  nb_extra;
	lcoreid_t  nb_fc;
	lcoreid_t  nb_lc;
	lcoreid_t  lc_id;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}
static void
simple_fwd_config_setup(void)
{
	portid_t i;
	portid_t j;
	portid_t inc = 2;

	if (port_topology == PORT_TOPOLOGY_CHAINED ||
	    port_topology == PORT_TOPOLOGY_LOOP) {
		inc = 1;
	} else if (nb_fwd_ports % 2) {
		printf("\nWarning! Cannot handle an odd number of ports "
		       "with the current port topology. Configuration "
		       "must be changed to have an even number of ports, "
		       "or relaunch application with "
		       "--port-topology=chained\n\n");
	}

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
		if (port_topology != PORT_TOPOLOGY_LOOP)
			j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
		else
			j = i;
		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue  = 0;
		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
		fwd_streams[i]->tx_queue  = 0;
		fwd_streams[i]->peer_addr = j;

		if (port_topology == PORT_TOPOLOGY_PAIRED) {
			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
			fwd_streams[j]->rx_queue  = 0;
			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
			fwd_streams[j]->tx_queue  = 0;
			fwd_streams[j]->peer_addr = i;
		}
	}
}
/**
 * For the RSS forwarding test, each core is assigned on every port a transmit
 * queue whose index is the index of the core itself. This approach limits the
 * maximum number of processing cores of the RSS test to the maximum number of
 * TX queues supported by the devices.
 *
 * Each core is assigned a single stream, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port where to send forwarded packets.
 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
 * following rules:
 *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 *    - TxQl = RxQj
 */
static void
rss_fwd_config_setup(void)
{
	portid_t   rxp;
	portid_t   txp;
	queueid_t  rxq;
	queueid_t  nb_q;
	lcoreid_t  lc_id;

	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_streams =
			(streamid_t)cur_fwd_config.nb_fwd_lcores;
	else
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[lc_id];

		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		/*
		 * if we are in loopback, simply send stuff out through the
		 * ingress port
		 */
		if (port_topology == PORT_TOPOLOGY_LOOP)
			txp = rxp;

		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		rxq = (queueid_t) (rxq + 1);
		if (rxq < nb_q)
			continue;
		/*
		 * Restart from RX queue 0 on next RX port
		 */
		rxq = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp = (portid_t) (rxp + 1);
	}
}
/**
 * For the DCB forwarding test, each core is assigned one traffic class
 * at a time.
 *
 * Each core is assigned a multi-stream, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port where to send forwarded packets. All RX and
 * TX queues map to the same traffic class.
 * If VMDQ and DCB co-exist, each traffic class on different POOLs shares
 * the same core.
 */
static void
dcb_fwd_config_setup(void)
{
	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
	portid_t txp, rxp = 0;
	queueid_t txq, rxq = 0;
	lcoreid_t  lc_id;
	uint16_t nb_rx_queue, nb_tx_queue;
	uint16_t i, j, k, sm_id = 0;
	uint8_t tc = 0;

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);

	/* reinitialize forwarding streams */
	init_fwd_streams();
	sm_id = 0;
	if ((rxp & 0x1) == 0)
		txp = (portid_t) (rxp + 1);
	else
		txp = (portid_t) (rxp - 1);
	/* get the dcb info on the first RX and TX ports */
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		fwd_lcores[lc_id]->stream_nb = 0;
		fwd_lcores[lc_id]->stream_idx = sm_id;
		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
			/* if the nb_queue is zero, it means this tc is
			 * not enabled on the POOL
			 */
			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
				break;
			k = fwd_lcores[lc_id]->stream_nb +
				fwd_lcores[lc_id]->stream_idx;
			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
			for (j = 0; j < nb_rx_queue; j++) {
				struct fwd_stream *fs;

				fs = fwd_streams[k + j];
				fs->rx_port = fwd_ports_ids[rxp];
				fs->rx_queue = rxq + j;
				fs->tx_port = fwd_ports_ids[txp];
				fs->tx_queue = txq + j % nb_tx_queue;
				fs->peer_addr = fs->tx_port;
			}
			fwd_lcores[lc_id]->stream_nb +=
				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
		}
		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

		tc++;
		if (tc < rxp_dcb_info.nb_tcs)
			continue;
		/* Restart from TC 0 on next RX port */
		tc = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp++;
		if (rxp >= nb_fwd_ports)
			return;
		/* get the dcb information on next RX and TX ports */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
	}
}
static void
icmp_echo_config_setup(void)
{
	portid_t  rxp;
	queueid_t rxq;
	lcoreid_t lc_id;
	uint16_t  sm_id;

	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
			(nb_txq * nb_fwd_ports);
	else
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
	if (verbose_level > 0) {
		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __FUNCTION__,
		       cur_fwd_config.nb_fwd_lcores,
		       cur_fwd_config.nb_fwd_ports,
		       cur_fwd_config.nb_fwd_streams);
	}

	/* reinitialize forwarding streams */
	init_fwd_streams();
	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		if (verbose_level > 0)
			printf("  core=%d: \n", lc_id);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			fs->tx_port = fs->rx_port;
			fs->tx_queue = rxq;
			fs->peer_addr = fs->tx_port;
			if (verbose_level > 0)
				printf("  stream=%d port=%d rxq=%d txq=%d\n",
				       sm_id, fs->rx_port, fs->rx_queue,
				       fs->tx_queue);
			rxq = (queueid_t) (rxq + 1);
			if (rxq == nb_rxq) {
				rxq = 0;
				rxp = (portid_t) (rxp + 1);
			}
		}
	}
}
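/*
 * Select the forwarding-stream layout: the icmpecho engine has its own
 * setup; multi-queue configurations use the DCB or RSS setup; otherwise
 * the simple one-stream-per-port layout is used.
 */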
void
fwd_config_setup(void)
{
	cur_fwd_config.fwd_eng = cur_fwd_eng;
	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
		icmp_echo_config_setup();
		return;
	}
	if ((nb_rxq > 1) && (nb_txq > 1)) {
		if (dcb_config)
			dcb_fwd_config_setup();
		else
			rss_fwd_config_setup();
	}
	else
		simple_fwd_config_setup();
}

static void
pkt_fwd_config_display(struct fwd_config *cfg)
{
	struct fwd_stream *fs;
	lcoreid_t  lc_id;
	streamid_t sm_id;

	printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
		"NUMA support %s, MP over anonymous pages %s\n",
		cfg->fwd_eng->fwd_mode_name,
		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
		numa_support == 1 ? "enabled" : "disabled",
		mp_anon != 0 ? "enabled" : "disabled");

	if (strcmp(cfg->fwd_eng->fwd_mode_name, "mac_retry") == 0)
		printf("TX retry num: %u, delay between TX retries: %uus\n",
			burst_tx_retry_num, burst_tx_delay_time);
	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
		printf("Logical Core %u (socket %u) forwards packets on "
		       "%d streams:",
		       fwd_lcores_cpuids[lc_id],
		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
		       fwd_lcores[lc_id]->stream_nb);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
			       "P=%d/Q=%d (socket %u) ",
			       fs->rx_port, fs->rx_queue,
			       ports[fs->rx_port].socket_id,
			       fs->tx_port, fs->tx_queue,
			       ports[fs->tx_port].socket_id);
			print_ethaddr("peer=",
				      &peer_eth_addrs[fs->peer_addr]);
		}
		printf("\n");
	}
	printf("\n");
}

void
fwd_config_display(void)
{
	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
}
int
set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
{
	unsigned int i;
	unsigned int lcore_cpuid;
	int record_now;

	record_now = 0;
 again:
	for (i = 0; i < nb_lc; i++) {
		lcore_cpuid = lcorelist[i];
		if (! rte_lcore_is_enabled(lcore_cpuid)) {
			printf("lcore %u not enabled\n", lcore_cpuid);
			return -1;
		}
		if (lcore_cpuid == rte_get_master_lcore()) {
			printf("lcore %u cannot be masked on for running "
			       "packet forwarding, which is the master lcore "
			       "and reserved for command line parsing only\n",
			       lcore_cpuid);
			return -1;
		}
		if (record_now)
			fwd_lcores_cpuids[i] = lcore_cpuid;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_lcores = (lcoreid_t) nb_lc;
	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
		printf("previous number of forwarding cores %u - changed to "
		       "number of configured cores %u\n",
		       (unsigned int) nb_fwd_lcores, nb_lc);
		nb_fwd_lcores = (lcoreid_t) nb_lc;
	}
	return 0;
}
int
set_fwd_lcores_mask(uint64_t lcoremask)
{
	unsigned int lcorelist[64];
	unsigned int nb_lc;
	unsigned int i;

	if (lcoremask == 0) {
		printf("Invalid NULL mask of cores\n");
		return -1;
	}
	nb_lc = 0;
	for (i = 0; i < 64; i++) {
		if (! ((uint64_t)(1ULL << i) & lcoremask))
			continue;
		lcorelist[nb_lc++] = i;
	}
	return set_fwd_lcores_list(lcorelist, nb_lc);
}

void
set_fwd_lcores_number(uint16_t nb_lc)
{
	if (nb_lc > nb_cfg_lcores) {
		printf("nb fwd cores %u > %u (max. number of configured "
		       "lcores) - ignored\n",
		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
		return;
	}
	nb_fwd_lcores = (lcoreid_t) nb_lc;
	printf("Number of forwarding cores set to %u\n",
	       (unsigned int) nb_fwd_lcores);
}
void
set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
{
	unsigned int i;
	portid_t port_id;
	int record_now;

	record_now = 0;
 again:
	for (i = 0; i < nb_pt; i++) {
		port_id = (portid_t) portlist[i];
		if (port_id_is_invalid(port_id, ENABLED_WARN))
			return;
		if (record_now)
			fwd_ports_ids[i] = port_id;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_ports = (portid_t) nb_pt;
	if (nb_fwd_ports != (portid_t) nb_pt) {
		printf("previous number of forwarding ports %u - changed to "
		       "number of configured ports %u\n",
		       (unsigned int) nb_fwd_ports, nb_pt);
		nb_fwd_ports = (portid_t) nb_pt;
	}
}

void
set_fwd_ports_mask(uint64_t portmask)
{
	unsigned int portlist[64];
	unsigned int nb_pt;
	unsigned int i;

	if (portmask == 0) {
		printf("Invalid NULL mask of ports\n");
		return;
	}
	nb_pt = 0;
	for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) {
		if (! ((uint64_t)(1ULL << i) & portmask))
			continue;
		portlist[nb_pt++] = i;
	}
	set_fwd_ports_list(portlist, nb_pt);
}

void
set_fwd_ports_number(uint16_t nb_pt)
{
	if (nb_pt > nb_cfg_ports) {
		printf("nb fwd ports %u > %u (number of configured "
		       "ports) - ignored\n",
		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
		return;
	}
	nb_fwd_ports = (portid_t) nb_pt;
	printf("Number of forwarding ports set to %u\n",
	       (unsigned int) nb_fwd_ports);
}
void
set_nb_pkt_per_burst(uint16_t nb)
{
	if (nb > MAX_PKT_BURST) {
		printf("nb pkt per burst: %u > %u (maximum packet per burst) "
		       " ignored\n",
		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
		return;
	}
	nb_pkt_per_burst = nb;
	printf("Number of packets per burst set to %u\n",
	       (unsigned int) nb_pkt_per_burst);
}
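/* Map a tx_pkt_split enum value to its printable name, or NULL if unknown. */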
static const char *
tx_split_get_name(enum tx_pkt_split split)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (tx_split_name[i].split == split)
			return tx_split_name[i].name;
	}
	return NULL;
}

void
set_tx_pkt_split(const char *name)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (strcmp(tx_split_name[i].name, name) == 0) {
			tx_pkt_split = tx_split_name[i].split;
			return;
		}
	}
	printf("unknown value: \"%s\"\n", name);
}

void
show_tx_pkt_segments(void)
{
	uint32_t i, n;
	const char *split;

	n = tx_pkt_nb_segs;
	split = tx_split_get_name(tx_pkt_split);

	printf("Number of segments: %u\n", n);
	printf("Segment sizes: ");
	for (i = 0; i != n - 1; i++)
		printf("%hu,", tx_pkt_seg_lengths[i]);
	printf("%hu\n", tx_pkt_seg_lengths[i]);
	printf("Split packet: %s\n", split);
}
void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned i;

	if (nb_segs >= (unsigned) nb_txd) {
		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
		       nb_segs, (unsigned int) nb_txd);
		return;
	}

	/*
	 * Check that each segment length is greater than or equal to
	 * the mbuf data size.
	 * Check also that the total packet length is greater than or equal to
	 * the size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
			       i, seg_lengths[i], (unsigned) mbuf_data_size);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
		printf("total packet length=%u < %d - give up\n",
		       (unsigned) tx_pkt_len,
		       (int)(sizeof(struct ether_hdr) + 20 + 8));
		return;
	}

	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length  = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}
char*
list_pkt_forwarding_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned i = 0;

	if (strlen (fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			strcat(fwd_modes, fwd_eng->fwd_mode_name);
			strcat(fwd_modes, separator);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}

void
set_pkt_forwarding_mode(const char *fwd_mode_name)
{
	struct fwd_engine *fwd_eng;
	unsigned i;

	i = 0;
	while ((fwd_eng = fwd_engines[i]) != NULL) {
		if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
			printf("Set %s packet forwarding mode\n",
			       fwd_mode_name);
			cur_fwd_eng = fwd_eng;
			return;
		}
		i++;
	}
	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
}
void
set_verbose_level(uint16_t vb_level)
{
	printf("Change verbose level from %u to %u\n",
	       (unsigned int) verbose_level, (unsigned int) vb_level);
	verbose_level = vb_level;
}
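/*
 * VLAN offload configuration helpers: each reads the current offload
 * mask of the port, sets or clears one flag, and writes the mask back.
 */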
void
vlan_extend_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on)
		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
	else
		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
}

void
rx_vlan_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on)
		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
	else
		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
}

void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
	if (diag < 0)
		printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, queue_id, on, diag);
}

void
rx_vlan_filter_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on)
		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
	else
		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
}
int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	if (vlan_id_is_invalid(vlan_id))
		return 1;
	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
	if (diag == 0)
		return 0;
	printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
	       "diag=%d\n",
	       port_id, vlan_id, on, diag);
	return -1;
}

void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
	uint16_t vlan_id;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
		if (rx_vft_set(port_id, vlan_id, on))
			break;
	}
}

void
vlan_tpid_set(portid_t port_id, uint16_t tp_id)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_ether_type(port_id, tp_id);
	if (diag == 0)
		return;

	printf("tx_vlan_tpid_set(port_pi=%d, tpid=%d) failed "
	       "diag=%d\n",
	       port_id, tp_id, diag);
}
void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (vlan_id_is_invalid(vlan_id))
		return;
	tx_vlan_reset(port_id);
	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN;
	ports[port_id].tx_vlan_id = vlan_id;
}

void
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (vlan_id_is_invalid(vlan_id))
		return;
	if (vlan_id_is_invalid(vlan_id_outer))
		return;
	tx_vlan_reset(port_id);
	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ;
	ports[port_id].tx_vlan_id = vlan_id;
	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}

void
tx_vlan_reset(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN |
					TESTPMD_TX_OFFLOAD_INSERT_QINQ);
	ports[port_id].tx_vlan_id = 0;
	ports[port_id].tx_vlan_id_outer = 0;
}

void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}
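/*
 * Record a queue-to-statistics-register mapping: update the existing
 * entry for the (port, queue) pair if one is found, otherwise append a
 * new entry to the RX or TX mapping table.
 */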
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	uint16_t i;
	uint8_t existing_mapping_found = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		printf("map_value not in required range 0..%d\n",
		       RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /* TX queue mapping */
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
				tx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
			nb_tx_queue_stats_mappings++;
		}
	}
	else { /* RX queue mapping */
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
				rx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
			nb_rx_queue_stats_mappings++;
		}
	}
}
static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n    vlan_tci: 0x%04x, ", mask->vlan_tci_mask);

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf("mac_addr: 0x%02x", mask->mac_addr_byte_mask);
	else if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf("mac_addr: 0x%02x, tunnel_type: 0x%01x, tunnel_id: 0x%08x",
		       mask->mac_addr_byte_mask, mask->tunnel_type_mask,
		       mask->tunnel_id_mask);
	else {
		printf("src_ipv4: 0x%08x, dst_ipv4: 0x%08x,"
		       " src_port: 0x%04x, dst_port: 0x%04x",
		       mask->ipv4_mask.src_ip, mask->ipv4_mask.dst_ip,
		       mask->src_port_mask, mask->dst_port_mask);

		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x,"
		       " dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       mask->ipv6_mask.src_ip[0], mask->ipv6_mask.src_ip[1],
		       mask->ipv6_mask.src_ip[2], mask->ipv6_mask.src_ip[3],
		       mask->ipv6_mask.dst_ip[0], mask->ipv6_mask.dst_ip[1],
		       mask->ipv6_mask.dst_ip[2], mask->ipv6_mask.dst_ip[3]);
	}

	printf("\n");
}

static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_flex_payload_cfg *cfg;
	uint32_t i, j;

	for (i = 0; i < flex_conf->nb_payloads; i++) {
		cfg = &flex_conf->flex_set[i];
		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
			printf("\n    RAW: ");
		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
			printf("\n    L2_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
			printf("\n    L3_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
			printf("\n    L4_PAYLOAD: ");
		else
			printf("\n    UNKNOWN PAYLOAD(%u): ", cfg->type);
		for (j = 0; j < num; j++)
			printf("  %-5u", cfg->src_offset[j]);
	}
	printf("\n");
}
static char *
flowtype_to_str(uint16_t flow_type)
{
	struct flow_type_info {
		char str[32];
		uint16_t ftype;
	};

	uint8_t i;
	static struct flow_type_info flowtype_str_table[] = {
		{"raw", RTE_ETH_FLOW_RAW},
		{"ipv4", RTE_ETH_FLOW_IPV4},
		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
		{"ipv6", RTE_ETH_FLOW_IPV6},
		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
	};

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (flowtype_str_table[i].ftype == flow_type)
			return flowtype_str_table[i].str;
	}

	return NULL;
}

static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_fdir_flex_mask *mask;
	uint32_t i, j;
	char *p;

	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
		mask = &flex_conf->flex_mask[i];
		p = flowtype_to_str(mask->flow_type);
		printf("\n    %s:\t", p ? p : "unknown");
		for (j = 0; j < num; j++)
			printf(" %02x", mask->mask[j]);
	}
	printf("\n");
}

static inline void
print_fdir_flow_type(uint32_t flow_types_mask)
{
	int i;
	char *p;

	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
		if (!(flow_types_mask & (1 << i)))
			continue;
		p = flowtype_to_str(i);
		if (p)
			printf(" %s", p);
		else
			printf(" unknown");
	}
	printf("\n");
}
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;
	int ret;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
	if (ret < 0) {
		printf("\n FDIR is not supported on port %-2d\n",
		       port_id);
		return;
	}

	memset(&fdir_info, 0, sizeof(fdir_info));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
			        RTE_ETH_FILTER_INFO, &fdir_info);
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
			        RTE_ETH_FILTER_STATS, &fdir_stat);
	printf("\n  %s FDIR infos for port %-2d %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf("  MODE: ");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf("  PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf("  PERFECT-MAC-VLAN\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf("  PERFECT-TUNNEL\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf("  SIGNATURE\n");
	else
		printf("  DISABLE\n");
	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
		printf("  SUPPORTED FLOW TYPE: ");
		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	}
	printf("  FLEX PAYLOAD INFO:\n");
	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
	       fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
	       fdir_info.flex_payload_unit,
	       fdir_info.max_flex_payload_segment_num,
	       fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf("  MASK: ");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf("  FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf("  FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
	       "  add:           %-10"PRIu64"  remove:        %"PRIu64"\n"
	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf("  %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}
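/*
 * Store a flow-director flex mask in the port configuration: reuse the
 * entry of a matching flow type if one exists, otherwise append a new
 * entry when the table has room.
 */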
void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Cannot set flex"
			       " mask for flow_type(%u).", cfg->flow_type);
			return;
		}
	}
	(void)rte_memcpy(&flex_conf->flex_mask[idx],
			 cfg,
			 sizeof(struct rte_eth_fdir_flex_mask));
}

void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			printf("The flex payload table is full. Cannot set"
			       " flex payload for type(%u).", cfg->type);
			return;
		}
	}
	(void)rte_memcpy(&flex_conf->flex_set[idx],
			 cfg,
			 sizeof(struct rte_eth_flex_payload_cfg));
}
void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (is_rx)
		diag = rte_eth_dev_set_vf_rx(port_id, vf, on);
	else
		diag = rte_eth_dev_set_vf_tx(port_id, vf, on);
	if (diag == 0)
		return;
	if (is_rx)
		printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
		       "diag=%d\n", port_id, diag);
	else
		printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
		       "diag=%d\n", port_id, diag);
}

void
set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (vlan_id_is_invalid(vlan_id))
		return;
	diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
	if (diag == 0)
		return;
	printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
	       "diag=%d\n", port_id, diag);
}
int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
{
	int diag;
	struct rte_eth_link link;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	rte_eth_link_get_nowait(port_id, &link);
	if (rate > link.link_speed) {
		printf("Invalid rate value:%u bigger than link speed: %u\n",
		       rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
	if (diag == 0)
		return diag;
	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}

int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int diag;
	struct rte_eth_link link;

	if (q_msk == 0)
		return 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	rte_eth_link_get_nowait(port_id, &link);
	if (rate > link.link_speed) {
		printf("Invalid rate value:%u bigger than link speed: %u\n",
		       rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk);
	if (diag == 0)
		return diag;
	printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}
/*
 * Functions to manage the set of filtered Multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
 * The address of the pool and the number of valid multicast MAC addresses
 * recorded in the pool are stored in the fields "mc_addr_pool" and
 * "mc_addr_nb" of the "rte_port" data structure.
 *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API requires
 * that it be supplied a contiguous array of multicast MAC addresses.
 * To comply with this constraint, the set of multicast addresses recorded
 * into the pool is systematically compacted at the beginning of the pool.
 * Hence, when a multicast address is removed from the pool, all following
 * addresses, if any, are copied back to keep the set contiguous.
 */
#define MCAST_POOL_INC 32

static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
						    MCAST_POOL_INC);
	mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
						mc_pool_size);
	if (mc_pool == NULL) {
		printf("allocation of pool of %u multicast addresses failed\n",
		       port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}

static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
}
static void
eth_port_multicast_addr_list_set(uint8_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag == 0)
		return;
	printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
	       port_id, port->mc_addr_nb, -diag);
}

void
mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	if (mcast_addr_pool_extend(port) != 0)
		return;
	ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
	eth_port_multicast_addr_list_set(port_id);
}

void
mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	eth_port_multicast_addr_list_set(port_id);
}
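/*
 * Display the DCB traffic-class configuration of a port: the priority
 * mapping, bandwidth percentage and RX/TX queue range of each TC.
 */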
void
port_dcb_info_display(uint8_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get dcb infos on port %-2d\n",
		       port_id);
		return;
	}
	printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n  TC :        ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n  Priority :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n  BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n  RXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n  RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n  TXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n  TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}