#include <termios.h>
#include <unistd.h>
#include <inttypes.h>
-#ifndef __linux__
-#ifndef __FreeBSD__
-#include <net/socket.h>
-#else
+#ifdef __FreeBSD__
#include <sys/socket.h>
#endif
-#endif
#include <netinet/in.h>
#include <sys/queue.h>
" Show Tx metadata value set"
" for a specific port\n\n"
+ "show port (port_id) ptypes\n"
+ " Show port supported ptypes"
+ " for a specific port\n\n"
+
"show device info (<identifier>|all)"
" Show general information about devices probed.\n\n"
);
"set vf broadcast (port_id) (vf_id) (on|off)\n"
" Set VF broadcast for a VF from the PF.\n\n"
- "vlan set strip (on|off) (port_id)\n"
- " Set the VLAN strip on a port.\n\n"
-
"vlan set stripq (on|off) (port_id,queue_id)\n"
" Set the VLAN strip for a queue on a port.\n\n"
"set tc tx min-bandwidth (port_id) (bw1, bw2, ...)\n"
" Set all TCs' min bandwidth(%%) for all PF and VFs.\n\n"
- "vlan set filter (on|off) (port_id)\n"
- " Set the VLAN filter on a port.\n\n"
-
- "vlan set qinq (on|off) (port_id)\n"
- " Set the VLAN QinQ (extended queue in queue)"
- " on a port.\n\n"
+ "vlan set (strip|filter|qinq_strip|extend) (on|off) (port_id)\n"
+ " Set the VLAN strip or filter or qinq strip or extend\n\n"
"vlan set (inner|outer) tpid (value) (port_id)\n"
" Set the VLAN TPID for Packet Filtering on"
"port close (port_id|all)\n"
" Close all ports or port_id.\n\n"
+ "port reset (port_id|all)\n"
+ " Reset all ports or port_id.\n\n"
+
"port attach (ident)\n"
" Attach physical or virtual dev by pci address or virtual device name\n\n"
"port config <port_id> rx_offload vlan_strip|"
"ipv4_cksum|udp_cksum|tcp_cksum|tcp_lro|qinq_strip|"
"outer_ipv4_cksum|macsec_strip|header_split|"
- "vlan_filter|vlan_extend|jumbo_frame|crc_strip|"
+ "vlan_filter|vlan_extend|jumbo_frame|"
"scatter|timestamp|security|keep_crc on|off\n"
" Enable or disable a per port Rx offloading"
" on all Rx queues of a port\n\n"
"port (port_id) rxq (queue_id) rx_offload vlan_strip|"
"ipv4_cksum|udp_cksum|tcp_cksum|tcp_lro|qinq_strip|"
"outer_ipv4_cksum|macsec_strip|header_split|"
- "vlan_filter|vlan_extend|jumbo_frame|crc_strip|"
+ "vlan_filter|vlan_extend|jumbo_frame|"
"scatter|timestamp|security|keep_crc on|off\n"
" Enable or disable a per queue Rx offloading"
" only on a specific Rx queue\n\n"
"get_hash_global_config (port_id)\n"
" Get the global configurations of hash filters.\n\n"
- "set_hash_global_config (port_id) (toeplitz|simple_xor|default)"
+ "set_hash_global_config (port_id) (toeplitz|simple_xor|symmetric_toeplitz|default)"
" (ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|"
"ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload)"
" (enable|disable)\n"
int all_updated = 1;
int diag;
uint16_t i;
+ int ret;
if (!strcmp(res->value, "all"))
rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP |
rss_conf.rss_hf = ETH_RSS_GENEVE;
else if (!strcmp(res->value, "nvgre"))
rss_conf.rss_hf = ETH_RSS_NVGRE;
+ else if (!strcmp(res->value, "l3-src-only"))
+ rss_conf.rss_hf = ETH_RSS_L3_SRC_ONLY;
+ else if (!strcmp(res->value, "l3-dst-only"))
+ rss_conf.rss_hf = ETH_RSS_L3_DST_ONLY;
+ else if (!strcmp(res->value, "l4-src-only"))
+ rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
+ else if (!strcmp(res->value, "l4-dst-only"))
+ rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
else if (!strcmp(res->value, "none"))
rss_conf.rss_hf = 0;
else if (!strcmp(res->value, "default"))
RTE_ETH_FOREACH_DEV(i) {
struct rte_eth_rss_conf local_rss_conf;
- rte_eth_dev_info_get(i, &dev_info);
+ ret = eth_dev_info_get_print_err(i, &dev_info);
+ if (ret != 0)
+ return;
+
if (use_default)
rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
struct rte_eth_dev_info dev_info;
uint8_t hash_key_size;
uint32_t key_len;
+ int ret;
+
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
- rte_eth_dev_info_get(res->port_id, &dev_info);
if (dev_info.hash_key_size > 0 &&
dev_info.hash_key_size <= sizeof(hash_key))
hash_key_size = dev_info.hash_key_size;
"ipv4#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#"
"ipv4-other#ipv6#ipv6-frag#ipv6-tcp#ipv6-udp#"
"ipv6-sctp#ipv6-other#l2-payload#ipv6-ex#"
- "ipv6-tcp-ex#ipv6-udp-ex");
+ "ipv6-tcp-ex#ipv6-udp-ex#"
+ "l3-src-only#l3-dst-only#l4-src-only#l4-dst-only");
cmdline_parse_token_string_t cmd_config_rss_hash_key_value =
TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, key, NULL);
.help_str = "port config <port_id> rss-hash-key "
"ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
"ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
- "l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex "
+ "l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex|"
+ "l3-src-only|l3-dst-only|l4-src-only|l4-dst-only "
"<string of hex digits (variable length, NIC dependent)>",
.tokens = {
(void *)&cmd_config_rss_hash_key_port,
struct rte_eth_rss_reta_entry64 reta_conf[8];
struct cmd_config_rss_reta *res = parsed_result;
- rte_eth_dev_info_get(res->port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (dev_info.reta_size == 0) {
printf("Redirection table size is 0 which is "
"invalid for RSS\n");
struct rte_eth_rss_reta_entry64 reta_conf[8];
struct rte_eth_dev_info dev_info;
uint16_t max_reta_size;
+ int ret;
+
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
- rte_eth_dev_info_get(res->port_id, &dev_info);
max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
if (res->size == 0 || res->size > max_reta_size) {
printf("Invalid redirection table size: %u (1-%u)\n",
struct cmd_config_burst *res = parsed_result;
struct rte_eth_dev_info dev_info;
uint16_t rec_nb_pkts;
+ int ret;
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
* size for all ports, so assume all ports are the same
* NIC model and use the values from Port 0.
*/
- rte_eth_dev_info_get(0, &dev_info);
+ ret = eth_dev_info_get_print_err(0, &dev_info);
+ if (ret != 0)
+ return;
+
rec_nb_pkts = dev_info.default_rxportconf.burst_size;
if (rec_nb_pkts == 0) {
}
else if (!strcmp(res->what, "filter"))
rx_vlan_filter_set(port_id, on);
+ else if (!strcmp(res->what, "qinq_strip"))
+ rx_vlan_qinq_strip_set(port_id, on);
else
vlan_extend_set(port_id, on);
set, "set");
cmdline_parse_token_string_t cmd_vlan_offload_what =
TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result,
- what, "strip#filter#qinq#stripq");
+ what, "strip#filter#qinq_strip#extend#stripq");
cmdline_parse_token_string_t cmd_vlan_offload_on =
TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result,
on, "on#off");
cmdline_parse_inst_t cmd_vlan_offload = {
.f = cmd_vlan_offload_parsed,
.data = NULL,
- .help_str = "vlan set strip|filter|qinq|stripq on|off "
+ .help_str = "vlan set strip|filter|qinq_strip|extend|stripq on|off "
"<port_id[,queue_id]>: "
- "Filter/Strip for rx side qinq(extended) for both rx/tx sides",
+ "Strip/Filter/QinQ for rx side Extend for both rx/tx sides",
.tokens = {
(void *)&cmd_vlan_offload_vlan,
(void *)&cmd_vlan_offload_set,
{
struct rte_eth_dev_info dev_info;
uint64_t tx_offloads;
+ int ret;
tx_offloads = ports[port_id].dev_conf.txmode.offloads;
printf("Parse tunnel is %s\n",
(tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
/* display warnings if configuration is not supported by the NIC */
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
printf("Warning: hardware IP checksum enabled but not "
int hw = 0;
uint64_t csum_offloads = 0;
struct rte_eth_dev_info dev_info;
+ int ret;
if (port_id_is_invalid(res->port_id, ENABLED_WARN)) {
printf("invalid port %d\n", res->port_id);
return;
}
- rte_eth_dev_info_get(res->port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (!strcmp(res->mode, "set")) {
if (!strcmp(res->hwsw, "hw"))
{
struct cmd_tso_set_result *res = parsed_result;
struct rte_eth_dev_info dev_info;
+ int ret;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
if (!strcmp(res->mode, "set"))
ports[res->port_id].tso_segsz = res->tso_segsz;
- rte_eth_dev_info_get(res->port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if ((ports[res->port_id].tso_segsz != 0) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
printf("Error: TSO is not supported by port %d\n",
cmd_config_queue_tx_offloads(&ports[res->port_id]);
/* display warnings if configuration is not supported by the NIC */
- rte_eth_dev_info_get(res->port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if ((ports[res->port_id].tso_segsz != 0) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
printf("Warning: TSO enabled but not "
{
struct rte_eth_dev_info dev_info;
- rte_eth_dev_info_get(port_id, &dev_info);
+ if (eth_dev_info_get_print_err(port_id, &dev_info) != 0)
+ return dev_info;
+
if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
printf("Warning: VXLAN TUNNEL TSO not supported therefore "
"not enabled for port %d\n", port_id);
struct cmd_create_bonded_device_result *res = parsed_result;
char ethdev_name[RTE_ETH_NAME_MAX_LEN];
int port_id;
+ int ret;
if (test_done == 0) {
printf("Please stop forwarding first\n");
/* Update number of ports */
nb_ports = rte_eth_dev_count_avail();
reconfig(port_id, res->socket);
- rte_eth_promiscuous_enable(port_id);
+ ret = rte_eth_promiscuous_enable(port_id);
+ if (ret != 0)
+ printf("Failed to enable promiscuous mode for port %u: %s - ignore\n",
+ port_id, rte_strerror(-ret));
+
ports[port_id].need_setup = 0;
ports[port_id].port_status = RTE_PORT_STOPPED;
}
/* all ports */
if (allports) {
- RTE_ETH_FOREACH_DEV(i) {
- if (enable)
- rte_eth_promiscuous_enable(i);
- else
- rte_eth_promiscuous_disable(i);
- }
- }
- else {
- if (enable)
- rte_eth_promiscuous_enable(res->port_num);
- else
- rte_eth_promiscuous_disable(res->port_num);
+ RTE_ETH_FOREACH_DEV(i)
+ eth_set_promisc_mode(i, enable);
+ } else {
+ eth_set_promisc_mode(res->port_num, enable);
}
}
/* all ports */
if (allports) {
RTE_ETH_FOREACH_DEV(i) {
- if (enable)
- rte_eth_allmulticast_enable(i);
- else
- rte_eth_allmulticast_disable(i);
+ eth_set_allmulticast_mode(i, enable);
}
}
else {
- if (enable)
- rte_eth_allmulticast_enable(res->port_num);
- else
- rte_eth_allmulticast_disable(res->port_num);
+ eth_set_allmulticast_mode(res->port_num, enable);
}
}
else if (!strncmp(res->pf_vf, "vf", 2)) {
struct rte_eth_dev_info dev_info;
- rte_eth_dev_info_get(res->port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(res->port_id,
+ &dev_info);
+ if (ret != 0)
+ return;
+
errno = 0;
vf_id = strtoul(res->pf_vf + 2, &end, 10);
if (errno != 0 || *end != '\0' ||
case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
printf("Hash function is Simple XOR\n");
break;
+ case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
+ printf("Hash function is Symmetric Toeplitz\n");
+ break;
default:
printf("Unknown hash function\n");
break;
else if (!strcmp(res->hash_func, "simple_xor"))
info.info.global_conf.hash_func =
RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+ else if (!strcmp(res->hash_func, "symmetric_toeplitz"))
+ info.info.global_conf.hash_func =
+ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
else if (!strcmp(res->hash_func, "default"))
info.info.global_conf.hash_func =
RTE_ETH_HASH_FUNCTION_DEFAULT;
port_id, UINT16);
cmdline_parse_token_string_t cmd_set_hash_global_config_hash_func =
TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
- hash_func, "toeplitz#simple_xor#default");
+ hash_func, "toeplitz#simple_xor#symmetric_toeplitz#default");
cmdline_parse_token_string_t cmd_set_hash_global_config_flow_type =
TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
flow_type,
.f = cmd_set_hash_global_config_parsed,
.data = NULL,
.help_str = "set_hash_global_config <port_id> "
- "toeplitz|simple_xor|default "
+ "toeplitz|simple_xor|symmetric_toeplitz|default "
"ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
"ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
"l2_payload enable|disable",
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_macsec_disable(port_id);
portid_t port_id = res->port_id;
uint64_t queue_offloads;
uint64_t port_offloads;
+ int ret;
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
- rte_eth_dev_info_get(port_id, &dev_info);
queue_offloads = dev_info.rx_queue_offload_capa;
port_offloads = dev_info.rx_offload_capa ^ queue_offloads;
uint64_t queue_offloads;
uint16_t nb_rx_queues;
int q;
+ int ret;
printf("Rx Offloading Configuration of port %d :\n", port_id);
print_rx_offloads(port_offloads);
printf("\n");
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
nb_rx_queues = dev_info.nb_rx_queues;
for (q = 0; q < nb_rx_queues; q++) {
queue_offloads = port->rx_conf[q].offloads;
offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#"
"qinq_strip#outer_ipv4_cksum#macsec_strip#"
"header_split#vlan_filter#vlan_extend#jumbo_frame#"
- "crc_strip#scatter#timestamp#security#keep_crc");
+ "scatter#timestamp#security#keep_crc");
cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_on_off =
TOKEN_STRING_INITIALIZER
(struct cmd_config_per_port_rx_offload_result,
uint64_t single_offload;
uint16_t nb_rx_queues;
int q;
+ int ret;
if (port->port_status != RTE_PORT_STOPPED) {
printf("Error: Can't config offload when Port %d "
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
nb_rx_queues = dev_info.nb_rx_queues;
if (!strcmp(res->on_off, "on")) {
port->dev_conf.rxmode.offloads |= single_offload;
.help_str = "port config <port_id> rx_offload vlan_strip|ipv4_cksum|"
"udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|"
"macsec_strip|header_split|vlan_filter|vlan_extend|"
- "jumbo_frame|crc_strip|scatter|timestamp|security|keep_crc "
+ "jumbo_frame|scatter|timestamp|security|keep_crc "
"on|off",
.tokens = {
(void *)&cmd_config_per_port_rx_offload_result_port,
offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#"
"qinq_strip#outer_ipv4_cksum#macsec_strip#"
"header_split#vlan_filter#vlan_extend#jumbo_frame#"
- "crc_strip#scatter#timestamp#security#keep_crc");
+ "scatter#timestamp#security#keep_crc");
cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_on_off =
TOKEN_STRING_INITIALIZER
(struct cmd_config_per_queue_rx_offload_result,
uint16_t queue_id = res->queue_id;
struct rte_port *port = &ports[port_id];
uint64_t single_offload;
+ int ret;
if (port->port_status != RTE_PORT_STOPPED) {
printf("Error: Can't config offload when Port %d "
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (queue_id >= dev_info.nb_rx_queues) {
printf("Error: input queue_id should be 0 ... "
"%d\n", dev_info.nb_rx_queues - 1);
"vlan_strip|ipv4_cksum|"
"udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|"
"macsec_strip|header_split|vlan_filter|vlan_extend|"
- "jumbo_frame|crc_strip|scatter|timestamp|security|keep_crc "
+ "jumbo_frame|scatter|timestamp|security|keep_crc "
"on|off",
.tokens = {
(void *)&cmd_config_per_queue_rx_offload_result_port,
portid_t port_id = res->port_id;
uint64_t queue_offloads;
uint64_t port_offloads;
+ int ret;
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
- rte_eth_dev_info_get(port_id, &dev_info);
queue_offloads = dev_info.tx_queue_offload_capa;
port_offloads = dev_info.tx_offload_capa ^ queue_offloads;
uint64_t queue_offloads;
uint16_t nb_tx_queues;
int q;
+ int ret;
printf("Tx Offloading Configuration of port %d :\n", port_id);
print_tx_offloads(port_offloads);
printf("\n");
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
nb_tx_queues = dev_info.nb_tx_queues;
for (q = 0; q < nb_tx_queues; q++) {
queue_offloads = port->tx_conf[q].offloads;
uint64_t single_offload;
uint16_t nb_tx_queues;
int q;
+ int ret;
if (port->port_status != RTE_PORT_STOPPED) {
printf("Error: Can't config offload when Port %d "
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
nb_tx_queues = dev_info.nb_tx_queues;
if (!strcmp(res->on_off, "on")) {
port->dev_conf.txmode.offloads |= single_offload;
uint16_t queue_id = res->queue_id;
struct rte_port *port = &ports[port_id];
uint64_t single_offload;
+ int ret;
if (port->port_status != RTE_PORT_STOPPED) {
printf("Error: Can't config offload when Port %d "
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (queue_id >= dev_info.nb_tx_queues) {
printf("Error: input queue_id should be 0 ... "
"%d\n", dev_info.nb_tx_queues - 1);
},
};
+/* show port supported ptypes */
+
+/* Common result structure for show port ptypes */
+struct cmd_show_port_supported_ptypes_result {
+	cmdline_fixed_string_t show;	/* fixed "show" keyword */
+	cmdline_fixed_string_t port;	/* fixed "port" keyword */
+	portid_t port_id;		/* ethdev port to query */
+	cmdline_fixed_string_t ptypes;	/* fixed "ptypes" keyword */
+};
+
+/* Common CLI fields for show port ptypes */
+/* Matches the literal "show" keyword. */
+cmdline_parse_token_string_t cmd_show_port_supported_ptypes_show =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_show_port_supported_ptypes_result,
+		 show, "show");
+/* Matches the literal "port" keyword. */
+cmdline_parse_token_string_t cmd_show_port_supported_ptypes_port =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_show_port_supported_ptypes_result,
+		 port, "port");
+/* Parses the numeric port identifier. */
+cmdline_parse_token_num_t cmd_show_port_supported_ptypes_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_show_port_supported_ptypes_result,
+		 port_id, UINT16);
+/* Matches the literal "ptypes" keyword. */
+cmdline_parse_token_string_t cmd_show_port_supported_ptypes_ptypes =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_show_port_supported_ptypes_result,
+		 ptypes, "ptypes");
+
+/*
+ * Handler for "show port <port_id> ptypes": walks every 4-bit ptype
+ * layer (L2, L3, L4, tunnel, inner L2/L3/L4) and prints the packet
+ * types the port's driver reports for that layer.
+ */
+static void
+cmd_show_port_supported_ptypes_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+#define RSVD_PTYPE_MASK 0xf0000000
+#define MAX_PTYPES_PER_LAYER 16
+#define LTYPE_NAMESIZE 32
+#define PTYPE_NAMESIZE 256
+	struct cmd_show_port_supported_ptypes_result *res = parsed_result;
+	char buf[PTYPE_NAMESIZE], ltype[LTYPE_NAMESIZE];
+	uint32_t ptype_mask = RTE_PTYPE_L2_MASK;
+	uint32_t ptypes[MAX_PTYPES_PER_LAYER];
+	uint16_t port_id = res->port_id;
+	int ret, i;
+
+	/* Probe once (NULL buffer) to bail out early when the driver
+	 * implements no ptype reporting at all.
+	 */
+	ret = rte_eth_dev_get_supported_ptypes(port_id, ptype_mask, NULL, 0);
+	if (ret < 0)
+		return;
+
+	/* Each layer occupies one 4-bit nibble of the ptype word;
+	 * shifting the mask left by 4 advances to the next layer until
+	 * the reserved top nibble is reached.
+	 */
+	while (ptype_mask != RSVD_PTYPE_MASK) {
+
+		switch (ptype_mask) {
+		case RTE_PTYPE_L2_MASK:
+			strlcpy(ltype, "L2", sizeof(ltype));
+			break;
+		case RTE_PTYPE_L3_MASK:
+			strlcpy(ltype, "L3", sizeof(ltype));
+			break;
+		case RTE_PTYPE_L4_MASK:
+			strlcpy(ltype, "L4", sizeof(ltype));
+			break;
+		case RTE_PTYPE_TUNNEL_MASK:
+			strlcpy(ltype, "Tunnel", sizeof(ltype));
+			break;
+		case RTE_PTYPE_INNER_L2_MASK:
+			strlcpy(ltype, "Inner L2", sizeof(ltype));
+			break;
+		case RTE_PTYPE_INNER_L3_MASK:
+			strlcpy(ltype, "Inner L3", sizeof(ltype));
+			break;
+		case RTE_PTYPE_INNER_L4_MASK:
+			strlcpy(ltype, "Inner L4", sizeof(ltype));
+			break;
+		default:
+			return;
+		}
+
+		ret = rte_eth_dev_get_supported_ptypes(port_id,
+						       ptype_mask, ptypes,
+						       MAX_PTYPES_PER_LAYER);
+
+		if (ret > 0)
+			printf("Supported %s ptypes:\n", ltype);
+		else
+			printf("%s ptypes unsupported\n", ltype);
+
+		/* The API returns the total count of supported ptypes,
+		 * which may exceed the array size supplied (only the
+		 * first MAX_PTYPES_PER_LAYER entries are filled in).
+		 * Clamp before indexing to avoid reading past ptypes[].
+		 */
+		if (ret > MAX_PTYPES_PER_LAYER)
+			ret = MAX_PTYPES_PER_LAYER;
+
+		for (i = 0; i < ret; ++i) {
+			rte_get_ptype_name(ptypes[i], buf, sizeof(buf));
+			printf("%s\n", buf);
+		}
+
+		ptype_mask <<= 4;
+	}
+}
+
+/* Command instance binding the "show port <port_id> ptypes" tokens
+ * to their handler.
+ */
+cmdline_parse_inst_t cmd_show_port_supported_ptypes = {
+	.f = cmd_show_port_supported_ptypes_parsed,
+	.data = NULL,
+	.help_str = "show port <port_id> ptypes",
+	.tokens = {
+		(void *)&cmd_show_port_supported_ptypes_show,
+		(void *)&cmd_show_port_supported_ptypes_port,
+		(void *)&cmd_show_port_supported_ptypes_port_id,
+		(void *)&cmd_show_port_supported_ptypes_ptypes,
+		NULL,
+	},
+};
+
/* ******************************************************************************** */
/* list of instructions */
(cmdline_parse_inst_t *)&cmd_clear_input_set,
(cmdline_parse_inst_t *)&cmd_show_vf_stats,
(cmdline_parse_inst_t *)&cmd_clear_vf_stats,
+ (cmdline_parse_inst_t *)&cmd_show_port_supported_ptypes,
(cmdline_parse_inst_t *)&cmd_ptype_mapping_get,
(cmdline_parse_inst_t *)&cmd_ptype_mapping_replace,
(cmdline_parse_inst_t *)&cmd_ptype_mapping_reset,