int all_updated = 1;
int diag;
uint16_t i;
+ int ret;
if (!strcmp(res->value, "all"))
rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP |
RTE_ETH_FOREACH_DEV(i) {
struct rte_eth_rss_conf local_rss_conf;
- rte_eth_dev_info_get(i, &dev_info);
+ ret = eth_dev_info_get_print_err(i, &dev_info);
+ if (ret != 0)
+ return;
+
if (use_default)
rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
struct rte_eth_dev_info dev_info;
uint8_t hash_key_size;
uint32_t key_len;
+ int ret;
+
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
- rte_eth_dev_info_get(res->port_id, &dev_info);
if (dev_info.hash_key_size > 0 &&
dev_info.hash_key_size <= sizeof(hash_key))
hash_key_size = dev_info.hash_key_size;
struct rte_eth_rss_reta_entry64 reta_conf[8];
struct cmd_config_rss_reta *res = parsed_result;
- rte_eth_dev_info_get(res->port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (dev_info.reta_size == 0) {
printf("Redirection table size is 0 which is "
"invalid for RSS\n");
struct rte_eth_rss_reta_entry64 reta_conf[8];
struct rte_eth_dev_info dev_info;
uint16_t max_reta_size;
+ int ret;
+
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
- rte_eth_dev_info_get(res->port_id, &dev_info);
max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
if (res->size == 0 || res->size > max_reta_size) {
printf("Invalid redirection table size: %u (1-%u)\n",
struct cmd_config_burst *res = parsed_result;
struct rte_eth_dev_info dev_info;
uint16_t rec_nb_pkts;
+ int ret;
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
* size for all ports, so assume all ports are the same
* NIC model and use the values from Port 0.
*/
- rte_eth_dev_info_get(0, &dev_info);
+ ret = eth_dev_info_get_print_err(0, &dev_info);
+ if (ret != 0)
+ return;
+
rec_nb_pkts = dev_info.default_rxportconf.burst_size;
if (rec_nb_pkts == 0) {
{
struct rte_eth_dev_info dev_info;
uint64_t tx_offloads;
+ int ret;
tx_offloads = ports[port_id].dev_conf.txmode.offloads;
printf("Parse tunnel is %s\n",
(tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
/* display warnings if configuration is not supported by the NIC */
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
printf("Warning: hardware IP checksum enabled but not "
int hw = 0;
uint64_t csum_offloads = 0;
struct rte_eth_dev_info dev_info;
+ int ret;
if (port_id_is_invalid(res->port_id, ENABLED_WARN)) {
printf("invalid port %d\n", res->port_id);
return;
}
- rte_eth_dev_info_get(res->port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (!strcmp(res->mode, "set")) {
if (!strcmp(res->hwsw, "hw"))
{
struct cmd_tso_set_result *res = parsed_result;
struct rte_eth_dev_info dev_info;
+ int ret;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
if (!strcmp(res->mode, "set"))
ports[res->port_id].tso_segsz = res->tso_segsz;
- rte_eth_dev_info_get(res->port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if ((ports[res->port_id].tso_segsz != 0) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
printf("Error: TSO is not supported by port %d\n",
cmd_config_queue_tx_offloads(&ports[res->port_id]);
/* display warnings if configuration is not supported by the NIC */
- rte_eth_dev_info_get(res->port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if ((ports[res->port_id].tso_segsz != 0) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
printf("Warning: TSO enabled but not "
{
struct rte_eth_dev_info dev_info;
- rte_eth_dev_info_get(port_id, &dev_info);
+ if (eth_dev_info_get_print_err(port_id, &dev_info) != 0)
+ return dev_info;
+
if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
printf("Warning: VXLAN TUNNEL TSO not supported therefore "
"not enabled for port %d\n", port_id);
else if (!strncmp(res->pf_vf, "vf", 2)) {
struct rte_eth_dev_info dev_info;
- rte_eth_dev_info_get(res->port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(res->port_id,
+ &dev_info);
+ if (ret != 0)
+ return;
+
errno = 0;
vf_id = strtoul(res->pf_vf + 2, &end, 10);
if (errno != 0 || *end != '\0' ||
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_macsec_disable(port_id);
portid_t port_id = res->port_id;
uint64_t queue_offloads;
uint64_t port_offloads;
+ int ret;
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
- rte_eth_dev_info_get(port_id, &dev_info);
queue_offloads = dev_info.rx_queue_offload_capa;
port_offloads = dev_info.rx_offload_capa ^ queue_offloads;
uint64_t queue_offloads;
uint16_t nb_rx_queues;
int q;
+ int ret;
printf("Rx Offloading Configuration of port %d :\n", port_id);
print_rx_offloads(port_offloads);
printf("\n");
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
nb_rx_queues = dev_info.nb_rx_queues;
for (q = 0; q < nb_rx_queues; q++) {
queue_offloads = port->rx_conf[q].offloads;
uint64_t single_offload;
uint16_t nb_rx_queues;
int q;
+ int ret;
if (port->port_status != RTE_PORT_STOPPED) {
printf("Error: Can't config offload when Port %d "
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
nb_rx_queues = dev_info.nb_rx_queues;
if (!strcmp(res->on_off, "on")) {
port->dev_conf.rxmode.offloads |= single_offload;
uint16_t queue_id = res->queue_id;
struct rte_port *port = &ports[port_id];
uint64_t single_offload;
+ int ret;
if (port->port_status != RTE_PORT_STOPPED) {
printf("Error: Can't config offload when Port %d "
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (queue_id >= dev_info.nb_rx_queues) {
printf("Error: input queue_id should be 0 ... "
"%d\n", dev_info.nb_rx_queues - 1);
portid_t port_id = res->port_id;
uint64_t queue_offloads;
uint64_t port_offloads;
+ int ret;
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
- rte_eth_dev_info_get(port_id, &dev_info);
queue_offloads = dev_info.tx_queue_offload_capa;
port_offloads = dev_info.tx_offload_capa ^ queue_offloads;
uint64_t queue_offloads;
uint16_t nb_tx_queues;
int q;
+ int ret;
printf("Tx Offloading Configuration of port %d :\n", port_id);
print_tx_offloads(port_offloads);
printf("\n");
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
nb_tx_queues = dev_info.nb_tx_queues;
for (q = 0; q < nb_tx_queues; q++) {
queue_offloads = port->tx_conf[q].offloads;
uint64_t single_offload;
uint16_t nb_tx_queues;
int q;
+ int ret;
if (port->port_status != RTE_PORT_STOPPED) {
printf("Error: Can't config offload when Port %d "
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
nb_tx_queues = dev_info.nb_tx_queues;
if (!strcmp(res->on_off, "on")) {
port->dev_conf.txmode.offloads |= single_offload;
uint16_t queue_id = res->queue_id;
struct rte_port *port = &ports[port_id];
uint64_t single_offload;
+ int ret;
if (port->port_status != RTE_PORT_STOPPED) {
printf("Error: Can't config offload when Port %d "
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (queue_id >= dev_info.nb_tx_queues) {
printf("Error: input queue_id should be 0 ... "
"%d\n", dev_info.nb_tx_queues - 1);
if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
ctx->port != (portid_t)RTE_PORT_ALL) {
struct rte_eth_dev_info info;
+ int ret2;
+
+ ret2 = rte_eth_dev_info_get(ctx->port, &info);
+ if (ret2 != 0)
+ return ret2;
- rte_eth_dev_info_get(ctx->port, &info);
action_rss_data->conf.key_len =
RTE_MIN(sizeof(action_rss_data->key),
info.hash_key_size);
static const char *info_border = "*********************";
uint16_t mtu;
char name[RTE_ETH_NAME_MAX_LEN];
+ int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
print_valid_ports();
}
port = &ports[port_id];
rte_eth_link_get_nowait(port_id, &link);
- rte_eth_dev_info_get(port_id, &dev_info);
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
printf("\n%s Infos for port %-2d %s\n",
info_border, port_id, info_border);
rte_eth_macaddr_get(port_id, &mac_addr);
struct rte_eth_link link;
struct rte_eth_dev_info dev_info;
char name[RTE_ETH_NAME_MAX_LEN];
+ int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
print_valid_ports();
}
rte_eth_link_get_nowait(port_id, &link);
- rte_eth_dev_info_get(port_id, &dev_info);
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
rte_eth_dev_get_name_by_port(port_id, name);
rte_eth_macaddr_get(port_id, &mac_addr);
{
struct rte_eth_dev_info dev_info;
static const char *info_border = "************";
+ int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
printf("\n%s Port %d supported offload features: %s\n",
info_border, port_id, info_border);
{
int diag;
struct rte_eth_dev_info dev_info;
+ int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
- rte_eth_dev_info_get(port_id, &dev_info);
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
mtu, dev_info.min_mtu, dev_info.max_mtu);
#endif
uint16_t desc_id)
{
+ int ret;
struct igb_ring_desc_16_bytes *ring =
(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
struct rte_eth_dev_info dev_info;
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (strstr(dev_info.driver_name, "i40e") != NULL) {
/* 32 bytes RX descriptor, i40e only */
struct igb_ring_desc_32_bytes *ring =
int diag;
struct rte_eth_dev_info dev_info;
uint8_t hash_key_size;
+ int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if (dev_info.hash_key_size > 0 &&
dev_info.hash_key_size <= sizeof(rss_key))
hash_key_size = dev_info.hash_key_size;
{
struct rte_eth_dev_info dev_info;
uint16_t queue;
+ int ret;
if (port_id_is_invalid(portid, ENABLED_WARN))
return;
- rte_eth_dev_info_get(portid, &dev_info);
+ ret = eth_dev_info_get_print_err(portid, &dev_info);
+ if (ret != 0)
+ return;
+
for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
if (!ports[portid].rx_dump_cb[queue])
ports[portid].rx_dump_cb[queue] =
{
struct rte_eth_dev_info dev_info;
uint16_t queue;
+ int ret;
if (port_id_is_invalid(portid, ENABLED_WARN))
return;
- rte_eth_dev_info_get(portid, &dev_info);
+
+ ret = eth_dev_info_get_print_err(portid, &dev_info);
+ if (ret != 0)
+ return;
+
for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
if (!ports[portid].tx_dump_cb[queue])
ports[portid].tx_dump_cb[queue] =
{
struct rte_eth_dev_info dev_info;
uint16_t queue;
+ int ret;
if (port_id_is_invalid(portid, ENABLED_WARN))
return;
- rte_eth_dev_info_get(portid, &dev_info);
+
+ ret = eth_dev_info_get_print_err(portid, &dev_info);
+ if (ret != 0)
+ return;
+
for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
if (ports[portid].rx_dump_cb[queue]) {
rte_eth_remove_rx_callback(portid, queue,
{
struct rte_eth_dev_info dev_info;
uint16_t queue;
+ int ret;
if (port_id_is_invalid(portid, ENABLED_WARN))
return;
- rte_eth_dev_info_get(portid, &dev_info);
+
+ ret = eth_dev_info_get_print_err(portid, &dev_info);
+ if (ret != 0)
+ return;
+
for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
if (ports[portid].tx_dump_cb[queue]) {
rte_eth_remove_tx_callback(portid, queue,
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
struct rte_eth_dev_info dev_info;
+ int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
printf("Error, as QinQ has been enabled.\n");
return;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
printf("Error: vlan insert is not supported by port %d\n",
port_id);
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
struct rte_eth_dev_info dev_info;
+ int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
if (vlan_id_is_invalid(vlan_id_outer))
return;
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ return;
+
if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
printf("Error: qinq insert not supported by port %d\n",
port_id);
uint64_t tx_offloads = tx_mode.offloads;
struct rte_eth_dev_info dev_info;
uint16_t rec_nb_pkts;
+ int ret;
static struct option lgopts[] = {
{ "help", 0, 0, 0 },
* value, on the assumption that all
* ports are of the same NIC model.
*/
- rte_eth_dev_info_get(0, &dev_info);
+ ret = eth_dev_info_get_print_err(
+ 0,
+ &dev_info);
+ if (ret != 0)
+ return;
+
rec_nb_pkts = dev_info
.default_rxportconf.burst_size;
get_allowed_max_nb_rxq(portid_t *pid)
{
queueid_t allowed_max_rxq = MAX_QUEUE_ID;
+ bool max_rxq_valid = false;
portid_t pi;
struct rte_eth_dev_info dev_info;
RTE_ETH_FOREACH_DEV(pi) {
- rte_eth_dev_info_get(pi, &dev_info);
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ max_rxq_valid = true;
if (dev_info.max_rx_queues < allowed_max_rxq) {
allowed_max_rxq = dev_info.max_rx_queues;
*pid = pi;
}
}
- return allowed_max_rxq;
+ return max_rxq_valid ? allowed_max_rxq : 0;
}
/*
get_allowed_max_nb_txq(portid_t *pid)
{
queueid_t allowed_max_txq = MAX_QUEUE_ID;
+ bool max_txq_valid = false;
portid_t pi;
struct rte_eth_dev_info dev_info;
RTE_ETH_FOREACH_DEV(pi) {
- rte_eth_dev_info_get(pi, &dev_info);
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ max_txq_valid = true;
if (dev_info.max_tx_queues < allowed_max_txq) {
allowed_max_txq = dev_info.max_tx_queues;
*pid = pi;
}
}
- return allowed_max_txq;
+ return max_txq_valid ? allowed_max_txq : 0;
}
/*
uint16_t data_size;
bool warning = 0;
int k;
+ int ret;
memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
/* Apply default TxRx configuration for all ports */
port->dev_conf.txmode = tx_mode;
port->dev_conf.rxmode = rx_mode;
- rte_eth_dev_info_get(pid, &port->dev_info);
+
+ ret = eth_dev_info_get_print_err(pid, &port->dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_info_get() failed\n");
if (!(port->dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_MBUF_FAST_FREE))
reconfig(portid_t new_port_id, unsigned socket_id)
{
struct rte_port *port;
+ int ret;
/* Reconfiguration of Ethernet ports. */
port = &ports[new_port_id];
- rte_eth_dev_info_get(new_port_id, &port->dev_info);
+
+ ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
+ if (ret != 0)
+ return;
/* set flag to initialize port/queue */
port->need_reconfig = 1;
{
portid_t pid;
struct rte_port *port;
+ int ret;
RTE_ETH_FOREACH_DEV(pid) {
port = &ports[pid];
port->dev_conf.fdir_conf = fdir_conf;
- rte_eth_dev_info_get(pid, &port->dev_info);
+
+ ret = eth_dev_info_get_print_err(pid, &port->dev_info);
+ if (ret != 0)
+ return;
+
if (nb_rxq > 1) {
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
if (retval < 0)
return retval;
- rte_eth_dev_info_get(pid, &rte_port->dev_info);
+
+ retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
+ if (retval != 0)
+ return retval;
/* If dev_info.vmdq_pool_base is greater than 0,
* the queue id of vmdq pools is started after pf queues.
void setup_gro_flush_cycles(uint8_t cycles);
void show_gro(portid_t port_id);
void setup_gso(const char *mode, portid_t port_id);
+int eth_dev_info_get_print_err(uint16_t port_id,
+ struct rte_eth_dev_info *dev_info);
+
/* Functions to manage the set of filtered Multicast MAC addresses */
void mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr);
{
struct rte_eth_dev_info dev_info;
uint16_t queue;
+ int ret;
if (port_id_is_invalid(portid, ENABLED_WARN))
return;
- rte_eth_dev_info_get(portid, &dev_info);
+
+ ret = eth_dev_info_get_print_err(portid, &dev_info);
+ if (ret != 0)
+ return;
+
for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
if (!ports[portid].tx_set_md_cb[queue])
ports[portid].tx_set_md_cb[queue] =
{
struct rte_eth_dev_info dev_info;
uint16_t queue;
+ int ret;
if (port_id_is_invalid(portid, ENABLED_WARN))
return;
- rte_eth_dev_info_get(portid, &dev_info);
+
+ ret = eth_dev_info_get_print_err(portid, &dev_info);
+ if (ret != 0)
+ return;
+
for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
if (ports[portid].tx_set_md_cb[queue]) {
rte_eth_remove_tx_callback(portid, queue,
ports[portid].tx_set_md_cb[queue] = NULL;
}
}
+
+/*
+ * Wrapper around rte_eth_dev_info_get() that reports failures.
+ *
+ * Fills @dev_info for @port_id; on error, prints a diagnostic to stdout
+ * (strerror() of the negated return code, matching the negative-errno
+ * convention of rte_eth_dev_info_get()).
+ *
+ * Returns 0 on success, or the non-zero error code from
+ * rte_eth_dev_info_get() so callers can bail out early.
+ */
+int
+eth_dev_info_get_print_err(uint16_t port_id,
+ struct rte_eth_dev_info *dev_info)
+{
+ int ret;
+
+ ret = rte_eth_dev_info_get(port_id, dev_info);
+ if (ret != 0)
+ printf("Error during getting device (port %u) info: %s\n",
+ port_id, strerror(-ret));
+
+ return ret;
+}