dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
if (dev->data->dev_started) {
*/
memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ goto rollback;

/* If number of queues specified by application for both Rx and Tx is
* zero, use driver preferred values. This cannot be done individually
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
int diag;
+ int ret;

RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
return 0;
}
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;

/* Let's restore MAC now if device does not support live change */
if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
return -EINVAL;
}
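The hunks above make rte_eth_dev_configure() and rte_eth_dev_start() check the result of rte_eth_dev_info_get(). In dev_configure the failure path cannot simply return, because dev_conf has already been copied into dev->data, so it jumps to the function's existing rollback label instead. A compilable toy sketch of that save/apply/rollback pattern (all names are illustrative; only the memcpy and the goto appear in the hunk):

#include <string.h>

struct conf { int nb_rxq, nb_txq; };

/* Toy model: save the current configuration, apply the new one, and
 * restore the saved copy if a later step fails. get_info() stands in
 * for the now-checked rte_eth_dev_info_get() call. */
static int
configure(struct conf *dev_conf, const struct conf *new_conf,
          int (*get_info)(void))
{
        struct conf orig_conf;
        int ret;

        memcpy(&orig_conf, dev_conf, sizeof(orig_conf));
        memcpy(dev_conf, new_conf, sizeof(*dev_conf));

        ret = get_info();
        if (ret != 0)
                goto rollback;

        return 0;

rollback:
        memcpy(dev_conf, &orig_conf, sizeof(*dev_conf));
        return ret;
}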
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
/*
* This value must be provided in the private data of the memory pool.
* First check that the memory pool has valid private data.
*/
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;
+
if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
mp->name, (int)mp->private_data_size,
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf local_conf;
void **txq;
+ int ret;

RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
return -EINVAL;
}
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;

/* Use default specified by driver, if nb_tx_desc is zero */
if (nb_tx_desc == 0) {
fw_version, fw_size));
}
-void
+int
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
struct rte_eth_dev *dev;
*/
memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
- RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
dev_info->rx_desc_lim = lim;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->max_mtu = UINT16_MAX;
- RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->driver_name = dev->device->driver->name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
dev_info->dev_flags = &dev->data->dev_flags;
+
+ return 0;
}
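This is the core of the change: rte_eth_dev_info_get() now reports -ENODEV for an invalid port and -ENOTSUP when the driver has no dev_infos_get callback, instead of silently handing back a zeroed structure. A minimal caller-side sketch (the function name and the printed fields are illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: callers must now check the status before trusting dev_info. */
static int
print_queue_limits(uint16_t port_id)
{
        struct rte_eth_dev_info dev_info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)   /* -ENODEV or -ENOTSUP from the checks above */
                return ret;

        printf("port %u: max_rx_queues=%u, max_tx_queues=%u\n",
               port_id, dev_info.max_rx_queues, dev_info.max_tx_queues);
        return 0;
}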
int
* which relies on dev->dev_ops->dev_infos_get.
*/
if (*dev->dev_ops->dev_infos_get != NULL) {
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;
+
if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
return -EINVAL;
}
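In rte_eth_dev_set_mtu() a failed info lookup is now propagated as-is, while an out-of-range MTU still yields -EINVAL, so callers can tell the two cases apart. A hedged usage sketch (the 9000-byte value is an arbitrary jumbo-frame example):

#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: -EINVAL means the MTU missed [min_mtu, max_mtu]; other errors
 * come from the info lookup or the driver's own mtu_set handler. */
static int
enable_jumbo(uint16_t port_id)
{
        int ret = rte_eth_dev_set_mtu(port_id, 9000);

        if (ret == -EINVAL)
                fprintf(stderr, "MTU 9000 out of device range\n");
        return ret;
}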
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
+ int ret;

RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;
+
dev = &rte_eth_devices[port_id];
- rte_eth_dev_info_get(port_id, &dev_info);
if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
dev_info.flow_type_rss_offloads) {
RTE_ETHDEV_LOG(ERR,
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
unsigned i;
+ int ret;

- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return -1;
for (i = 0; i < dev_info.max_mac_addrs; i++)
if (memcmp(addr, &dev->data->mac_addrs[i],
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
unsigned i;
+ int ret;
+
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return -1;
- rte_eth_dev_info_get(port_id, &dev_info);
if (!dev->data->hash_mac_addrs)
return -1;
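get_mac_addr_index() and get_hash_mac_addr_index() return an array index, so there is no channel for an errno: a failed info lookup is folded into the same -1 that already means "address not present". A compilable toy model of that convention (names are hypothetical):

/* Toy model: an index-returning lookup can only answer "found at i" or
 * -1; a failed precondition (info_ok == 0, standing in for a failed
 * rte_eth_dev_info_get()) degrades to the not-found answer. */
static int
lookup_index(const int *tbl, int n, int key, int info_ok)
{
        int i;

        if (!info_ok)
                return -1;
        for (i = 0; i < n; i++)
                if (tbl[i] == key)
                        return i;
        return -1;
}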
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
struct rte_eth_link link;
+ int ret;

RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;
+
dev = &rte_eth_devices[port_id];
- rte_eth_dev_info_get(port_id, &dev_info);
link = dev->data->dev_link;
if (queue_idx > dev_info.max_tx_queues) {
uint16_t *nb_rx_desc,
uint16_t *nb_tx_desc)
{
- struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
+ int ret;

RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
-
- rte_eth_dev_info_get(port_id, &dev_info);
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;

if (nb_rx_desc != NULL)
rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
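After this last hunk, rte_eth_dev_adjust_nb_rx_tx_desc() relies entirely on rte_eth_dev_info_get() for port validation and the descriptor limits, so -ENOTSUP surfaces through the info call rather than a separate FUNC_PTR check. Typical application usage, sketched (the 1024 requests are arbitrary):

#include <rte_ethdev.h>

/* Sketch: clamp requested ring sizes to the device's descriptor limits.
 * On error the requested values are left unmodified. */
static int
pick_ring_sizes(uint16_t port_id, uint16_t *nb_rxd, uint16_t *nb_txd)
{
        *nb_rxd = 1024;
        *nb_txd = 1024;

        return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, nb_rxd, nb_txd);
}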