Change eth_dev_infos_get_t return value from void to int.
Make eth_dev_infos_get_t implementations across all drivers return
negative errno values in case of error conditions.
Signed-off-by: Ivan Ilchenko <ivan.ilchenko@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
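For reference, the converted callback contract looks as follows. This is a
minimal sketch rather than code from the patch; the "dummy" PMD name is
hypothetical:

    /* Hypothetical PMD: dev_infos_get now reports status to the caller. */
    #include <errno.h>
    #include <rte_ethdev_driver.h>

    static int
    dummy_dev_infos_get(struct rte_eth_dev *dev,
                        struct rte_eth_dev_info *dev_info)
    {
            /* Negative errno on failure instead of silently returning. */
            if (dev->data->dev_private == NULL)
                    return -ENODEV;

            dev_info->max_rx_queues = 1;
            dev_info->max_tx_queues = 1;
            dev_info->min_rx_bufsize = 0;

            return 0; /* success */
    }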
return -1;
}
-static void
+static int
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
{
dev_info->max_tx_queues = (uint16_t)512;
dev_info->min_rx_bufsize = 0;
+
+ return 0;
}
static int
return 0;
}
-static void
+static int
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
dev_info->min_rx_bufsize = 0;
+
+ return 0;
}
static int
return 0;
}
-static void
+static int
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
dev_info->default_txportconf.nb_queues = 1;
dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
+
+ return 0;
}
static int
static int eth_ark_dev_start(struct rte_eth_dev *dev);
static void eth_ark_dev_stop(struct rte_eth_dev *dev);
static void eth_ark_dev_close(struct rte_eth_dev *dev);
-static void eth_ark_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int eth_ark_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static int eth_ark_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev);
dev->data->mac_addrs = 0;
}
-static void
+static int
eth_ark_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_50G |
ETH_LINK_SPEED_100G);
+
+ return 0;
}
static int
struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
-static void atl_dev_info_get(struct rte_eth_dev *dev,
+static int atl_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
int atl_logtype_init;
return 0;
}
-static void
+static int
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->speed_capa |= ETH_LINK_SPEED_100M;
dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+
+ return 0;
}
static const uint32_t *
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct rte_eth_dev_info dev_info;
+ int ret;
uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
- atl_dev_info_get(dev, &dev_info);
+ ret = atl_dev_info_get(dev, &dev_info);
+ if (ret != 0)
+ return ret;
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
static int avp_dev_start(struct rte_eth_dev *dev);
static void avp_dev_stop(struct rte_eth_dev *dev);
static void avp_dev_close(struct rte_eth_dev *dev);
-static void avp_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int avp_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static int avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
rte_spinlock_unlock(&avp->lock);
}
-static void
+static int
avp_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
}
+
+ return 0;
}
static int
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
-static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+static int axgbe_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
/* The set of PCI devices this driver supports */
}
}
-static void
+static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct axgbe_port *pdata = dev->data->dev_private;
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = AXGBE_TX_FREE_THRESH,
};
+
+ return 0;
}
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
return num;
}
-static void
+static int
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bnx2x_softc *sc = dev->data->dev_private;
dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
dev_info->tx_desc_lim.nb_max = MAX_TX_AVAIL;
+
+ return 0;
}
static int
* Device configuration and status function
*/
-static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
- struct rte_eth_dev_info *dev_info)
+static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info)
{
struct bnxt *bp = eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
dev_info->vmdq_pool_base = 0;
dev_info->vmdq_queue_base = 0;
+
+ return 0;
}
/* Configure the device based on the configuration provided */
new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE * BNXT_NUM_VLANS;
- bnxt_dev_info_get_op(eth_dev, &dev_info);
+ rc = bnxt_dev_info_get_op(eth_dev, &dev_info);
+ if (rc != 0) {
+ PMD_DRV_LOG(ERR, "Error during getting ethernet device info\n");
+ return rc;
+ }
if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
-static void
+static int
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bond_dev_private *internals = dev->data->dev_private;
slave.port_id,
strerror(-ret));
- return;
+ return ret;
}
if (slave_info.max_rx_queues < max_nb_rx_queues)
dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
dev_info->reta_size = internals->reta_size;
+
+ return 0;
}
static int
return work_done;
}
-void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *device_info)
{
struct port_info *pi = eth_dev->data->dev_private;
device_info->rx_desc_lim = cxgbe_desc_lim;
device_info->tx_desc_lim = cxgbe_desc_lim;
cxgbe_get_speed_caps(pi, &device_info->speed_capa);
+
+ return 0;
}
void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
int err;
uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
- cxgbe_dev_info_get(eth_dev, &dev_info);
+ err = cxgbe_dev_info_get(eth_dev, &dev_info);
+ if (err != 0)
+ return err;
/* Must accommodate at least RTE_ETHER_MIN_MTU */
if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
__func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
socket_id, mp);
- cxgbe_dev_info_get(eth_dev, &dev_info);
+ err = cxgbe_dev_info_get(eth_dev, &dev_info);
+ if (err != 0) {
+ dev_err(adap, "%s: error during getting ethernet device info",
+ __func__);
+ return err;
+ }
/* Must accommodate at least RTE_ETHER_MIN_MTU */
if ((pkt_len < dev_info.min_rx_bufsize) ||
void cxgbe_dev_tx_queue_release(void *q);
void cxgbe_dev_stop(struct rte_eth_dev *eth_dev);
void cxgbe_dev_close(struct rte_eth_dev *eth_dev);
-void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
- struct rte_eth_dev_info *device_info);
+int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info);
void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev);
void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev);
void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev);
static struct rte_dpaa_driver rte_dpaa_pmd;
-static void
+static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
static inline void
return 0;
}
-static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
+static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
dev_info->max_vmdq_pools = ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
- if (dpaa_intf->fif->mac_type == fman_mac_1g)
+ if (dpaa_intf->fif->mac_type == fman_mac_1g) {
dev_info->speed_capa = ETH_LINK_SPEED_1G;
- else if (dpaa_intf->fif->mac_type == fman_mac_10g)
+ } else if (dpaa_intf->fif->mac_type == fman_mac_10g) {
dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G);
- else
+ } else {
DPAA_PMD_ERR("invalid link_speed: %s, %d",
dpaa_intf->name, dpaa_intf->fif->mac_type);
+ return -EINVAL;
+ }
dev_info->rx_offload_capa = dev_rx_offloads_sup |
dev_rx_offloads_nodis;
dev_tx_offloads_nodis;
dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
+
+ return 0;
}
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
return 0;
}
-static void
+static int
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
dev_info->max_vfs = 0;
dev_info->max_vmdq_pools = ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
+
+ return 0;
}
static int
static int eth_em_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static void eth_em_stats_reset(struct rte_eth_dev *dev);
-static void eth_em_infos_get(struct rte_eth_dev *dev,
+static int eth_em_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
}
}
-static void
+static int
eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
dev_info->default_txportconf.nb_queues = 1;
dev_info->default_txportconf.ring_size = 256;
dev_info->default_rxportconf.ring_size = 256;
+
+ return 0;
}
/* return 0 means link status changed, -1 means not changed */
struct e1000_hw *hw;
uint32_t frame_size;
uint32_t rctl;
+ int ret;
+
+ ret = eth_em_infos_get(dev, &dev_info);
+ if (ret != 0)
+ return ret;
- eth_em_infos_get(dev, &dev_info);
frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE;
static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
char *fw_version, size_t fw_size);
-static void eth_igb_infos_get(struct rte_eth_dev *dev,
+static int eth_igb_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
-static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
+static int eth_igbvf_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
return 0;
}
-static void
+static int
eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
default:
/* Should not happen */
- break;
+ return -EINVAL;
}
dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ return 0;
}
static const uint32_t *
return NULL;
}
-static void
+static int
eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
break;
default:
/* Should not happen */
- break;
+ return -EINVAL;
}
dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
+
+ return 0;
}
/* return 0 means link status changed, -1 means not changed */
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
uint32_t mask, regval;
+ int ret;
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_dev_info dev_info;
memset(&dev_info, 0, sizeof(dev_info));
- eth_igb_infos_get(dev, &dev_info);
+ ret = eth_igb_infos_get(dev, &dev_info);
+ if (ret != 0)
+ return ret;
mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift;
regval = E1000_READ_REG(hw, E1000_EIMS);
u16 i;
struct rte_eth_dev_info dev_info;
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
memset(&dev_info, 0, sizeof(dev_info));
- eth_igbvf_infos_get(dev, &dev_info);
+ ret = eth_igbvf_infos_get(dev, &dev_info);
+ if (ret != 0)
+ return;
/* Clear interrupt mask to stop from interrupts being generated */
igbvf_intr_disable(hw);
struct e1000_hw *hw;
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + E1000_ETH_OVERHEAD;
+ int ret;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (hw->mac.type == e1000_82571)
return -ENOTSUP;
#endif
- eth_igb_infos_get(dev, &dev_info);
+ ret = eth_igb_infos_get(dev, &dev_info);
+ if (ret != 0)
+ return ret;
/* check that mtu is within the allowed range */
if (mtu < RTE_ETHER_MIN_MTU ||
static int ena_queue_start_all(struct rte_eth_dev *dev,
enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
-static void ena_infos_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int ena_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
}
}
-static void ena_infos_get(struct rte_eth_dev *dev,
+static int ena_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct ena_adapter *adapter;
adapter->max_tx_sgl_size);
dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
adapter->max_tx_sgl_size);
+
+ return 0;
}
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return 0;
}
-static void
+static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
{
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME);
+
+ return 0;
}
static int
return ETH_LINK_SPEED_10G;
}
-static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
+static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *device_info)
{
struct enic *enic = pmd_priv(eth_dev);
.nb_queues = ENIC_DEFAULT_TX_RINGS,
};
device_info->speed_capa = speed_capa_from_pci_id(eth_dev);
+
+ return 0;
}
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
* all sub_devices and the default capabilities.
*
*/
-static void
+static int
fs_dev_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *infos)
{
ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
ret = fs_err(sdev, ret);
if (ret != 0)
- return;
+ return ret;
fs_dev_merge_info(infos, &sub_info);
}
+
+ return 0;
}
static const uint32_t *
static int fm10k_check_ftag(struct rte_devargs *devargs);
static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
-static void fm10k_dev_infos_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int fm10k_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
fm10k_rebind_hw_stats(hw, hw_stats);
}
-static void
+static int
fm10k_dev_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+
+ return 0;
}
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
* @param info
* Pointer to Info structure output buffer.
*/
-static void
+static int
hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL;
info->rx_desc_lim = hinic_rx_desc_lim;
info->tx_desc_lim = hinic_tx_desc_lim;
+
+ return 0;
}
static int hinic_config_rx_mode(struct hinic_nic_dev *nic_dev, u32 rx_mode_ctrl)
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
char *fw_version, size_t fw_size);
-static void i40e_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int i40e_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id,
int on);
return false;
}
-static void
+static int
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
}
dev_info->default_rxportconf.burst_size = 32;
dev_info->default_txportconf.burst_size = 32;
+
+ return 0;
}
static int
static int i40evf_dev_configure(struct rte_eth_dev *dev);
static int i40evf_dev_start(struct rte_eth_dev *dev);
static void i40evf_dev_stop(struct rte_eth_dev *dev);
-static void i40evf_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int i40evf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static int i40evf_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static int i40evf_dev_stats_get(struct rte_eth_dev *dev,
vf->promisc_multicast_enabled = FALSE;
}
-static void
+static int
i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
.nb_min = I40E_MIN_RING_DESC,
.nb_align = I40E_ALIGN_RING_DESC,
};
+
+ return 0;
}
static int
return i40e_dev_link_update(representor->adapter->eth_dev,
wait_to_complete);
}
-static void
+static int
i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
struct rte_eth_dev_info *dev_info)
{
representor->adapter->eth_dev->device->name;
dev_info->switch_info.domain_id = representor->switch_domain_id;
dev_info->switch_info.port_id = representor->vf_id;
+
+ return 0;
}
static int
static int iavf_dev_start(struct rte_eth_dev *dev);
static void iavf_dev_stop(struct rte_eth_dev *dev);
static void iavf_dev_close(struct rte_eth_dev *dev);
-static void iavf_dev_info_get(struct rte_eth_dev *dev,
+static int iavf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int iavf_dev_stats_get(struct rte_eth_dev *dev,
hw->adapter_stopped = 1;
}
-static void
+static int
iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
.nb_min = IAVF_MIN_RING_DESC,
.nb_align = IAVF_ALIGN_RING_DESC,
};
+
+ return 0;
}
static const uint32_t *
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
-static void ice_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int ice_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static int ice_dev_set_link_up(struct rte_eth_dev *dev);
return 0;
}
-static void
+static int
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
dev_info->default_txportconf.nb_queues = 1;
dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
+
+ return 0;
}
static inline int
static int
ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst);
-static void
+static int
ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
struct rte_eth_dev_info *dev_info)
{
dev_info->switch_info.name = ethdev->device->name;
dev_info->switch_info.domain_id = rpst->switch_domain_id;
dev_info->switch_info.port_id = rpst->port_id;
+
+ return 0;
}
static int
uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
size_t fw_size);
-static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
-static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
return 0;
}
-static void
+static int
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->default_txportconf.nb_queues = 1;
dev_info->default_rxportconf.ring_size = 256;
dev_info->default_txportconf.ring_size = 256;
+
+ return 0;
}
static const uint32_t *
return NULL;
}
-static void
+static int
ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
+
+ return 0;
}
static int
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
struct rte_eth_dev_data *dev_data = dev->data;
+ int ret;
- ixgbe_dev_info_get(dev, &dev_info);
+ ret = ixgbe_dev_info_get(dev, &dev_info);
+ if (ret != 0)
+ return ret;
/* check that mtu is within the allowed range */
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
representor->vf_id, mac_addr);
}
-static void
+static int
ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
struct rte_eth_dev_info *dev_info)
{
representor->pf_ethdev->device->name;
dev_info->switch_info.domain_id = representor->switch_domain_id;
dev_info->switch_info.port_id = representor->vf_id;
+
+ return 0;
}
static int ixgbe_vf_representor_dev_configure(
return 0;
}
-static void
+static int
eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
{
dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->min_rx_bufsize = 0;
+
+ return 0;
}
static int
}
}
-static void
+static int
lio_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *devinfo)
{
devinfo->speed_capa = ETH_LINK_SPEED_10G;
lio_dev_err(lio_dev,
"Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
+ return -EINVAL;
}
devinfo->max_rx_queues = lio_dev->max_rx_queues;
ETH_RSS_NONFRAG_IPV6_TCP |
ETH_RSS_IPV6_EX |
ETH_RSS_IPV6_TCP_EX);
+ return 0;
}
static int
return memif_connect(dev);
}
-static void
+static int
memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *dev_info)
{
dev_info->max_mac_addrs = 1;
dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
dev_info->min_rx_bufsize = 0;
+
+ return 0;
}
static memif_ring_t *
int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
void mlx4_stats_reset(struct rte_eth_dev *dev);
int mlx4_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
-void mlx4_dev_infos_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *info);
+int mlx4_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *info);
int mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete);
int mlx4_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
* @param[out] info
* Info structure output buffer.
*/
-void
+int
mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
struct mlx4_priv *priv = dev->data->dev_private;
ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_56G;
info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1);
+
+ return 0;
}
/**
int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
unsigned int flags);
int mlx5_dev_configure(struct rte_eth_dev *dev);
-void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
+int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
int mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
* @param[out] info
* Info structure output buffer.
*/
-void
+int
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
struct mlx5_priv *priv = dev->data->dev_private;
break;
}
}
+
+ return 0;
}
/**
* @param info
* Info structure output buffer.
*/
-static void
+static int
mvneta_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *info)
{
info->default_txconf.offloads = 0;
info->max_rx_pktlen = MVNETA_PKT_SIZE_MAX;
+
+ return 0;
}
/**
* @param info
* Info structure output buffer.
*/
-static void
+static int
mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *info)
{
info->default_rxconf.rx_drop_en = 1;
info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
+
+ return 0;
}
/**
return rte_eth_linkstatus_set(dev, &link);
}
-static void hn_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
+static int hn_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
{
struct hn_data *hv = dev->data->dev_private;
+ int rc;
dev_info->speed_capa = ETH_LINK_SPEED_10G;
dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
dev_info->max_rx_queues = hv->max_queues;
dev_info->max_tx_queues = hv->max_queues;
- hn_rndis_get_offload(hv, dev_info);
- hn_vf_info_get(hv, dev_info);
+ rc = hn_rndis_get_offload(hv, dev_info);
+ if (rc != 0)
+ return rc;
+
+ rc = hn_vf_info_get(hv, dev_info);
+ if (rc != 0)
+ return rc;
+
+ return 0;
}
static int hn_rss_reta_update(struct rte_eth_dev *dev,
* @param[out] info
* Info structure output buffer.
*/
-static void
+static int
nfb_eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->max_rx_queues = dev->data->nb_rx_queues;
dev_info->max_tx_queues = dev->data->nb_tx_queues;
dev_info->speed_capa = ETH_LINK_SPEED_100G;
+
+ return 0;
}
/**
static void nfp_net_dev_interrupt_handler(void *param);
static void nfp_net_dev_interrupt_delayed_handler(void *param);
static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void nfp_net_infos_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int nfp_net_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static int nfp_net_init(struct rte_eth_dev *eth_dev);
static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
}
-static void
+static int
nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct nfp_net_hw *hw;
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+
+ return 0;
}
static const uint32_t *
return 0;
}
-static void
+static int
eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals;
if ((dev == NULL) || (dev_info == NULL))
- return;
+ return -EINVAL;
internals = dev->data->dev_private;
dev_info->max_mac_addrs = 1;
dev_info->min_rx_bufsize = 0;
dev_info->reta_size = internals->reta_size;
dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
+
+ return 0;
}
static int
return ret;
}
-static void
+static int
octeontx_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS;
dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS;
+
+ return 0;
}
static void
}
/* Ops */
-void otx2_nix_info_get(struct rte_eth_dev *eth_dev,
- struct rte_eth_dev_info *dev_info);
+int otx2_nix_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info);
int otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg);
return 0;
}
-void
+int
otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
+ return 0;
}
return 0;
}
-static void
+static int
eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->max_rx_queues = dev->data->nb_rx_queues;
dev_info->max_tx_queues = dev->data->nb_tx_queues;
dev_info->min_rx_bufsize = 0;
+
+ return 0;
}
static int
.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};
-static void
+static int
qede_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info)
{
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
speed_cap |= ETH_LINK_SPEED_100G;
dev_info->speed_capa = speed_cap;
+
+ return 0;
}
/* return 0 means link status changed, -1 means not changed */
int i, rc;
PMD_INIT_FUNC_TRACE(edev);
- qede_dev_info_get(dev, &dev_info);
+ rc = qede_dev_info_get(dev, &dev_info);
+ if (rc != 0) {
+ DP_ERR(edev, "Error during getting ethernet device info\n");
+ return rc;
+ }
max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
frame_size = max_rx_pkt_len;
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
}
-static void
+static int
eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
+
+ return 0;
}
static int
return 0;
}
-static void
+static int
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
+ return 0;
}
static const uint32_t *
rte_log(RTE_LOG_ ## level, pmd_softnic_logtype, \
"%s(): " fmt "\n", __func__, ##args)
-static void
+static int
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
{
dev_info->max_rx_pktlen = UINT32_MAX;
dev_info->max_rx_queues = UINT16_MAX;
dev_info->max_tx_queues = UINT16_MAX;
+
+ return 0;
}
static int
return 0;
}
-static void
+static int
eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->rx_queue_offload_capa = 0;
dev_info->tx_queue_offload_capa = 0;
dev_info->speed_capa = ETH_LINK_SPEED_100G;
+
+ return 0;
}
static int
return capa;
}
-static void
+static int
tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
* functions together and not in partial combinations
*/
dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
+
+ return 0;
}
static int
return 0;
}
-static void
+static int
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM,
};
+
+ return 0;
}
static nicvf_iova_addr_t
return 0;
}
-static void
+static int
eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
internal = dev->data->dev_private;
if (internal == NULL) {
VHOST_LOG(ERR, "Invalid device specified\n");
- return;
+ return -ENODEV;
}
dev_info->max_mac_addrs = 1;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT;
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ return 0;
}
static int
static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
-static void virtio_dev_info_get(struct rte_eth_dev *dev,
+static int virtio_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
return 0;
}
-static void
+static int
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
uint64_t tso_mask, host_features;
(1ULL << VIRTIO_NET_F_HOST_TSO6);
if ((host_features & tso_mask) == tso_mask)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ return 0;
}
/*
unsigned int n);
static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n);
-static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int vmxnet3_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
}
}
-static void
+static int
vmxnet3_dev_info_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
{
dev_info->rx_queue_offload_capa = 0;
dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
dev_info->tx_queue_offload_capa = 0;
+
+ return 0;
}
static const uint32_t *
.nb_seg_max = UINT16_MAX,
.nb_mtu_seg_max = UINT16_MAX,
};
+ int diag;
/*
* Init dev_info before port_id check since caller does not have
dev_info->max_mtu = UINT16_MAX;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
- (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+ diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+ if (diag != 0) {
+ /* Cleanup already filled in device information */
+ memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+ return eth_err(port_id, diag);
+ }
+
dev_info->driver_name = dev->device->driver->name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
uint8_t is_rx);
/**< @internal Set a queue statistics mapping for a tx/rx queue of an Ethernet device. */
-typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+typedef int (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
/**< @internal Get specific information of an Ethernet device. */
typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
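With the ethdev layer propagating the callback status (see the rte_ethdev.c
hunk above), callers can act on the result. A sketch of an application-side
check, assuming rte_eth_dev_info_get() has already been converted to return
int earlier in this series, as the rte_ethdev.c hunk implies:

    #include <stdlib.h>
    #include <rte_ethdev.h>
    #include <rte_errno.h>
    #include <rte_debug.h>

    static void
    query_port_info(uint16_t port_id)
    {
            struct rte_eth_dev_info dev_info;
            int ret;

            ret = rte_eth_dev_info_get(port_id, &dev_info);
            if (ret != 0)
                    rte_exit(EXIT_FAILURE,
                             "port %u: cannot get device info: %s\n",
                             port_id, rte_strerror(-ret));
    }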