The API function rte_eth_dev_close() was returning void.
The return type is changed to int so that errors can be reported.
If an error happens during a close operation,
the status of the port is undefined,
but as many resources as possible have been freed.
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Reviewed-by: Liron Himi <lironh@marvell.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
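
For callers, the practical effect is that port teardown can now report failure.
A minimal sketch of how an application might check the new return value
(the helper name and the logging are illustrative, not part of this patch):

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_errno.h>

    /* Close one port and log any failure reported by the PMD.
     * rte_eth_dev_close() now returns 0 on success or a negative
     * errno value (e.g. -ENODEV, -ENOTSUP) on error.
     */
    static void
    app_close_port(uint16_t port_id)
    {
            int ret = rte_eth_dev_close(port_id);

            if (ret < 0)
                    printf("Failed to close port %u: %s\n",
                           port_id, rte_strerror(-ret));
    }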
invalid port ID, unsupported operation, failed operation):
- ``rte_eth_dev_stop``
- - ``rte_eth_dev_close``
* ethdev: New offload flags ``DEV_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
This will allow applications to enable or disable PMDs from updating
* ethdev: Added capability to query age flow action.
+* ethdev: Added ``int`` return type to ``rte_eth_dev_close()``.
+
* ethdev: Renamed internal functions:
* ``_rte_eth_dev_callback_process()`` -> ``rte_eth_dev_callback_process()``
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
uint16_t port_id;
+ int err = 0;
/* Free up other ports and all resources */
RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
- rte_eth_dev_close(port_id);
+ err |= rte_eth_dev_close(port_id);
- return 0;
+ return err == 0 ? 0 : -EIO;
}
static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
uint16_t port_id;
+ int err = 0;
/* Free up other ports and all resources */
RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
- rte_eth_dev_close(port_id);
+ err |= rte_eth_dev_close(port_id);
- return 0;
+ return err == 0 ? 0 : -EIO;
}
static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
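
The PCI and vdev remove callbacks in this patch (cxgbe above; mlx5, mvneta
and mvpp2 below) fold the per-port close results into a single status. A
hedged sketch of that fold in isolation (the function name is made up; only
the aggregation pattern is taken from the hunks):

    #include <errno.h>
    #include <rte_dev.h>
    #include <rte_ethdev.h>

    /* Close every port that belongs to the given rte_device. OR-ing the
     * return codes discards the individual errno values, so a generic
     * -EIO is reported once any close has failed.
     */
    static int
    example_close_ports(const struct rte_device *dev)
    {
            uint16_t port_id;
            int err = 0;

            RTE_ETH_FOREACH_DEV_OF(port_id, dev)
                    err |= rte_eth_dev_close(port_id);

            return err == 0 ? 0 : -EIO;
    }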
/* fallthrough */
case DEV_ACTIVE:
failsafe_eth_dev_unregister_callbacks(sdev);
- rte_eth_dev_close(PORT_ID(sdev));
+ ret = rte_eth_dev_close(PORT_ID(sdev));
+ if (ret < 0) {
+ ERROR("Port close failed for sub-device %u",
+ PORT_ID(sdev));
+ }
sdev->state = DEV_PROBED;
/* fallthrough */
case DEV_PROBED:
{
struct sub_device *sdev;
uint8_t i;
- int ret;
+ int err, ret = 0;
fs_lock(dev, 0);
failsafe_hotplug_alarm_cancel(dev);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Closing sub_device %d", i);
failsafe_eth_dev_unregister_callbacks(sdev);
- rte_eth_dev_close(PORT_ID(sdev));
+ err = rte_eth_dev_close(PORT_ID(sdev));
+ if (err) {
+ ret = ret ? ret : err;
+ ERROR("Error while closing sub-device %u",
+ PORT_ID(sdev));
+ }
sdev->state = DEV_ACTIVE - 1;
}
rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
failsafe_eth_new_event_callback, dev);
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
fs_unlock(dev, 0);
- return 0;
+ return ret;
}
fs_dev_free_queues(dev);
- ret = failsafe_eal_uninit(dev);
- if (ret)
+ err = failsafe_eal_uninit(dev);
+ if (err) {
+ ret = ret ? ret : err;
ERROR("Error while uninitializing sub-EAL");
+ }
failsafe_args_free(dev);
rte_free(PRIV(dev)->subs);
rte_free(PRIV(dev)->mcast_addrs);
/* mac_addrs must not be freed alone because part of dev_private */
dev->data->mac_addrs = NULL;
fs_unlock(dev, 0);
- ret = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
- if (ret)
+ err = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
+ if (err) {
+ ret = ret ? ret : err;
ERROR("Error while destroying hotplug mutex");
- return 0;
+ }
+ return ret;
}
static int
if (eth_dev == NULL)
return 0;
- rte_eth_dev_close(eth_dev->data->port_id);
-
- return 0;
+ return rte_eth_dev_close(eth_dev->data->port_id);
}
static struct rte_vdev_driver pmd_memif_drv = {
mlx5_pci_remove(struct rte_pci_device *pci_dev)
{
uint16_t port_id;
+ int ret = 0;
RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
/*
* call the close function explicitly for secondary process.
*/
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
- mlx5_dev_close(&rte_eth_devices[port_id]);
+ ret |= mlx5_dev_close(&rte_eth_devices[port_id]);
else
- rte_eth_dev_close(port_id);
+ ret |= rte_eth_dev_close(port_id);
}
- return 0;
+ return ret == 0 ? 0 : -EIO;
}
static const struct rte_pci_id mlx5_pci_id_map[] = {
rte_pmd_mvneta_remove(struct rte_vdev_device *vdev)
{
uint16_t port_id;
+ int ret = 0;
RTE_ETH_FOREACH_DEV(port_id) {
if (rte_eth_devices[port_id].device != &vdev->device)
continue;
- rte_eth_dev_close(port_id);
+ ret |= rte_eth_dev_close(port_id);
}
- return 0;
+ return ret == 0 ? 0 : -EIO;
}
static struct rte_vdev_driver pmd_mvneta_drv = {
rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
{
uint16_t port_id;
+ int ret = 0;
RTE_ETH_FOREACH_DEV(port_id) {
if (rte_eth_devices[port_id].device != &vdev->device)
continue;
- rte_eth_dev_close(port_id);
+ ret |= rte_eth_dev_close(port_id);
}
- return 0;
+ return ret == 0 ? 0 : -EIO;
}
static struct rte_vdev_driver pmd_mrvl_drv = {
static int
hn_dev_close(struct rte_eth_dev *dev)
{
+ int ret;
+
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- hn_vf_close(dev);
+ ret = hn_vf_close(dev);
hn_dev_free_queues(dev);
- return 0;
+ return ret;
}
static const struct eth_dev_ops hn_eth_dev_ops = {
int hn_vf_start(struct rte_eth_dev *dev);
void hn_vf_reset(struct rte_eth_dev *dev);
void hn_vf_stop(struct rte_eth_dev *dev);
-void hn_vf_close(struct rte_eth_dev *dev);
+int hn_vf_close(struct rte_eth_dev *dev);
int hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
int hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}
-void hn_vf_close(struct rte_eth_dev *dev)
+int hn_vf_close(struct rte_eth_dev *dev)
{
struct hn_data *hv = dev->data->dev_private;
uint16_t vf_port;
+ int ret = 0;
rte_rwlock_read_lock(&hv->vf_lock);
vf_port = hv->vf_port;
if (vf_port != HN_INVALID_PORT)
- rte_eth_dev_close(vf_port);
+ ret = rte_eth_dev_close(vf_port);
hv->vf_port = HN_INVALID_PORT;
rte_rwlock_read_unlock(&hv->vf_lock);
+
+ return ret;
}
int hn_vf_stats_reset(struct rte_eth_dev *dev)
return rte_eth_dev_release_port(eth_dev);
/* make sure the device is stopped, queues freed */
- rte_eth_dev_close(eth_dev->data->port_id);
-
- return 0;
+ return rte_eth_dev_close(eth_dev->data->port_id);
}
static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr,
return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}
-void
+int
rte_eth_dev_close(uint16_t port_id)
{
struct rte_eth_dev *dev;
+ int firsterr, binerr;
+ int *lasterr = &firsterr;
- RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
- (*dev->dev_ops->dev_close)(dev);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+ *lasterr = (*dev->dev_ops->dev_close)(dev);
+ if (*lasterr != 0)
+ lasterr = &binerr;
rte_ethdev_trace_close(port_id);
- rte_eth_dev_release_port(dev);
+ *lasterr = rte_eth_dev_release_port(dev);
+
+ return eth_err(port_id, firsterr);
}
int
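
The firsterr/lasterr pointer dance above latches the first failure: lasterr
starts out pointing at firsterr, and once the dev_close callback has failed
it is redirected to the scratch variable binerr, so the later result of
rte_eth_dev_release_port() cannot overwrite the error code that gets
returned. Roughly the same idiom in isolation (the step functions are
hypothetical):

    /* Run two cleanup steps, keep going after a failure, and return the
     * first non-zero result. Later errors land in a scratch slot.
     */
    static int
    run_two_steps(int (*step1)(void), int (*step2)(void))
    {
            int firsterr, scratch;
            int *lasterr = &firsterr;

            *lasterr = step1();
            if (*lasterr != 0)
                    lasterr = &scratch;
            *lasterr = step2();

            return firsterr;
    }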
*
* @param port_id
* The port identifier of the Ethernet device.
+ * @return
+ * - Zero if the port is closed successfully.
+ * - Negative if something went wrong.
*/
-void rte_eth_dev_close(uint16_t port_id);
+int rte_eth_dev_close(uint16_t port_id);
/**
* Reset an Ethernet device and keep its port id.