#include <rte_debug.h>
#include <rte_atomic.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
if (ret) {
if (!fs_err(sdev, ret))
continue;
- rte_eth_dev_stop(PORT_ID(sdev));
+ if (fs_err(sdev, rte_eth_dev_stop(PORT_ID(sdev))) < 0)
+ ERROR("Failed to stop sub-device %u",
+ SUB_ID(sdev));
fs_unlock(dev, 0);
return ret;
}
RTE_ETH_QUEUE_STATE_STOPPED;
}
-static void
+static int
fs_dev_stop(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
uint8_t i;
+ int ret;
fs_lock(dev, 0);
PRIV(dev)->state = DEV_STARTED - 1;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
- rte_eth_dev_stop(PORT_ID(sdev));
+ ret = rte_eth_dev_stop(PORT_ID(sdev));
+ if (fs_err(sdev, ret) < 0) {
+ ERROR("Failed to stop device %u",
+ PORT_ID(sdev));
+ PRIV(dev)->state = DEV_STARTED + 1;
+ fs_unlock(dev, 0);
+ return ret;
+ }
failsafe_rx_intr_uninstall_subdevice(sdev);
sdev->state = DEV_STARTED - 1;
}
failsafe_rx_intr_uninstall(dev);
fs_set_queues_state_stop(dev);
fs_unlock(dev, 0);
+
+ return 0;
}
static int
fs_dev_close(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
uint8_t i;
- int ret;
+ int err, ret = 0;
fs_lock(dev, 0);
failsafe_hotplug_alarm_cancel(dev);
- if (PRIV(dev)->state == DEV_STARTED)
- dev->dev_ops->dev_stop(dev);
+ if (PRIV(dev)->state == DEV_STARTED) {
+ ret = dev->dev_ops->dev_stop(dev);
+ if (ret != 0) {
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
PRIV(dev)->state = DEV_ACTIVE - 1;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Closing sub_device %d", i);
failsafe_eth_dev_unregister_callbacks(sdev);
- rte_eth_dev_close(PORT_ID(sdev));
+ err = rte_eth_dev_close(PORT_ID(sdev));
+ if (err) {
+ ret = ret ? ret : err;
+ ERROR("Error while closing sub-device %u",
+ PORT_ID(sdev));
+ }
sdev->state = DEV_ACTIVE - 1;
}
rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
failsafe_eth_new_event_callback, dev);
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
fs_unlock(dev, 0);
- return 0;
+ return ret;
}
fs_dev_free_queues(dev);
- ret = failsafe_eal_uninit(dev);
- if (ret)
+ err = failsafe_eal_uninit(dev);
+ if (err) {
+ ret = ret ? ret : err;
ERROR("Error while uninitializing sub-EAL");
+ }
failsafe_args_free(dev);
rte_free(PRIV(dev)->subs);
rte_free(PRIV(dev)->mcast_addrs);
/* mac_addrs must not be freed alone because part of dev_private */
dev->data->mac_addrs = NULL;
fs_unlock(dev, 0);
- ret = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
- if (ret)
+ err = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
+ if (err) {
+ ret = ret ? ret : err;
ERROR("Error while destroying hotplug mutex");
- return 0;
+ }
+ return ret;
}
static int
fs_dev_merge_info(struct rte_eth_dev_info *info,
const struct rte_eth_dev_info *sinfo)
{
+ info->min_mtu = RTE_MAX(info->min_mtu, sinfo->min_mtu);
+ info->max_mtu = RTE_MIN(info->max_mtu, sinfo->max_mtu);
info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
int ret;
/* Use maximum upper bounds by default */
+ infos->min_mtu = RTE_ETHER_MIN_MTU;
+ infos->max_mtu = UINT16_MAX;
infos->max_rx_pktlen = UINT32_MAX;
infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_SECURITY;
+ DEV_RX_OFFLOAD_SECURITY |
+ DEV_RX_OFFLOAD_RSS_HASH;
infos->rx_queue_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_SECURITY;
+ DEV_RX_OFFLOAD_SECURITY |
+ DEV_RX_OFFLOAD_RSS_HASH;
infos->tx_offload_capa =
DEV_TX_OFFLOAD_MULTI_SEGS |
}
static int
-fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
- enum rte_filter_type type,
- enum rte_filter_op op,
- void *arg)
+fs_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- if (type == RTE_ETH_FILTER_GENERIC &&
- op == RTE_ETH_FILTER_GET) {
- *(const void **)arg = &fs_flow_ops;
- return 0;
- }
- return -ENOTSUP;
+ *ops = &fs_flow_ops;
+ return 0;
}
const struct eth_dev_ops failsafe_ops = {
.mac_addr_set = fs_mac_addr_set,
.set_mc_addr_list = fs_set_mc_addr_list,
.rss_hash_update = fs_rss_hash_update,
- .filter_ctrl = fs_filter_ctrl,
+ .flow_ops_get = fs_flow_ops_get,
};