+ /* rte_flow */
+ if (TAILQ_EMPTY(&PRIV(dev)->flow_list)) {
+ DEBUG("rte_flow already set");
+ } else {
+ DEBUG("Resetting rte_flow configuration");
+ ret = rte_flow_flush(PORT_ID(sdev), &ferror);
+ if (ret) {
+ fs_flow_complain(&ferror);
+ return ret;
+ }
+ i = 0;
+ rte_errno = 0;
+ DEBUG("Configuring rte_flow");
+ TAILQ_FOREACH(flow, &PRIV(dev)->flow_list, next) {
+ DEBUG("Creating flow #%" PRIu32, i++);
+ flow->flows[SUB_ID(sdev)] =
+ rte_flow_create(PORT_ID(sdev),
+ flow->rule.attr,
+ flow->rule.pattern,
+ flow->rule.actions,
+ &ferror);
+ ret = rte_errno;
+ if (ret)
+ break;
+ }
+ if (ret) {
+ fs_flow_complain(&ferror);
+ return ret;
+ }
+ }
+ return 0;
+}
+
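+ /*
+ * Tear a sub-device down one state at a time, mirroring bring-up in
+ * reverse: stop it if started, unregister its callbacks and close it
+ * if active, detach it from its bus if probed. Each case falls
+ * through to the next until the device is back to DEV_UNDEFINED.
+ * The removal flag is then cleared and the hotplug alarm re-armed so
+ * the sub-device can be plugged back in later.
+ */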
+static void
+fs_dev_remove(struct sub_device *sdev)
+{
+ int ret;
+
+ if (sdev == NULL)
+ return;
+ switch (sdev->state) {
+ case DEV_STARTED:
+ failsafe_rx_intr_uninstall_subdevice(sdev);
+ ret = rte_eth_dev_stop(PORT_ID(sdev));
+ if (ret < 0)
+ ERROR("Failed to stop sub-device %u", SUB_ID(sdev));
+ sdev->state = DEV_ACTIVE;
+ /* fallthrough */
+ case DEV_ACTIVE:
+ failsafe_eth_dev_unregister_callbacks(sdev);
+ ret = rte_eth_dev_close(PORT_ID(sdev));
+ if (ret < 0) {
+ ERROR("Port close failed for sub-device %u",
+ PORT_ID(sdev));
+ }
+ sdev->state = DEV_PROBED;
+ /* fallthrough */
+ case DEV_PROBED:
+ ret = rte_dev_remove(sdev->dev);
+ if (ret < 0) {
+ ERROR("Bus detach failed for sub_device %u",
+ SUB_ID(sdev));
+ } else {
+ rte_eth_dev_release_port(ETH(sdev));
+ }
+ sdev->state = DEV_PARSED;
+ /* fallthrough */
+ case DEV_PARSED:
+ case DEV_UNDEFINED:
+ sdev->state = DEV_UNDEFINED;
+ sdev->sdev_port_id = RTE_MAX_ETHPORTS;
+ /* the end */
+ break;
+ }
+ sdev->remove = 0;
+ failsafe_hotplug_alarm_install(fs_dev(sdev));
+}
+
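+ /*
+ * Fold the sub-device statistics into the failsafe accumulator before
+ * the port goes away. If the current stats cannot be read, fall back
+ * to the last snapshot taken, warning about how stale it is.
+ */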
+static void
+fs_dev_stats_save(struct sub_device *sdev)
+{
+ struct rte_eth_stats stats;
+ int err;
+
+ /* Attempt to read current stats. */
+ err = rte_eth_stats_get(PORT_ID(sdev), &stats);
+ if (err) {
+ uint64_t timestamp = sdev->stats_snapshot.timestamp;
+
+ WARN("Could not access latest statistics from sub-device %d.",
+ SUB_ID(sdev));
+ if (timestamp != 0)
+ WARN("Using latest snapshot taken before %"PRIu64" seconds.",
+ (rte_rdtsc() - timestamp) / rte_get_tsc_hz());
+ }
+ failsafe_stats_increment(&PRIV(fs_dev(sdev))->stats_accumulator,
+ err ? &sdev->stats_snapshot.stats : &stats);
+ memset(&sdev->stats_snapshot, 0, sizeof(sdev->stats_snapshot));
+}
+
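+ /*
+ * Check that no Rx or Tx burst is in flight on any queue of the
+ * sub-device. Returns 1 when the datapath is quiescent, 0 otherwise.
+ */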
+static inline int
+fs_rxtx_clean(struct sub_device *sdev)
+{
+ uint16_t i;
+
+ for (i = 0; i < ETH(sdev)->data->nb_rx_queues; i++)
+ if (FS_ATOMIC_RX(sdev, i))
+ return 0;
+ for (i = 0; i < ETH(sdev)->data->nb_tx_queues; i++)
+ if (FS_ATOMIC_TX(sdev, i))
+ return 0;
+ return 1;
+}
+
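+ /*
+ * Unregister the RMV and LSC event callbacks registered on the
+ * sub-device, warning (without failing) if either call errors out.
+ */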
+void
+failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev)
+{
+ int ret;
+
+ if (sdev == NULL)
+ return;
+ if (sdev->rmv_callback) {
+ ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_RMV,
+ failsafe_eth_rmv_event_callback,
+ sdev);
+ if (ret)
+ WARN("Failed to unregister RMV callback for sub_device"
+ " %d", SUB_ID(sdev));
+ sdev->rmv_callback = 0;
+ }
+ if (sdev->lsc_callback) {
+ ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_LSC,
+ failsafe_eth_lsc_event_callback,
+ sdev);
+ if (ret)
+ WARN("Failed to unregister LSC callback for sub_device"
+ " %d", SUB_ID(sdev));
+ sdev->lsc_callback = 0;
+ }
+}
+
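+ /*
+ * Remove every sub-device flagged for removal. An active sub-device
+ * is skipped while bursts are still in flight; once quiescent, its
+ * stats are saved and it is torn down under the datapath lock.
+ */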
+void
+failsafe_dev_remove(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (!sdev->remove)
+ continue;
+
+ /* An active device must have finished its bursts before
+ * removal, and its stats must be saved beforehand.
+ */
+ if (sdev->state >= DEV_ACTIVE &&
+ fs_rxtx_clean(sdev) == 0)
+ continue;
+ if (fs_lock(dev, 1) != 0)
+ return;
+ if (sdev->state >= DEV_ACTIVE)
+ fs_dev_stats_save(sdev);
+ fs_dev_remove(sdev);
+ fs_unlock(dev, 1);
+ }
+}
+
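+ /*
+ * Realign the sub-device Rx queue states with the failsafe Rx queue
+ * states: restart deferred-start queues the application has already
+ * started, and stop queues the application has stopped.
+ */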
+static int
+failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev)
+{
+ struct rxq *rxq;
+ int ret;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq->info.conf.rx_deferred_start &&
+ dev->data->rx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STARTED) {
+ /*
+ * A sub-device Rx queue with the deferred-start flag does
+ * not launch on device start. Start it manually if the
+ * corresponding failsafe Rx queue was started earlier.
+ */
+ ret = dev->dev_ops->rx_queue_start(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Rx queue %d", i);
+ return ret;
+ }
+ } else if (dev->data->rx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STOPPED) {
+ /*
+ * Stop the sub-device Rx queue manually if the
+ * corresponding failsafe Rx queue was stopped earlier.
+ */
+ ret = dev->dev_ops->rx_queue_stop(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Rx queue %d", i);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
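+ /*
+ * Tx counterpart of the Rx synchronization above: realign the
+ * sub-device Tx queue states with the failsafe Tx queue states.
+ */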
+static int
+failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev)
+{
+ struct txq *txq;
+ int ret;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+
+ if (txq->info.conf.tx_deferred_start &&
+ dev->data->tx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STARTED) {
+ /*
+ * A sub-device Tx queue with the deferred-start flag does
+ * not launch on device start. Start it manually if the
+ * corresponding failsafe Tx queue was started earlier.
+ */
+ ret = dev->dev_ops->tx_queue_start(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Tx queue %d", i);
+ return ret;
+ }
+ } else if (dev->data->tx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STOPPED) {
+ /*
+ * Stop the sub-device Tx queue manually if the
+ * corresponding failsafe Tx queue was stopped earlier.
+ */
+ ret = dev->dev_ops->tx_queue_stop(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Tx queue %d", i);
+ return ret;
+ }
+ }
+ }