return kern_mtu == vhost_mtu ? 0 : -1;
}
-static void
+void
mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv)
{
	/* Clean pre-created resources only on device removal. */
mlx5_vdpa_mem_dereg(priv);
}
+static bool
+mlx5_vdpa_wait_dev_close_tasks_done(struct mlx5_vdpa_priv *priv)
+{
+ uint32_t timeout = 0;
+
+ /* Wait up to ~10 seconds (1000 * 10ms) for all device close tasks to finish. */
+ while (__atomic_load_n(&priv->dev_close_progress,
+ __ATOMIC_RELAXED) != 0 && timeout < 1000) {
+ rte_delay_us_sleep(10000);
+ timeout++;
+ }
+ if (__atomic_load_n(&priv->dev_close_progress,
+ __ATOMIC_RELAXED) != 0) {
+ DRV_LOG(ERR,
+ "Failed to wait close device tasks done vid %d.",
+ priv->vid);
+ return true;
+ }
+ return false;
+}
+
static int
mlx5_vdpa_dev_close(int vid)
{
ret |= mlx5_vdpa_lm_log(priv);
priv->state = MLX5_VDPA_STATE_IN_PROGRESS;
}
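+ /*
+ * Offload the device close to a configuration thread, picked in
+ * round-robin order, so this call does not block on resource release.
+ */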
+ if (priv->use_c_thread) {
+ if (priv->last_c_thrd_idx >=
+ (conf_thread_mng.max_thrds - 1))
+ priv->last_c_thrd_idx = 0;
+ else
+ priv->last_c_thrd_idx++;
+ __atomic_store_n(&priv->dev_close_progress, 1,
+ __ATOMIC_RELAXED);
+ if (mlx5_vdpa_task_add(priv,
+ priv->last_c_thrd_idx,
+ MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
+ NULL, NULL, NULL, 1)) {
+ DRV_LOG(ERR,
+ "Fail to add dev close task. ");
+ goto single_thrd;
+ }
+ priv->state = MLX5_VDPA_STATE_PROBED;
+ DRV_LOG(INFO, "vDPA device %d was closed.", vid);
+ return ret;
+ }
+single_thrd:
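+ /* Synchronous close, also the fallback when task enqueue fails. */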
pthread_mutex_lock(&priv->steer_update_lock);
mlx5_vdpa_steer_unset(priv);
pthread_mutex_unlock(&priv->steer_update_lock);
mlx5_vdpa_drain_cq(priv);
if (priv->lm_mr.addr)
mlx5_os_wrapped_mkey_destroy(&priv->lm_mr);
- priv->state = MLX5_VDPA_STATE_PROBED;
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
+ __atomic_store_n(&priv->dev_close_progress, 0,
+ __ATOMIC_RELAXED);
+ priv->state = MLX5_VDPA_STATE_PROBED;
DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
}
DRV_LOG(ERR, "Failed to reconfigure vid %d.", vid);
return -1;
}
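+ /*
+ * A previous close may still be running on a configuration
+ * thread; wait for it before reconfiguring the device.
+ */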
+ if (mlx5_vdpa_wait_dev_close_tasks_done(priv))
+ return -1;
priv->vid = vid;
priv->connected = true;
if (mlx5_vdpa_mtu_set(priv))
DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
return -1;
}
- if (priv->state == MLX5_VDPA_STATE_PROBED)
+ if (priv->state == MLX5_VDPA_STATE_PROBED) {
+ if (priv->use_c_thread)
+ mlx5_vdpa_wait_dev_close_tasks_done(priv);
mlx5_vdpa_dev_cache_clean(priv);
+ }
priv->connected = false;
return 0;
}
{
if (priv->state == MLX5_VDPA_STATE_CONFIGURED)
mlx5_vdpa_dev_close(priv->vid);
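+ /* Let any in-flight device close task finish before releasing resources. */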
+ if (priv->use_c_thread)
+ mlx5_vdpa_wait_dev_close_tasks_done(priv);
mlx5_vdpa_release_dev_resources(priv);
if (priv->vdev)
rte_vdpa_unregister_device(priv->vdev);
MLX5_VDPA_TASK_REG_MR = 1,
MLX5_VDPA_TASK_SETUP_VIRTQ,
MLX5_VDPA_TASK_STOP_VIRTQ,
+ MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
};
/* Generic task information and size must be multiple of 4B. */
uint64_t features; /* Negotiated features. */
uint16_t log_max_rqt_size;
uint16_t last_c_thrd_idx;
+ uint16_t dev_close_progress; /* Device close is in progress when set. */
uint16_t num_mrs; /* Number of memory regions. */
struct mlx5_vdpa_steer steer;
struct mlx5dv_var *var;
uint32_t *err_cnt, uint32_t sleep_time);
int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
+void
+mlx5_vdpa_vq_destroy(struct mlx5_vdpa_virtq *virtq);
+void
+mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv);
+void
+mlx5_vdpa_virtq_unreg_intr_handle_all(struct mlx5_vdpa_priv *priv);
#endif /* RTE_PMD_MLX5_VDPA_H_ */
task[i].type = task_type;
task[i].remaining_cnt = remaining_cnt;
task[i].err_cnt = err_cnt;
- task[i].idx = data[i];
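+ /* The data array is optional; device close tasks pass NULL. */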
+ if (data)
+ task[i].idx = data[i];
}
if (!mlx5_vdpa_c_thrd_ring_enqueue_bulk(rng, (void **)&task, num, NULL))
return -1;
MLX5_VDPA_USED_RING_LEN(virtq->vq_size));
pthread_mutex_unlock(&virtq->virtq_lock);
break;
+ case MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT:
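+ /*
+ * Mirror the synchronous close path: release interrupt
+ * handles, steering, virtqs and memory, then clear the
+ * in-progress flag so waiters can proceed.
+ */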
+ mlx5_vdpa_virtq_unreg_intr_handle_all(priv);
+ pthread_mutex_lock(&priv->steer_update_lock);
+ mlx5_vdpa_steer_unset(priv);
+ pthread_mutex_unlock(&priv->steer_update_lock);
+ mlx5_vdpa_virtqs_release(priv);
+ mlx5_vdpa_drain_cq(priv);
+ if (priv->lm_mr.addr)
+ mlx5_os_wrapped_mkey_destroy(&priv->lm_mr);
+ if (!priv->connected)
+ mlx5_vdpa_dev_cache_clean(priv);
+ priv->vid = 0;
+ __atomic_store_n(&priv->dev_close_progress, 0,
+ __ATOMIC_RELAXED);
+ break;
default:
DRV_LOG(ERR, "Invalid vdpa task type %d.",
task.type);