(1ULL << VIRTIO_F_ANY_LAYOUT) | \
(1ULL << VIRTIO_NET_F_MQ) | \
(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
- (1ULL << VIRTIO_F_ORDER_PLATFORM))
+ (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
+ (1ULL << VHOST_F_LOG_ALL))
#define MLX5_VDPA_PROTOCOL_FEATURES \
((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
return mlx5_vdpa_virtq_enable(virtq, state);
}
+/*
+ * vDPA .set_features callback: called when the guest's feature negotiation
+ * completes.  If VHOST_F_LOG_ALL was negotiated (live migration started),
+ * map the vhost dirty log and enable dirty-bitmap dumping on all virtqs.
+ *
+ * @param vid
+ *   Vhost device id.
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+static int
+mlx5_vdpa_features_set(int vid)
+{
+	int did = rte_vhost_get_vdpa_device_id(vid);
+	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+	uint64_t log_base, log_size;
+	uint64_t features;
+	int ret;
+
+	if (priv == NULL) {
+		DRV_LOG(ERR, "Invalid device id: %d.", did);
+		return -EINVAL;
+	}
+	ret = rte_vhost_get_negotiated_features(vid, &features);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to get negotiated features.");
+		return ret;
+	}
+	/* Nothing to do unless the master asked for dirty-page logging. */
+	if (RTE_VHOST_NEED_LOG(features)) {
+		ret = rte_vhost_get_log_base(vid, &log_base, &log_size);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to get log base.");
+			return ret;
+		}
+		ret = mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to set dirty bitmap.");
+			return ret;
+		}
+		DRV_LOG(INFO, "mlx5 vdpa: enabling dirty logging...");
+		ret = mlx5_vdpa_logging_enable(priv, 1);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to enable dirty logging.");
+			return ret;
+		}
+	}
+	return 0;
+}
+
static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
.get_queue_num = mlx5_vdpa_get_queue_num,
.get_features = mlx5_vdpa_get_vdpa_features,
.dev_conf = NULL,
.dev_close = NULL,
.set_vring_state = mlx5_vdpa_set_vring_state,
- .set_features = NULL,
+ .set_features = mlx5_vdpa_features_set,
.migration_done = NULL,
.get_vfio_group_fd = NULL,
.get_vfio_device_fd = NULL,
*/
int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv);
+/**
+ * Enable/Disable live migration logging.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ * @param[in] enable
+ * Set for enable, unset for disable.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable);
+
+/**
+ * Set dirty bitmap logging to allow live migration.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ * @param[in] log_base
+ * Vhost log base.
+ * @param[in] log_size
+ * Vhost log size.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
+ uint64_t log_size);
+
+/**
+ * Log all virtqs information for live migration.
+ *
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv);
+
+/**
+ * Modify virtq state to be ready or suspend.
+ *
+ * @param[in] virtq
+ * The vdpa driver private virtq structure.
+ * @param[in] state
+ * Set for ready, otherwise suspend.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);
+
#endif /* RTE_PMD_MLX5_VDPA_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+#include <rte_malloc.h>
+#include <rte_errno.h>
+
+#include "mlx5_vdpa_utils.h"
+#include "mlx5_vdpa.h"
+
+
+/*
+ * Turn dirty-bitmap dumping on or off for every virtq of the device.
+ *
+ * @param priv
+ *   The vdpa driver private structure.
+ * @param enable
+ *   Nonzero to enable dumping, zero to disable it.
+ * @return
+ *   0 on success, -1 if modifying any virtq fails.
+ */
+int
+mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable)
+{
+	struct mlx5_vdpa_virtq *vq;
+	struct mlx5_devx_virtq_attr mod = {
+		.type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE,
+		.dirty_bitmap_dump_enable = enable,
+	};
+
+	SLIST_FOREACH(vq, &priv->virtq_list, next) {
+		mod.queue_index = vq->index;
+		if (mlx5_devx_cmd_modify_virtq(vq->virtq, &mod)) {
+			DRV_LOG(ERR, "Failed to modify virtq %d logging.",
+				vq->index);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Register the vhost dirty log buffer with the device and point every
+ * virtq's dirty-bitmap parameters at it.
+ *
+ * Bug fix: rte_malloc() does not zero the allocation, so the old single
+ * "err:" label dereferenced an uninitialized mr->mkey when umem
+ * registration failed.  Use staged cleanup labels instead so each label
+ * only releases resources that were actually acquired.
+ *
+ * @param priv
+ *   The vdpa driver private structure.
+ * @param log_base
+ *   Vhost log base address.
+ * @param log_size
+ *   Vhost log size in bytes.
+ * @return
+ *   0 on success, -1 otherwise.
+ */
+int
+mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
+			   uint64_t log_size)
+{
+	struct mlx5_devx_mkey_attr mkey_attr = {
+		.addr = (uintptr_t)log_base,
+		.size = log_size,
+		.pd = priv->pdn,
+		.pg_access = 1,
+		.klm_array = NULL,
+		.klm_num = 0,
+	};
+	struct mlx5_devx_virtq_attr attr = {
+		.type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS,
+		.dirty_bitmap_addr = log_base,
+		.dirty_bitmap_size = log_size,
+	};
+	struct mlx5_vdpa_query_mr *mr = rte_malloc(__func__, sizeof(*mr), 0);
+	struct mlx5_vdpa_virtq *virtq;
+
+	if (!mr) {
+		DRV_LOG(ERR, "Failed to allocate mem for lm mr.");
+		return -1;
+	}
+	mr->umem = mlx5_glue->devx_umem_reg(priv->ctx,
+					    (void *)(uintptr_t)log_base,
+					    log_size, IBV_ACCESS_LOCAL_WRITE);
+	if (!mr->umem) {
+		DRV_LOG(ERR, "Failed to register umem for lm mr.");
+		goto err_free;
+	}
+	mkey_attr.umem_id = mr->umem->umem_id;
+	mr->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);
+	if (!mr->mkey) {
+		DRV_LOG(ERR, "Failed to create Mkey for lm.");
+		goto err_umem;
+	}
+	attr.dirty_bitmap_mkey = mr->mkey->id;
+	SLIST_FOREACH(virtq, &priv->virtq_list, next) {
+		attr.queue_index = virtq->index;
+		if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
+			DRV_LOG(ERR, "Failed to modify virtq %d for lm.",
+				virtq->index);
+			goto err_mkey;
+		}
+	}
+	mr->is_indirect = 0;
+	/* Track the MR so it can be released on device close. */
+	SLIST_INSERT_HEAD(&priv->mr_list, mr, next);
+	return 0;
+err_mkey:
+	mlx5_devx_cmd_destroy(mr->mkey);
+err_umem:
+	mlx5_glue->devx_umem_dereg(mr->umem);
+err_free:
+	rte_free(mr);
+	return -1;
+}
+
+/*
+ * Byte length of a used ring with `size` entries: the element array plus the
+ * three uint16_t fields (flags, idx, avail_event) around it.
+ */
+#define MLX5_VDPA_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+/*
+ * Final dirty sync for live migration: suspend each virtq, read back the HW
+ * available/used indexes, report them to vhost and mark the whole used ring
+ * dirty so the last device writes reach the destination.
+ * Returns 0 on success (or when logging was not negotiated), -1 on failure.
+ * NOTE(review): virtqs are left suspended on return — presumably the device
+ * is being stopped right after; confirm against the migration_done caller.
+ */
+int
+mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
+{
+	struct mlx5_devx_virtq_attr attr = {0};
+	struct mlx5_vdpa_virtq *virtq;
+	uint64_t features;
+	int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
+
+	if (ret) {
+		DRV_LOG(ERR, "Failed to get negotiated features.");
+		return -1;
+	}
+	/* Nothing to log if the master never enabled dirty logging. */
+	if (!RTE_VHOST_NEED_LOG(features))
+		return 0;
+	SLIST_FOREACH(virtq, &priv->virtq_list, next) {
+		/* Suspend the queue first so the HW indexes are stable. */
+		ret = mlx5_vdpa_virtq_modify(virtq, 0);
+		if (ret)
+			return -1;
+		if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
+			DRV_LOG(ERR, "Failed to query virtq %d.", virtq->index);
+			return -1;
+		}
+		DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
+			"hw_used_index=%d", priv->vid, virtq->index,
+			attr.hw_available_index, attr.hw_used_index);
+		/* Hand the final ring positions back to the vhost library. */
+		ret = rte_vhost_set_vring_base(priv->vid, virtq->index,
+					       attr.hw_available_index,
+					       attr.hw_used_index);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to set virtq %d base.",
+				virtq->index);
+			return -1;
+		}
+		/* Mark the entire used ring dirty so it is re-sent. */
+		rte_vhost_log_used_vring(priv->vid, virtq->index, 0,
+					 MLX5_VDPA_USED_RING_LEN(virtq->vq_size));
+	}
+	return 0;
+}