struct mlx5_vdpa_cq cq;
struct mlx5_devx_obj *fw_qp;
struct mlx5_devx_qp sw_qp;
+ uint16_t qp_pi;
};
struct mlx5_vdpa_query_mr {
SLIST_ENTRY(mlx5_vdpa_query_mr) next;
- void *addr;
- uint64_t length;
- struct mlx5dv_devx_umem *umem;
- struct mlx5_devx_obj *mkey;
+ /* The MR is held either as a verbs MR or as a DevX mkey object;
+ * is_indirect below presumably selects which member is valid - confirm
+ * against the registration code.
+ */
+ union {
+ struct ibv_mr *mr;
+ struct mlx5_devx_obj *mkey;
+ };
int is_indirect;
};
MLX5_VDPA_NOTIFIER_STATE_ERR
};
+/* Size of the configuration-thread pool array below. */
+#define MLX5_VDPA_MAX_C_THRD 256
+/* NOTE(review): presumably the per-thread task ring capacity - confirm
+ * against the rte_ring creation in the implementation.
+ */
+#define MLX5_VDPA_MAX_TASKS_PER_THRD 4096
+/* NOTE(review): presumably the max tasks queued per device at once - confirm. */
+#define MLX5_VDPA_TASKS_PER_DEV 64
+
+/* Generic task information and size must be multiple of 4B. */
+struct mlx5_vdpa_task {
+ struct mlx5_vdpa_priv *priv; /* Device this task belongs to. */
+ uint32_t *remaining_cnt; /* Pending-task counter shared with the submitter. */
+ uint32_t *err_cnt; /* Failed-task counter shared with the submitter. */
+ uint32_t idx; /* Task index; exact meaning set by the submitter - confirm. */
+} __rte_packed __rte_aligned(4);
+
+/* Generic mlx5_vdpa_c_thread information. */
+struct mlx5_vdpa_c_thread {
+ pthread_t tid; /* Configuration thread handle. */
+ struct rte_ring *rng; /* Task queue feeding this thread. */
+ pthread_cond_t c_cond; /* Presumably signals task arrival - confirm in thread loop. */
+};
+
+/* Process-wide configuration thread pool management. */
+struct mlx5_vdpa_conf_thread_mng {
+ void *initializer_priv; /* Priv that created the pool - confirm usage. */
+ uint32_t refcnt; /* Number of users of the shared pool. */
+ uint32_t max_thrds; /* Threads in use (<= MLX5_VDPA_MAX_C_THRD). */
+ pthread_mutex_t cthrd_lock; /* Protects this structure - confirm exact scope. */
+ struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
+};
+/* Global pool instance; defined in the implementation file. */
+extern struct mlx5_vdpa_conf_thread_mng conf_thread_mng;
+
+
struct mlx5_vdpa_virtq {
SLIST_ENTRY(mlx5_vdpa_virtq) next;
uint8_t enable;
uint16_t vq_size;
uint8_t notifier_state;
bool stopped;
+ uint32_t configured:1; /* Set once the HW virtq object is created - confirm. */
uint32_t version;
+ pthread_mutex_t virtq_lock; /* Per-virtq configuration lock. */
struct mlx5_vdpa_priv *priv;
struct mlx5_devx_obj *virtq;
struct mlx5_devx_obj *counters;
struct rte_intr_handle *intr_handle;
uint64_t err_time[3]; /* RDTSC time of recent errors. */
uint32_t n_retry;
+ struct mlx5_devx_virtio_q_couners_attr stats; /* Cached counters; 'reset' below presumably holds the baseline - confirm. */
struct mlx5_devx_virtio_q_couners_attr reset;
};
MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT
};
+/* vDPA device lifecycle state (stored in mlx5_vdpa_priv.state). */
+enum mlx5_dev_state {
+ MLX5_VDPA_STATE_PROBED = 0,
+ MLX5_VDPA_STATE_CONFIGURED,
+ MLX5_VDPA_STATE_IN_PROGRESS /* Shutting down. */
+};
+
struct mlx5_vdpa_priv {
TAILQ_ENTRY(mlx5_vdpa_priv) next;
- uint8_t configured;
- pthread_mutex_t vq_config_lock;
+ bool connected; /* Vhost connection established - confirm semantics. */
+ bool use_c_thread; /* Offload config work to the thread pool - confirm. */
+ enum mlx5_dev_state state; /* Replaces the removed 'configured' flag. */
+ rte_spinlock_t db_lock; /* Presumably guards doorbell writes - confirm. */
+ pthread_mutex_t steer_update_lock; /* Serializes steering updates - confirm. */
uint64_t no_traffic_counter;
pthread_t timer_tid;
int event_mode;
uint8_t hw_latency_mode; /* Hardware CQ moderation mode. */
uint16_t hw_max_latency_us; /* Hardware CQ moderation period in usec. */
uint16_t hw_max_pending_comp; /* Hardware CQ moderation counter. */
+ uint16_t queue_size; /* virtq depth for pre-creating virtq resource */
+ uint16_t queues; /* Max virtq pair for pre-creating virtq resource */
struct rte_vdpa_device *vdev; /* vDPA device. */
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
int vid; /* vhost device id. */
struct mlx5_vdpa_steer steer;
struct mlx5dv_var *var;
void *virtq_db_addr;
+ struct mlx5_pmd_wrapped_mr lm_mr; /* Presumably the live-migration log MR - confirm. */
SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
struct mlx5_vdpa_virtq virtqs[];
};
* Number of descriptors.
* @param[in] callfd
* The guest notification file descriptor.
- * @param[in/out] eqp
- * Pointer to the event QP structure.
+ * @param[in/out] virtq
+ * Pointer to the virt-queue structure.
*
* @return
* 0 on success, -1 otherwise and rte_errno is set.
*/
-int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
- int callfd, struct mlx5_vdpa_event_qp *eqp);
+int
+mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
+ int callfd, struct mlx5_vdpa_virtq *virtq);
/**
* Destroy an event QP and all its related resources.
+ *
+ * @param[in] eqp
+ *   Pointer to the event QP structure to destroy.
*/
void mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp);
+/**
+ * Create all the event global resources.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ *
+ * @return
+ *   0 on success, a negative value otherwise - confirm with implementation.
+ */
+int
+mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv);
+
/**
* Release all the event global resources.
*
void mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv);
/**
- * Release a virtq and all its related resources.
+ * Release virtqs and resources except that to be reused.
*
* @param[in] priv
* The vdpa driver private structure.
*/
void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv);
+/**
+ * Cleanup cached resources of all virtqs.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ */
+void mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv);
+
/**
* Create all the HW virtqs resources and all their related resources.
*
int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);
/**
- * Unset steering and release all its related resources- stop traffic.
+ * Unset steering - stop traffic.
*
* @param[in] priv
* The vdpa driver private structure.
*/
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);
+
+/**
+ * Drain virtq CQ CQE.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ */
+void
+mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv);
+
+/* Report whether the HW supports modifying an existing virtq object - confirm. */
+bool
+mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv);
+
+/**
+ * Create configuration multi-threads resource
+ *
+ * @param[in] cpu_core
+ * CPU core number to set configuration threads affinity to.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_vdpa_mult_threads_create(int cpu_core);
+
+/**
+ * Destroy configuration multi-threads resource
+ *
+ * @param[in] need_unlock
+ *   Presumably true when cthrd_lock is held by the caller and must be
+ *   released during destruction - confirm against callers.
+ */
+void
+mlx5_vdpa_mult_threads_destroy(bool need_unlock);
+
+/**
+ * Queue tasks to the configuration threads.
+ *
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ * @param[in] thrd_idx
+ *   Index of the target configuration thread.
+ * @param[in] num
+ *   Number of tasks to queue.
+ *
+ * @return
+ *   Boolean result; success/failure convention is not visible here -
+ *   NOTE(review): confirm against the implementation before relying on it.
+ */
+bool
+mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
+ uint32_t thrd_idx,
+ uint32_t num);
#endif /* RTE_PMD_MLX5_VDPA_H_ */