1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2019 Mellanox Technologies, Ltd
5 #ifndef RTE_PMD_MLX5_VDPA_H_
6 #define RTE_PMD_MLX5_VDPA_H_
8 #include <linux/virtio_net.h>
12 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <vdpa_driver.h>
16 #include <rte_vhost.h>
18 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_spinlock.h>
21 #include <rte_interrupts.h>
23 #include <mlx5_glue.h>
24 #include <mlx5_devx_cmds.h>
25 #include <mlx5_common_devx.h>
29 #define MLX5_VDPA_INTR_RETRIES 256
30 #define MLX5_VDPA_INTR_RETRIES_USEC 1000
32 #ifndef VIRTIO_F_ORDER_PLATFORM
33 #define VIRTIO_F_ORDER_PLATFORM 36
36 #ifndef VIRTIO_F_RING_PACKED
37 #define VIRTIO_F_RING_PACKED 34
40 #define MLX5_VDPA_DEFAULT_TIMER_DELAY_US 0u
41 #define MLX5_VDPA_DEFAULT_TIMER_STEP_US 1u
50 struct mlx5_devx_cq cq_obj;
54 struct mlx5_vdpa_event_qp {
55 struct mlx5_vdpa_cq cq;
56 struct mlx5_devx_obj *fw_qp;
57 struct mlx5_devx_qp sw_qp;
61 struct mlx5_vdpa_query_mr {
62 SLIST_ENTRY(mlx5_vdpa_query_mr) next;
65 struct mlx5_devx_obj *mkey;
71 MLX5_VDPA_NOTIFIER_STATE_DISABLED,
72 MLX5_VDPA_NOTIFIER_STATE_ENABLED,
73 MLX5_VDPA_NOTIFIER_STATE_ERR
76 #define MLX5_VDPA_MAX_C_THRD 256
77 #define MLX5_VDPA_MAX_TASKS_PER_THRD 4096
78 #define MLX5_VDPA_TASKS_PER_DEV 64
80 /* Generic task information; its size must be a multiple of 4B. */
81 struct mlx5_vdpa_task {
82 struct mlx5_vdpa_priv *priv;
83 uint32_t *remaining_cnt;
86 } __rte_packed __rte_aligned(4);
88 /* Generic mlx5_vdpa_c_thread information. */
89 struct mlx5_vdpa_c_thread {
92 pthread_cond_t c_cond;
95 struct mlx5_vdpa_conf_thread_mng {
96 void *initializer_priv;
99 pthread_mutex_t cthrd_lock;
100 struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
102 extern struct mlx5_vdpa_conf_thread_mng conf_thread_mng;
104 struct mlx5_vdpa_virtq {
105 SLIST_ENTRY(mlx5_vdpa_virtq) next;
109 uint8_t notifier_state;
111 uint32_t configured:1;
113 pthread_mutex_t virtq_lock;
114 struct mlx5_vdpa_priv *priv;
115 struct mlx5_devx_obj *virtq;
116 struct mlx5_devx_obj *counters;
117 struct mlx5_vdpa_event_qp eqp;
119 struct mlx5dv_devx_umem *obj;
123 struct rte_intr_handle *intr_handle;
124 uint64_t err_time[3]; /* RDTSC time of recent errors. */
126 struct mlx5_devx_virtio_q_couners_attr stats;
127 struct mlx5_devx_virtio_q_couners_attr reset;
130 struct mlx5_vdpa_steer {
131 struct mlx5_devx_obj *rqt;
135 struct mlx5dv_flow_matcher *matcher;
136 struct mlx5_devx_obj *tir;
143 MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER,
144 MLX5_VDPA_EVENT_MODE_FIXED_TIMER,
145 MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT
148 enum mlx5_dev_state {
149 MLX5_VDPA_STATE_PROBED = 0,
150 MLX5_VDPA_STATE_CONFIGURED,
151 MLX5_VDPA_STATE_IN_PROGRESS /* Shutting down. */
154 struct mlx5_vdpa_priv {
155 TAILQ_ENTRY(mlx5_vdpa_priv) next;
158 enum mlx5_dev_state state;
159 rte_spinlock_t db_lock;
160 pthread_mutex_t steer_update_lock;
161 uint64_t no_traffic_counter;
164 int event_core; /* Event thread cpu affinity core. */
166 uint32_t timer_delay_us;
167 uint32_t no_traffic_max;
168 uint8_t hw_latency_mode; /* Hardware CQ moderation mode. */
169 uint16_t hw_max_latency_us; /* Hardware CQ moderation period in usec. */
170 uint16_t hw_max_pending_comp; /* Hardware CQ moderation counter. */
171 uint16_t queue_size; /* virtq depth for pre-creating virtq resource */
172 uint16_t queues; /* Max virtq pair for pre-creating virtq resource */
173 struct rte_vdpa_device *vdev; /* vDPA device. */
174 struct mlx5_common_device *cdev; /* Backend mlx5 device. */
175 int vid; /* vhost device id. */
176 struct mlx5_hca_vdpa_attr caps;
177 uint32_t gpa_mkey_index;
178 struct ibv_mr *null_mr;
179 struct rte_vhost_memory *vmem;
180 struct mlx5dv_devx_event_channel *eventc;
181 struct mlx5dv_devx_event_channel *err_chnl;
183 struct rte_intr_handle *err_intr_handle;
184 struct mlx5_devx_obj *td;
185 struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
187 uint8_t num_lag_ports;
188 uint64_t features; /* Negotiated features. */
189 uint16_t log_max_rqt_size;
190 struct mlx5_vdpa_steer steer;
191 struct mlx5dv_var *var;
193 struct mlx5_pmd_wrapped_mr lm_mr;
194 SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
195 struct mlx5_vdpa_virtq virtqs[];
199 MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
200 MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
201 MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
202 MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
203 MLX5_VDPA_STATS_INVALID_BUFFER,
204 MLX5_VDPA_STATS_COMPLETION_ERRORS,
209 * Check whether virtq is for traffic receive.
210 * According to the VIRTIO_NET Spec, the virtqueue index identifies its type by:
215 * 2(N-1)+1 transmitqN
218 static inline uint8_t
219 is_virtq_recvq(int virtq_index, int nr_vring)
221 if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
227 * Release all the prepared memory regions and all their related resources.
230 * The vdpa driver private structure.
232 void mlx5_vdpa_mem_dereg(struct mlx5_vdpa_priv *priv);
235 * Register all the memory regions of the virtio device to the HW and allocate
236 * all their related resources.
239 * The vdpa driver private structure.
242 * 0 on success, a negative errno value otherwise and rte_errno is set.
244 int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);
248 * Create an event QP and all its related resources.
251 * The vdpa driver private structure.
253 * Number of descriptors.
255 * The guest notification file descriptor.
256 * @param[in/out] virtq
257 * Pointer to the virt-queue structure.
260 * 0 on success, -1 otherwise and rte_errno is set.
263 mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
264 int callfd, struct mlx5_vdpa_virtq *virtq);
267 * Destroy an event QP and all its related resources.
270 * Pointer to the event QP structure.
272 void mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp);
275 * Create all the event global resources.
278 * The vdpa driver private structure.
281 mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv);
284 * Release all the event global resources.
287 * The vdpa driver private structure.
289 void mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv);
295 * The vdpa driver private structure.
298 * 0 on success, a negative errno value otherwise and rte_errno is set.
300 int mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv);
306 * The vdpa driver private structure.
308 void mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv);
311 * Setup error interrupt handler.
314 * The vdpa driver private structure.
317 * 0 on success, a negative errno value otherwise and rte_errno is set.
319 int mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv);
322 * Unset error event handler.
325 * The vdpa driver private structure.
327 void mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv);
330 * Release virtqs and resources, except those to be reused.
333 * The vdpa driver private structure.
335 void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv);
338 * Cleanup cached resources of all virtqs.
341 * The vdpa driver private structure.
343 void mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv);
346 * Create all the HW virtqs resources and all their related resources.
349 * The vdpa driver private structure.
352 * 0 on success, a negative errno value otherwise and rte_errno is set.
354 int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);
357 * Enable/Disable virtq.
360 * The vdpa driver private structure.
364 * Set to enable, otherwise disable.
367 * 0 on success, a negative value otherwise.
369 int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);
372 * Unset steering - stop traffic.
375 * The vdpa driver private structure.
377 void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
380 * Update steering according to the received queues status.
383 * The vdpa driver private structure.
386 * 0 on success, a negative value otherwise.
388 int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);
391 * Setup steering and all its related resources to enable RSS traffic from the
392 * device to all the Rx host queues.
395 * The vdpa driver private structure.
398 * 0 on success, a negative value otherwise.
400 int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv);
403 * Enable/Disable live migration logging.
406 * The vdpa driver private structure.
408 * Set for enable, unset for disable.
411 * 0 on success, a negative value otherwise.
413 int mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable);
416 * Set dirty bitmap logging to allow live migration.
419 * The vdpa driver private structure.
420 * @param[in] log_base
422 * @param[in] log_size
426 * 0 on success, a negative value otherwise.
428 int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
432 * Log all virtqs information for live migration.
435 * The vdpa driver private structure.
437 * Set for enable, unset for disable.
440 * 0 on success, a negative value otherwise.
442 int mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv);
445 * Modify virtq state to be ready or suspend.
448 * The vdpa driver private virtq structure.
450 * Set for ready, otherwise suspend.
453 * 0 on success, a negative value otherwise.
455 int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);
458 * Stop virtq before destroying it.
461 * The vdpa driver private structure.
466 * 0 on success, a negative value otherwise.
468 int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);
471 * Query virtq information.
474 * The vdpa driver private structure.
479 * 0 on success, a negative value otherwise.
481 int mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index);
484 * Get virtq statistics.
487 * The vdpa driver private structure.
491 * The virtq statistics array to fill.
493 * The number of elements in @p stats array.
496 * A negative value on error, otherwise the number of entries filled in the
500 mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
501 struct rte_vdpa_stat *stats, unsigned int n);
504 * Reset virtq statistics.
507 * The vdpa driver private structure.
512 * A negative value on error, otherwise 0.
515 mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);
518 * Drain virtq CQ CQE.
521 * The vdpa driver private structure.
524 mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv);
527 mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv);
530 * Create configuration multi-thread resources.
532 * @param[in] cpu_core
533 * CPU core number to set configuration threads affinity to.
536 * 0 on success, a negative value otherwise.
539 mlx5_vdpa_mult_threads_create(int cpu_core);
542 * Destroy configuration multi-thread resources.
546 mlx5_vdpa_mult_threads_destroy(bool need_unlock);
549 mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
552 #endif /* RTE_PMD_MLX5_VDPA_H_ */