/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#ifndef RTE_PMD_MLX5_VDPA_H_
#define RTE_PMD_MLX5_VDPA_H_

#include <linux/virtio_net.h>
#include <sys/queue.h>

#pragma GCC diagnostic ignored "-Wpedantic"
#include <rte_vdpa_dev.h>
#include <rte_vhost.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_spinlock.h>
#include <rte_interrupts.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>

#define MLX5_VDPA_INTR_RETRIES 256
#define MLX5_VDPA_INTR_RETRIES_USEC 1000
#ifndef VIRTIO_F_ORDER_PLATFORM
#define VIRTIO_F_ORDER_PLATFORM 36
#endif

#ifndef VIRTIO_F_RING_PACKED
#define VIRTIO_F_RING_PACKED 34
#endif
#define MLX5_VDPA_DEFAULT_TIMER_DELAY_US 0u
#define MLX5_VDPA_DEFAULT_TIMER_STEP_US 1u
	struct mlx5_devx_obj *cq;
	struct mlx5dv_devx_umem *umem_obj;
	volatile void *umem_buf;
	volatile struct mlx5_cqe *cqes;
	volatile uint32_t *db_rec;

struct mlx5_vdpa_event_qp {
	struct mlx5_vdpa_cq cq;
	struct mlx5_devx_obj *fw_qp;
	struct mlx5_devx_obj *sw_qp;
	struct mlx5dv_devx_umem *umem_obj;
	volatile uint32_t *db_rec;

struct mlx5_vdpa_query_mr {
	SLIST_ENTRY(mlx5_vdpa_query_mr) next;
	struct mlx5dv_devx_umem *umem;
	struct mlx5_devx_obj *mkey;

	MLX5_VDPA_NOTIFIER_STATE_DISABLED,
	MLX5_VDPA_NOTIFIER_STATE_ENABLED,
	MLX5_VDPA_NOTIFIER_STATE_ERR
struct mlx5_vdpa_virtq {
	SLIST_ENTRY(mlx5_vdpa_virtq) next;
	uint8_t notifier_state;
	struct mlx5_vdpa_priv *priv;
	struct mlx5_devx_obj *virtq;
	struct mlx5_devx_obj *counters;
	struct mlx5_vdpa_event_qp eqp;
	struct mlx5dv_devx_umem *obj;
	struct rte_intr_handle intr_handle;
	uint64_t err_time[3]; /* RDTSC time of recent errors. */
	struct mlx5_devx_virtio_q_couners_attr reset;

struct mlx5_vdpa_steer {
	struct mlx5_devx_obj *rqt;
	struct mlx5dv_flow_matcher *matcher;
	struct mlx5_devx_obj *tir;

	MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER,
	MLX5_VDPA_EVENT_MODE_FIXED_TIMER,
	MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT
struct mlx5_vdpa_priv {
	TAILQ_ENTRY(mlx5_vdpa_priv) next;
	pthread_mutex_t vq_config_lock;
	uint64_t last_traffic_tic;
	pthread_mutex_t timer_lock;
	pthread_cond_t timer_cond;
	volatile uint8_t timer_on;
	int event_core; /* Event thread cpu affinity core. */
	uint32_t timer_delay_us;
	uint32_t no_traffic_time_s;
	uint8_t hw_latency_mode; /* Hardware CQ moderation mode. */
	uint16_t hw_max_latency_us; /* Hardware CQ moderation period in usec. */
	uint16_t hw_max_pending_comp; /* Hardware CQ moderation counter. */
	struct rte_vdpa_device *vdev; /* vDPA device. */
	int vid; /* vhost device id. */
	struct ibv_context *ctx; /* Device context. */
	struct rte_pci_device *pci_dev;
	struct mlx5_hca_vdpa_attr caps;
	uint32_t pdn; /* Protection Domain number. */
	uint32_t gpa_mkey_index;
	struct ibv_mr *null_mr;
	struct rte_vhost_memory *vmem;
	struct mlx5dv_devx_event_channel *eventc;
	struct mlx5dv_devx_event_channel *err_chnl;
	struct mlx5dv_devx_uar *uar;
	struct rte_intr_handle intr_handle;
	struct rte_intr_handle err_intr_handle;
	struct mlx5_devx_obj *td;
	struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
	uint8_t num_lag_ports;
	uint64_t features; /* Negotiated features. */
	uint16_t log_max_rqt_size;
	struct mlx5_vdpa_steer steer;
	struct mlx5dv_var *var;
	SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
	struct mlx5_vdpa_virtq virtqs[];
	MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
	MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
	MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
	MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
	MLX5_VDPA_STATS_INVALID_BUFFER,
	MLX5_VDPA_STATS_COMPLETION_ERRORS,
 * Check whether the virtq is a receive queue.
 * According to the VIRTIO_NET spec, the virtqueue index identifies its type:
 *   2(N-1)+1 transmitqN
static inline uint8_t
is_virtq_recvq(int virtq_index, int nr_vring)
{
	if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
		return 1;
	return 0;
}
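/*
 * Worked example (illustrative only, not part of the API): assuming
 * nr_vring = 5, i.e. two virtio-net queue pairs plus a control queue, the
 * mapping above gives:
 *
 *	is_virtq_recvq(0, 5) == 1   (receiveq1)
 *	is_virtq_recvq(1, 5) == 0   (transmitq1)
 *	is_virtq_recvq(2, 5) == 1   (receiveq2)
 *	is_virtq_recvq(3, 5) == 0   (transmitq2)
 *	is_virtq_recvq(4, 5) == 0   (controlq, the last vring)
 */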
 * Release all the prepared memory regions and all their related resources.
 * The vdpa driver private structure.
void mlx5_vdpa_mem_dereg(struct mlx5_vdpa_priv *priv);

 * Register all the memory regions of the virtio device to the HW and allocate
 * all their related resources.
 * The vdpa driver private structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);
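/*
 * Minimal usage sketch (illustrative only): one mlx5_vdpa_mem_register() for
 * all the memory regions of the virtio device, matched by one
 * mlx5_vdpa_mem_dereg() on teardown. "priv" stands for an already initialized
 * driver private structure owned by the caller.
 *
 *	if (mlx5_vdpa_mem_register(priv))
 *		return -rte_errno;
 *	...
 *	mlx5_vdpa_mem_dereg(priv);
 */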
 * Create an event QP and all its related resources.
 * The vdpa driver private structure.
 * Number of descriptors.
 * The guest notification file descriptor.
 * Pointer to the event QP structure.
 * 0 on success, -1 otherwise and rte_errno is set.
int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			      int callfd, struct mlx5_vdpa_event_qp *eqp);
 * Destroy an event QP and all its related resources.
 * Pointer to the event QP structure.
void mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp);
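/*
 * Usage sketch (illustrative only): a typical create/destroy pairing for one
 * virtq completion path. "desc_n" and "callfd" are assumed to come from the
 * vhost ring size and the guest notification file descriptor of that virtq.
 *
 *	struct mlx5_vdpa_event_qp eqp = { 0 };
 *
 *	if (mlx5_vdpa_event_qp_create(priv, desc_n, callfd, &eqp))
 *		return -rte_errno;
 *	...
 *	mlx5_vdpa_event_qp_destroy(&eqp);
 */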
 * Release all the global event resources.
 * The vdpa driver private structure.
void mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv);

 * Setup CQE event.
 * The vdpa driver private structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
int mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv);

 * Unset CQE event.
 * The vdpa driver private structure.
void mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv);

 * Setup error interrupt handler.
 * The vdpa driver private structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
int mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv);

 * Unset error event handler.
 * The vdpa driver private structure.
void mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv);
 * Release the virtqs and all their related resources.
 * The vdpa driver private structure.
void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv);
 * Create all the HW virtqs and all their related resources.
 * The vdpa driver private structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);
 * Enable/disable a virtq.
 * The vdpa driver private structure.
 * Set to enable, otherwise disable.
 * 0 on success, a negative value otherwise.
int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);
 * Unset steering and release all its related resources - stop traffic.
 * The vdpa driver private structure.
void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);

 * Update steering according to the receive queues state.
 * The vdpa driver private structure.
 * 0 on success, a negative value otherwise.
int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);

 * Setup steering and all its related resources to enable RSS traffic from the
 * device to all the Rx host queues.
 * The vdpa driver private structure.
 * 0 on success, a negative value otherwise.
int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv);
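/*
 * Possible ordering (an illustrative sketch, not the only valid flow):
 * steering is set up once after the virtqs are ready, refreshed whenever the
 * set of enabled receive queues changes, and torn down before the virtqs are
 * released.
 *
 *	if (mlx5_vdpa_steer_setup(priv))
 *		goto error;
 *	...
 *	if (mlx5_vdpa_steer_update(priv))	(e.g. after a queue state change)
 *		goto error;
 *	...
 *	mlx5_vdpa_steer_unset(priv);
 */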
 * Enable/disable live migration logging.
 * The vdpa driver private structure.
 * Set for enable, unset for disable.
 * 0 on success, a negative value otherwise.
int mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable);
 * Set dirty bitmap logging to allow live migration.
 * The vdpa driver private structure.
 * @param[in] log_base
 * @param[in] log_size
 * 0 on success, a negative value otherwise.
int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
			       uint64_t log_size);
 * Log all virtqs information for live migration.
 * The vdpa driver private structure.
 * 0 on success, a negative value otherwise.
int mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv);
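/*
 * Illustrative live-migration logging flow (a sketch; it assumes the vhost
 * layer provides log_base/log_size once VHOST_F_LOG_ALL is negotiated, and
 * the exact call sites are driver internal):
 *
 *	if (mlx5_vdpa_logging_enable(priv, 1))
 *		return -1;
 *	if (mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size))
 *		return -1;
 *	...
 *	if (mlx5_vdpa_lm_log(priv))
 *		return -1;
 *	mlx5_vdpa_logging_enable(priv, 0);
 */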
 * Modify the virtq state to ready or suspend.
 * The vdpa driver private virtq structure.
 * Set for ready, otherwise suspend.
 * 0 on success, a negative value otherwise.
int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);
 * Stop virtq before destroying it.
 * The vdpa driver private structure.
 * 0 on success, a negative value otherwise.
int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);
 * Query virtq information.
 * The vdpa driver private structure.
 * 0 on success, a negative value otherwise.
int mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index);
 * Get virtq statistics.
 * The vdpa driver private structure.
 * The virtq statistics array to fill.
 * The number of elements in @p stats array.
 * A negative value on error, otherwise the number of entries filled in the
 * @p stats array.
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n);
 * Reset virtq statistics.
 * The vdpa driver private structure.
 * A negative value on error, otherwise 0.
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);
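/*
 * Usage sketch (illustrative only): retrieving and then resetting the
 * counters of one virtq. The array size follows the MLX5_VDPA_STATS_* enum
 * above; "qid" is assumed to be a valid virtq index for this device.
 *
 *	struct rte_vdpa_stat stats[MLX5_VDPA_STATS_COMPLETION_ERRORS + 1];
 *	int n = mlx5_vdpa_virtq_stats_get(priv, qid, stats, RTE_DIM(stats));
 *
 *	if (n < 0)
 *		return n;
 *	...
 *	mlx5_vdpa_virtq_stats_reset(priv, qid);
 */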
#endif /* RTE_PMD_MLX5_VDPA_H_ */