/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_VDPA_H_
#define RTE_PMD_MLX5_VDPA_H_

#include <linux/virtio_net.h>
#include <sys/queue.h>
#include <stdbool.h>
#include <pthread.h>

#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <vdpa_driver.h>
#include <rte_vhost.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_spinlock.h>
#include <rte_interrupts.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>

#define MLX5_VDPA_INTR_RETRIES 256
#define MLX5_VDPA_INTR_RETRIES_USEC 1000

#ifndef VIRTIO_F_ORDER_PLATFORM
#define VIRTIO_F_ORDER_PLATFORM 36
#endif

#ifndef VIRTIO_F_RING_PACKED
#define VIRTIO_F_RING_PACKED 34
#endif

#define MLX5_VDPA_DEFAULT_TIMER_DELAY_US 0u
#define MLX5_VDPA_DEFAULT_TIMER_STEP_US 1u

struct mlx5_vdpa_cq {
	uint16_t log_desc_n;
	uint32_t cq_ci:24;
	uint32_t arm_sn:2;
	uint32_t armed:1;
	int callfd;
	rte_spinlock_t sl;
	struct mlx5_devx_cq cq_obj;
	uint64_t errors;
};

struct mlx5_vdpa_event_qp {
	struct mlx5_vdpa_cq cq;
	struct mlx5_devx_obj *fw_qp;
	struct mlx5_devx_qp sw_qp;
};

struct mlx5_vdpa_query_mr {
	SLIST_ENTRY(mlx5_vdpa_query_mr) next;
	union {
		struct ibv_mr *mr;
		struct mlx5_devx_obj *mkey;
	};
	int is_indirect;
};

enum {
	MLX5_VDPA_NOTIFIER_STATE_DISABLED,
	MLX5_VDPA_NOTIFIER_STATE_ENABLED,
	MLX5_VDPA_NOTIFIER_STATE_ERR
};

struct mlx5_vdpa_virtq {
	SLIST_ENTRY(mlx5_vdpa_virtq) next;
	uint8_t enable;
	uint16_t index;
	uint16_t vq_size;
	uint8_t notifier_state;
	bool stopped;
	uint32_t version;
	struct mlx5_vdpa_priv *priv;
	struct mlx5_devx_obj *virtq;
	struct mlx5_devx_obj *counters;
	struct mlx5_vdpa_event_qp eqp;
	struct {
		struct mlx5dv_devx_umem *obj;
		void *buf;
		uint32_t size;
	} umems[3];
	struct rte_intr_handle *intr_handle;
	uint64_t err_time[3]; /* RDTSC time of recent errors. */
	uint32_t n_retry;
	struct mlx5_devx_virtio_q_couners_attr stats;
	struct mlx5_devx_virtio_q_couners_attr reset;
};

struct mlx5_vdpa_steer {
	struct mlx5_devx_obj *rqt;
	void *domain;
	void *tbl;
	struct {
		struct mlx5dv_flow_matcher *matcher;
		struct mlx5_devx_obj *tir;
		void *tir_action;
		void *flow;
	} rss[7];
};

enum {
	MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER,
	MLX5_VDPA_EVENT_MODE_FIXED_TIMER,
	MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT
};

enum mlx5_dev_state {
	MLX5_VDPA_STATE_PROBED = 0,
	MLX5_VDPA_STATE_CONFIGURED,
	MLX5_VDPA_STATE_IN_PROGRESS /* Shutting down. */
};

struct mlx5_vdpa_priv {
	TAILQ_ENTRY(mlx5_vdpa_priv) next;
	bool connected;
	enum mlx5_dev_state state;
	pthread_mutex_t vq_config_lock;
	uint64_t no_traffic_counter;
	pthread_t timer_tid;
	int event_mode;
	int event_core; /* Event thread cpu affinity core. */
	uint32_t event_us;
	uint32_t timer_delay_us;
	uint32_t no_traffic_max;
	uint8_t hw_latency_mode; /* Hardware CQ moderation mode. */
	uint16_t hw_max_latency_us; /* Hardware CQ moderation period in usec. */
	uint16_t hw_max_pending_comp; /* Hardware CQ moderation counter. */
	uint16_t queue_size; /* Virtq depth for pre-creating virtq resources. */
	uint16_t queues; /* Max virtq pairs for pre-creating virtq resources. */
	struct rte_vdpa_device *vdev; /* vDPA device. */
	struct mlx5_common_device *cdev; /* Backend mlx5 device. */
	int vid; /* vhost device id. */
	struct mlx5_hca_vdpa_attr caps;
	uint32_t gpa_mkey_index;
	struct ibv_mr *null_mr;
	struct rte_vhost_memory *vmem;
	struct mlx5dv_devx_event_channel *eventc;
	struct mlx5dv_devx_event_channel *err_chnl;
	struct mlx5_uar uar;
	struct rte_intr_handle *err_intr_handle;
	struct mlx5_devx_obj *td;
	struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
	uint16_t nr_virtqs;
	uint8_t num_lag_ports;
	uint64_t features; /* Negotiated features. */
	uint16_t log_max_rqt_size;
	struct mlx5_vdpa_steer steer;
	struct mlx5dv_var *var;
	void *virtq_db_addr;
	struct mlx5_pmd_wrapped_mr lm_mr;
	SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
	struct mlx5_vdpa_virtq virtqs[];
};

enum {
	MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
	MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
	MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
	MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
	MLX5_VDPA_STATS_INVALID_BUFFER,
	MLX5_VDPA_STATS_COMPLETION_ERRORS,
	MLX5_VDPA_STATS_MAX
};

/*
 * Check whether a virtq is a receive queue.
 * According to the VIRTIO net specification, the virtqueue index identifies
 * its type as follows:
 * 0 receiveq1
 * 1 transmitq1
 * ...
 * 2(N-1) receiveqN
 * 2(N-1)+1 transmitqN
 * 2N controlq
 */
static inline uint8_t
is_virtq_recvq(int virtq_index, int nr_vring)
{
	if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
		return 1;
	return 0;
}
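
/*
 * Illustrative examples (not from the driver sources): with nr_vring = 5,
 * i.e. two queue pairs plus a control queue, the even indexes 0 and 2 are
 * receive queues, while index 4 (nr_vring - 1, the control queue) is
 * excluded even though it is even:
 *
 *	is_virtq_recvq(0, 5); -> 1 (receiveq1)
 *	is_virtq_recvq(1, 5); -> 0 (transmitq1)
 *	is_virtq_recvq(4, 5); -> 0 (controlq)
 */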

/**
 * Release all the prepared memory regions and all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_mem_dereg(struct mlx5_vdpa_priv *priv);

/**
 * Register all the memory regions of the virtio device to the HW and allocate
 * all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);
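
/*
 * Usage sketch (illustrative, error paths elided): registration happens once
 * the vhost memory table is available and is undone on device close:
 *
 *	if (mlx5_vdpa_mem_register(priv))
 *		return -rte_errno;
 *	...
 *	mlx5_vdpa_mem_dereg(priv);
 */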

/**
 * Create an event QP and all its related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] desc_n
 *   Number of descriptors.
 * @param[in] callfd
 *   The guest notification file descriptor.
 * @param[in/out] eqp
 *   Pointer to the event QP structure.
 *
 * @return
 *   0 on success, -1 otherwise and rte_errno is set.
 */
int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			      int callfd, struct mlx5_vdpa_event_qp *eqp);

/**
 * Destroy an event QP and all its related resources.
 *
 * @param[in/out] eqp
 *   Pointer to the event QP structure.
 */
void mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp);
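
/*
 * Usage sketch (illustrative; `vq_size' and `callfd' stand for the virtq
 * depth and the guest notification fd): an event QP is typically created per
 * virtq and destroyed together with it:
 *
 *	struct mlx5_vdpa_event_qp eqp;
 *
 *	if (mlx5_vdpa_event_qp_create(priv, vq_size, callfd, &eqp))
 *		return -1;
 *	...
 *	mlx5_vdpa_event_qp_destroy(&eqp);
 */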

/**
 * Create all the event global resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv);

/**
 * Release all the event global resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv);

/**
 * Set up CQE event processing.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv);

/**
 * Unset CQE event processing.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv);
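
/*
 * Usage sketch (illustrative; the ordering is an assumption rather than a
 * documented contract): the global event resources are prepared first, CQE
 * events are then armed, and both are torn down in reverse order:
 *
 *	if (mlx5_vdpa_event_qp_global_prepare(priv))
 *		return -rte_errno;
 *	if (mlx5_vdpa_cqe_event_setup(priv))
 *		return -rte_errno;
 *	...
 *	mlx5_vdpa_cqe_event_unset(priv);
 *	mlx5_vdpa_event_qp_global_release(priv);
 */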

/**
 * Set up the error interrupt handler.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv);

/**
 * Unset the error event handler.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv);
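
/*
 * Usage sketch (illustrative): the error handler is tied to the configured
 * lifetime of the device:
 *
 *	if (mlx5_vdpa_err_event_setup(priv))
 *		return -rte_errno;
 *	...
 *	mlx5_vdpa_err_event_unset(priv);
 */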

/**
 * Release the virtqs and their resources, except those to be reused.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv);

/**
 * Clean up the cached resources of all virtqs.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv);

/**
 * Create all the HW virtq resources and all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);

/**
 * Enable/disable a virtq.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 * @param[in] enable
 *   Set to enable, otherwise disable.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);
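
/*
 * Usage sketch (illustrative; `vring' and `state' are the values reported by
 * the vhost set_vring_state callback):
 *
 *	if (mlx5_vdpa_virtq_enable(priv, vring, state))
 *		return -1;
 */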

/**
 * Unset steering and stop traffic.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);

/**
 * Update steering according to the receive queues status.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);

/**
 * Set up steering and all its related resources to enable RSS traffic from
 * the device to all the Rx host queues.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv);
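
/*
 * Usage sketch (illustrative): steering is set up once per device
 * configuration, refreshed when a receive queue changes state, and unset to
 * stop traffic:
 *
 *	if (mlx5_vdpa_steer_setup(priv))
 *		return -1;
 *	...
 *	if (mlx5_vdpa_steer_update(priv))
 *		return -1;
 *	...
 *	mlx5_vdpa_steer_unset(priv);
 */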

/**
 * Enable/disable live migration logging.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] enable
 *   Set for enable, unset for disable.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable);

/**
 * Set dirty bitmap logging to allow live migration.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] log_base
 *   Vhost log base.
 * @param[in] log_size
 *   Vhost log size.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
			       uint64_t log_size);

/**
 * Log all virtqs information for live migration.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv);
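
/*
 * Usage sketch (illustrative): a dirty bitmap logging flow, where log_base
 * and log_size are obtained from rte_vhost_get_log_base():
 *
 *	if (mlx5_vdpa_logging_enable(priv, 1))
 *		return -1;
 *	if (mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size))
 *		return -1;
 *	...
 *	if (mlx5_vdpa_lm_log(priv))
 *		return -1;
 *	mlx5_vdpa_logging_enable(priv, 0);
 */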

/**
 * Modify the virtq state to ready or suspend.
 *
 * @param[in] virtq
 *   The vdpa driver private virtq structure.
 * @param[in] state
 *   Set for ready, otherwise suspend.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);

/**
 * Stop a virtq before destroying it.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);

/**
 * Query virtq information.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index);
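
/*
 * Usage sketch (illustrative): a virtq is typically stopped before its state
 * is read back, e.g. on device stop:
 *
 *	if (mlx5_vdpa_virtq_stop(priv, index))
 *		return -1;
 *	if (mlx5_vdpa_virtq_query(priv, index))
 *		return -1;
 */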

/**
 * Get virtq statistics.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] qid
 *   The virtq index.
 * @param[out] stats
 *   The virtq statistics array to fill.
 * @param[in] n
 *   The number of elements in @p stats array.
 *
 * @return
 *   A negative value on error, otherwise the number of entries filled in the
 *   stats table.
 */
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n);

/**
 * Reset virtq statistics.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] qid
 *   The virtq index.
 *
 * @return
 *   A negative value on error, otherwise 0.
 */
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);
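
/*
 * Usage sketch (illustrative): the statistics array is naturally sized by
 * the MLX5_VDPA_STATS_* enum above:
 *
 *	struct rte_vdpa_stat stats[MLX5_VDPA_STATS_MAX];
 *	int n = mlx5_vdpa_virtq_stats_get(priv, qid, stats,
 *					  MLX5_VDPA_STATS_MAX);
 *
 *	if (n < 0)
 *		return n;
 *	mlx5_vdpa_virtq_stats_reset(priv, qid);
 */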

#endif /* RTE_PMD_MLX5_VDPA_H_ */