1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2019 Mellanox Technologies, Ltd
5 #ifndef RTE_PMD_MLX5_VDPA_H_
6 #define RTE_PMD_MLX5_VDPA_H_
8 #include <linux/virtio_net.h>
12 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <rte_vhost.h>
17 #pragma GCC diagnostic error "-Wpedantic"
19 #include <rte_spinlock.h>
20 #include <rte_interrupts.h>
22 #include <mlx5_glue.h>
23 #include <mlx5_devx_cmds.h>
27 #define MLX5_VDPA_INTR_RETRIES 256
28 #define MLX5_VDPA_INTR_RETRIES_USEC 1000
30 #ifndef VIRTIO_F_ORDER_PLATFORM
31 #define VIRTIO_F_ORDER_PLATFORM 36
34 #ifndef VIRTIO_F_RING_PACKED
35 #define VIRTIO_F_RING_PACKED 34
44 struct mlx5_devx_obj *cq;
45 struct mlx5dv_devx_umem *umem_obj;
47 volatile void *umem_buf;
48 volatile struct mlx5_cqe *cqes;
50 volatile uint32_t *db_rec;
54 struct mlx5_vdpa_event_qp {
55 struct mlx5_vdpa_cq cq;
56 struct mlx5_devx_obj *fw_qp;
57 struct mlx5_devx_obj *sw_qp;
58 struct mlx5dv_devx_umem *umem_obj;
60 volatile uint32_t *db_rec;
63 struct mlx5_vdpa_query_mr {
64 SLIST_ENTRY(mlx5_vdpa_query_mr) next;
67 struct mlx5dv_devx_umem *umem;
68 struct mlx5_devx_obj *mkey;
72 struct mlx5_vdpa_virtq {
73 SLIST_ENTRY(mlx5_vdpa_virtq) next;
77 struct mlx5_vdpa_priv *priv;
78 struct mlx5_devx_obj *virtq;
79 struct mlx5_devx_obj *counters;
80 struct mlx5_vdpa_event_qp eqp;
82 struct mlx5dv_devx_umem *obj;
86 struct rte_intr_handle intr_handle;
87 struct mlx5_devx_virtio_q_couners_attr reset;
90 struct mlx5_vdpa_steer {
91 struct mlx5_devx_obj *rqt;
95 struct mlx5dv_flow_matcher *matcher;
96 struct mlx5_devx_obj *tir;
102 struct mlx5_vdpa_priv {
103 TAILQ_ENTRY(mlx5_vdpa_priv) next;
105 uint8_t direct_notifier; /* Whether direct notifier is on or off. */
106 int id; /* vDPA device id. */
107 int vid; /* vhost device id. */
108 struct ibv_context *ctx; /* Device context. */
109 struct rte_vdpa_dev_addr dev_addr;
110 struct mlx5_hca_vdpa_attr caps;
111 uint32_t pdn; /* Protection Domain number. */
113 uint32_t gpa_mkey_index;
114 struct ibv_mr *null_mr;
115 struct rte_vhost_memory *vmem;
117 struct mlx5dv_devx_event_channel *eventc;
118 struct mlx5dv_devx_uar *uar;
119 struct rte_intr_handle intr_handle;
120 struct mlx5_devx_obj *td;
121 struct mlx5_devx_obj *tis;
123 uint64_t features; /* Negotiated features. */
124 uint16_t log_max_rqt_size;
125 struct mlx5_vdpa_steer steer;
126 struct mlx5dv_var *var;
128 SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
129 struct mlx5_vdpa_virtq virtqs[];
133 MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
134 MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
135 MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
136 MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
137 MLX5_VDPA_STATS_INVALID_BUFFER,
138 MLX5_VDPA_STATS_COMPLETION_ERRORS,
143 * Check whether virtq is for traffic receive.
144 * According to the VIRTIO_NET spec, a virtqueue's index identifies its type:
149 * 2(N-1)+1 transmitqN
152 static inline uint8_t
153 is_virtq_recvq(int virtq_index, int nr_vring)
155 if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
161 * Release all the prepared memory regions and all their related resources.
164 * The vdpa driver private structure.
166 void mlx5_vdpa_mem_dereg(struct mlx5_vdpa_priv *priv);
169 * Register all the memory regions of the virtio device to the HW and allocate
170 * all their related resources.
173 * The vdpa driver private structure.
176 * 0 on success, a negative errno value otherwise and rte_errno is set.
178 int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);
182 * Create an event QP and all its related resources.
185 * The vdpa driver private structure.
187 * Number of descriptors.
189 * The guest notification file descriptor.
191 * Pointer to the event QP structure.
194 * 0 on success, -1 otherwise and rte_errno is set.
196 int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
197 int callfd, struct mlx5_vdpa_event_qp *eqp);
200 * Destroy an event QP and all its related resources.
203 * Pointer to the event QP structure.
205 void mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp);
208 * Release all the event global resources.
211 * The vdpa driver private structure.
213 void mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv);
219 * The vdpa driver private structure.
222 * 0 on success, a negative errno value otherwise and rte_errno is set.
224 int mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv);
230 * The vdpa driver private structure.
232 void mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv);
235 * Release a virtq and all its related resources.
238 * The vdpa driver private structure.
240 void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv);
243 * Create all the HW virtqs and all their related resources.
246 * The vdpa driver private structure.
249 * 0 on success, a negative errno value otherwise and rte_errno is set.
251 int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);
254 * Enable/disable a virtq.
257 * The vdpa driver private structure.
261 * Set to enable, otherwise disable.
264 * 0 on success, a negative value otherwise.
266 int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);
269 * Unset steering and release all its related resources - stop traffic.
272 * The vdpa driver private structure.
274 void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
277 * Update steering according to the received queues status.
280 * The vdpa driver private structure.
283 * 0 on success, a negative value otherwise.
285 int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);
288 * Setup steering and all its related resources to enable RSS traffic from the
289 * device to all the Rx host queues.
292 * The vdpa driver private structure.
295 * 0 on success, a negative value otherwise.
297 int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv);
300 * Enable/disable live migration logging.
303 * The vdpa driver private structure.
305 * Set for enable, unset for disable.
308 * 0 on success, a negative value otherwise.
310 int mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable);
313 * Set dirty bitmap logging to allow live migration.
316 * The vdpa driver private structure.
317 * @param[in] log_base
319 * @param[in] log_size
323 * 0 on success, a negative value otherwise.
325 int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
329 * Log all virtqs information for live migration.
332 * The vdpa driver private structure.
334 * Set for enable, unset for disable.
337 * 0 on success, a negative value otherwise.
339 int mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv);
342 * Modify virtq state to be ready or suspend.
345 * The vdpa driver private virtq structure.
347 * Set for ready, otherwise suspend.
350 * 0 on success, a negative value otherwise.
352 int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);
355 * Stop virtq before destroying it.
358 * The vdpa driver private structure.
363 * 0 on success, a negative value otherwise.
365 int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);
368 * Get virtq statistics.
371 * The vdpa driver private structure.
375 * The virtq statistics array to fill.
377 * The number of elements in @p stats array.
380 * A negative value on error, otherwise the number of entries filled in the
384 mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
385 struct rte_vdpa_stat *stats, unsigned int n);
388 * Reset virtq statistics.
391 * The vdpa driver private structure.
396 * A negative value on error, otherwise 0.
399 mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);
400 #endif /* RTE_PMD_MLX5_VDPA_H_ */