struct mlx5_vdpa_event_qp {
struct mlx5_vdpa_cq cq;
struct mlx5_devx_obj *fw_qp;
- struct mlx5_devx_obj *sw_qp;
- struct mlx5dv_devx_umem *umem_obj;
- void *umem_buf;
- volatile uint32_t *db_rec;
+ struct mlx5_devx_qp sw_qp;
};
/*
 * NOTE(review): this span looks like splice residue — the tag
 * mlx5_vdpa_query_mr is paired with a buf/size body and a trailing
 * `umems[3]` instance, which resembles the umem descriptor array of a
 * different aggregate in this driver. Confirm against the full header
 * before relying on this layout.
 */
struct mlx5_vdpa_query_mr {
void *buf;     /* Buffer address — presumably a umem backing buffer; verify. */
uint32_t size; /* Buffer size in bytes. */
} umems[3];
- struct rte_intr_handle intr_handle;
+ struct rte_intr_handle *intr_handle;
uint64_t err_time[3]; /* RDTSC time of recent errors. */
uint32_t n_retry;
struct mlx5_devx_virtio_q_couners_attr reset;
uint16_t hw_max_latency_us; /* Hardware CQ moderation period in usec. */
uint16_t hw_max_pending_comp; /* Hardware CQ moderation counter. */
struct rte_vdpa_device *vdev; /* vDPA device. */
+ struct mlx5_common_device *cdev; /* Backend mlx5 device. */
int vid; /* vhost device id. */
- struct ibv_context *ctx; /* Device context. */
struct mlx5_hca_vdpa_attr caps;
- uint32_t pdn; /* Protection Domain number. */
- struct ibv_pd *pd;
uint32_t gpa_mkey_index;
struct ibv_mr *null_mr;
struct rte_vhost_memory *vmem;
struct mlx5dv_devx_event_channel *eventc;
struct mlx5dv_devx_event_channel *err_chnl;
struct mlx5dv_devx_uar *uar;
- struct rte_intr_handle err_intr_handle;
+ struct rte_intr_handle *err_intr_handle;
struct mlx5_devx_obj *td;
struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
uint16_t nr_virtqs;
uint8_t num_lag_ports;
- uint8_t qp_ts_format;
uint64_t features; /* Negotiated features. */
uint16_t log_max_rqt_size;
struct mlx5_vdpa_steer steer;