vdpa_attr->max_num_virtio_queues =
                        MLX5_GET(virtio_emulation_cap, hcattr,
                                 max_num_virtio_queues);
-               vdpa_attr->umem_1_buffer_param_a =
-                       MLX5_GET(virtio_emulation_cap, hcattr,
-                                umem_1_buffer_param_a);
-               vdpa_attr->umem_1_buffer_param_b =
-                       MLX5_GET(virtio_emulation_cap, hcattr,
-                                umem_1_buffer_param_b);
-               vdpa_attr->umem_2_buffer_param_a =
-                       MLX5_GET(virtio_emulation_cap, hcattr,
-                                umem_2_buffer_param_a);
-               vdpa_attr->umem_2_buffer_param_b =
-                       MLX5_GET(virtio_emulation_cap, hcattr,
-                                umem_2_buffer_param_a);
-               vdpa_attr->umem_3_buffer_param_a =
-                       MLX5_GET(virtio_emulation_cap, hcattr,
-                                umem_3_buffer_param_a);
-               vdpa_attr->umem_3_buffer_param_b =
-                       MLX5_GET(virtio_emulation_cap, hcattr,
-                                umem_3_buffer_param_b);
+               vdpa_attr->umems[0].a = MLX5_GET(virtio_emulation_cap, hcattr,
+                                                umem_1_buffer_param_a);
+               vdpa_attr->umems[0].b = MLX5_GET(virtio_emulation_cap, hcattr,
+                                                umem_1_buffer_param_b);
+               vdpa_attr->umems[1].a = MLX5_GET(virtio_emulation_cap, hcattr,
+                                                umem_2_buffer_param_a);
+               vdpa_attr->umems[1].b = MLX5_GET(virtio_emulation_cap, hcattr,
+                                                umem_2_buffer_param_b);
+               vdpa_attr->umems[2].a = MLX5_GET(virtio_emulation_cap, hcattr,
+                                                umem_3_buffer_param_a);
+               vdpa_attr->umems[2].b = MLX5_GET(virtio_emulation_cap, hcattr,
+                                                umem_3_buffer_param_b);
        }
 }
 
        cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
        return cq_obj;
 }
+
+/**
+ * Create VIRTQ using DevX API.
+ *
+ * @param[in] ctx
+ *   ibv_context returned from mlx5dv_open_device.
+ * @param [in] attr
+ *   Pointer to VIRTQ attributes structure.
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_virtq(struct ibv_context *ctx,
+                          struct mlx5_devx_virtq_attr *attr)
+{
+       uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
+       uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+       struct mlx5_devx_obj *virtq_obj = rte_zmalloc(__func__,
+                                                    sizeof(*virtq_obj), 0);
+       void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
+       void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
+       void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
+
+       if (!virtq_obj) {
+               DRV_LOG(ERR, "Failed to allocate virtq data.");
+               rte_errno = ENOMEM;
+               return NULL;
+       }
+       /* Command header: create a general object of type VIRTQ. */
+       MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+                MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+                MLX5_GENERAL_OBJ_TYPE_VIRTQ);
+       /* Virtio-net queue level fields, copied verbatim from @p attr. */
+       MLX5_SET16(virtio_net_q, virtq, hw_available_index,
+                  attr->hw_available_index);
+       MLX5_SET16(virtio_net_q, virtq, hw_used_index, attr->hw_used_index);
+       MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4);
+       MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6);
+       MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum);
+       MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum);
+       /* Generic virtio queue context fields. */
+       MLX5_SET16(virtio_q, virtctx, virtio_version_1_0,
+                  attr->virtio_version_1_0);
+       MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode);
+       MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id);
+       MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr);
+       MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr);
+       MLX5_SET64(virtio_q, virtctx, available_addr, attr->available_addr);
+       MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
+       MLX5_SET16(virtio_q, virtctx, queue_size, attr->q_size);
+       MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey);
+       /* The three umem buffers backing this queue (id/size/offset each). */
+       MLX5_SET(virtio_q, virtctx, umem_1_id, attr->umems[0].id);
+       MLX5_SET(virtio_q, virtctx, umem_1_size, attr->umems[0].size);
+       MLX5_SET64(virtio_q, virtctx, umem_1_offset, attr->umems[0].offset);
+       MLX5_SET(virtio_q, virtctx, umem_2_id, attr->umems[1].id);
+       MLX5_SET(virtio_q, virtctx, umem_2_size, attr->umems[1].size);
+       MLX5_SET64(virtio_q, virtctx, umem_2_offset, attr->umems[1].offset);
+       MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
+       MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
+       MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
+       MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
+       virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+                                                   sizeof(out));
+       if (!virtq_obj->obj) {
+               rte_errno = errno;
+               DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
+               rte_free(virtq_obj);
+               return NULL;
+       }
+       virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+       return virtq_obj;
+}
+
+/**
+ * Modify VIRTQ using DevX API.
+ *
+ * @param[in] virtq_obj
+ *   Pointer to virtq object structure.
+ * @param [in] attr
+ *   Pointer to modify virtq attributes structure. attr->type selects the
+ *   single modification to apply (one MLX5_VIRTQ_MODIFY_TYPE_* flag).
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
+                          struct mlx5_devx_virtq_attr *attr)
+{
+       uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
+       uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+       void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
+       void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
+       void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
+       int ret;
+
+       MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+                MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+                MLX5_GENERAL_OBJ_TYPE_VIRTQ);
+       MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
+       /* modify_field_select tells FW which field group below is valid. */
+       MLX5_SET64(virtio_net_q, virtq, modify_field_select, attr->type);
+       MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
+       switch (attr->type) {
+       case MLX5_VIRTQ_MODIFY_TYPE_STATE:
+               MLX5_SET16(virtio_net_q, virtq, state, attr->state);
+               break;
+       case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS:
+               MLX5_SET(virtio_net_q, virtq, dirty_bitmap_mkey,
+                        attr->dirty_bitmap_mkey);
+               MLX5_SET64(virtio_net_q, virtq, dirty_bitmap_addr,
+                        attr->dirty_bitmap_addr);
+               MLX5_SET(virtio_net_q, virtq, dirty_bitmap_size,
+                        attr->dirty_bitmap_size);
+               break;
+       case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE:
+               MLX5_SET(virtio_net_q, virtq, dirty_bitmap_dump_enable,
+                        attr->dirty_bitmap_dump_enable);
+               break;
+       default:
+               /* Exactly one supported modify type must be requested. */
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
+       ret = mlx5_glue->devx_obj_modify(virtq_obj->obj, in, sizeof(in),
+                                        out, sizeof(out));
+       if (ret) {
+               DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
+               rte_errno = errno;
+               return -errno;
+       }
+       return ret;
+}
+
+/**
+ * Query VIRTQ using DevX API.
+ *
+ * @param[in] virtq_obj
+ *   Pointer to virtq object structure.
+ * @param[in, out] attr
+ *   Pointer to virtq attributes structure, filled on success with the HW
+ *   available and used indexes of the queue.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
+                          struct mlx5_devx_virtq_attr *attr)
+{
+       uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
+       uint32_t out[MLX5_ST_SZ_DW(query_virtq_out)] = {0};
+       /* The input command header is the first field of query_virtq_out. */
+       void *hdr = MLX5_ADDR_OF(query_virtq_out, in, hdr);
+       void *virtq = MLX5_ADDR_OF(query_virtq_out, out, virtq);
+       int ret;
+
+       MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+                MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+                MLX5_GENERAL_OBJ_TYPE_VIRTQ);
+       MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
+       ret = mlx5_glue->devx_obj_query(virtq_obj->obj, in, sizeof(in),
+                                        out, sizeof(out));
+       if (ret) {
+               DRV_LOG(ERR, "Failed to query VIRTQ using DevX.");
+               rte_errno = errno;
+               return -errno;
+       }
+       /* Only the HW ring indexes are reported back to the caller. */
+       attr->hw_available_index = MLX5_GET16(virtio_net_q, virtq,
+                                             hw_available_index);
+       attr->hw_used_index = MLX5_GET16(virtio_net_q, virtq, hw_used_index);
+       return ret;
+}
 
 #define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - \
                                    (__mlx5_bit_off(typ, fld) & 0xf))
 #define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+/* Field mask shifted to the field's bit position within its 16-bit word. */
+#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << \
+                                 __mlx5_16_bit_off(typ, fld))
 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
                        rte_cpu_to_be_64(v); \
        } while (0)
 
+/*
+ * Read-modify-write of a PRM field that fits a big-endian 16-bit word:
+ * clear the field's bits in the existing word, then OR in the masked,
+ * shifted new value.
+ */
+#define MLX5_SET16(typ, p, fld, v) \
+       do { \
+               u16 _v = v; \
+               *((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
+               rte_cpu_to_be_16((rte_be_to_cpu_16(*((__be16 *)(p) + \
+                                 __mlx5_16_off(typ, fld))) & \
+                                 (~__mlx5_16_mask(typ, fld))) | \
+                                (((_v) & __mlx5_mask16(typ, fld)) << \
+                                 __mlx5_16_bit_off(typ, fld))); \
+       } while (0)
+
 #define MLX5_GET(typ, p, fld) \
        ((rte_be_to_cpu_32(*((__be32 *)(p) +\
        __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
        MLX5_CMD_OP_CREATE_RQT = 0x916,
        MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
        MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
+       MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
+       MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01,
+       MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02,
 };
 
 enum {
        struct mlx5_ifc_tirc_bits ctx;
 };
 
+/* Inline queue types: regular RQ or virtio queue. */
+enum {
+       MLX5_INLINE_Q_TYPE_RQ = 0x0,
+       MLX5_INLINE_Q_TYPE_VIRTQ = 0x1,
+};
+
 struct mlx5_ifc_rq_num_bits {
        u8 reserved_at_0[0x8];
        u8 rq_num[0x18];
        u8 pas[];
 };
 
+/* obj_type values for the general object command header. */
+enum {
+       MLX5_GENERAL_OBJ_TYPE_VIRTQ = 0x000d,
+};
+
+/* Bit layout of the general object input command header (0x80 bits). */
+struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+       u8 opcode[0x10];
+       u8 reserved_at_10[0x20];
+       u8 obj_type[0x10];
+       u8 obj_id[0x20];
+       u8 reserved_at_60[0x20];
+};
+
+/* Bit layout of the general object output command header (0x80 bits). */
+struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+       u8 status[0x8];
+       u8 reserved_at_8[0x18];
+       u8 syndrome[0x20];
+       u8 obj_id[0x20];
+       u8 reserved_at_60[0x20];
+};
+
+/* Values for the virtio_net_q "state" field. */
+enum {
+       MLX5_VIRTQ_STATE_INIT = 0,
+       MLX5_VIRTQ_STATE_RDY = 1,
+       MLX5_VIRTQ_STATE_SUSPEND = 2,
+       MLX5_VIRTQ_STATE_ERROR = 3,
+};
+
+/* Bit flags for the virtio_net_q modify_field_select field. */
+enum {
+       MLX5_VIRTQ_MODIFY_TYPE_STATE = (1UL << 0),
+       MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS = (1UL << 3),
+       MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE = (1UL << 4),
+};
+
+/* Generic virtio queue context bit layout (0x400 bits total). */
+struct mlx5_ifc_virtio_q_bits {
+       u8 virtio_q_type[0x8];
+       u8 reserved_at_8[0x5];
+       u8 event_mode[0x3];
+       u8 queue_index[0x10];
+       u8 full_emulation[0x1];
+       u8 virtio_version_1_0[0x1];
+       u8 reserved_at_22[0x2];
+       u8 offload_type[0x4];
+       u8 event_qpn_or_msix[0x18];
+       u8 doorbell_stride_idx[0x10];
+       u8 queue_size[0x10];
+       u8 device_emulation_id[0x20];
+       u8 desc_addr[0x40];
+       u8 used_addr[0x40];
+       u8 available_addr[0x40];
+       u8 virtio_q_mkey[0x20];
+       u8 reserved_at_160[0x20];
+       /* Three umem buffers, each described by id, size and offset. */
+       u8 umem_1_id[0x20];
+       u8 umem_1_size[0x20];
+       u8 umem_1_offset[0x40];
+       u8 umem_2_id[0x20];
+       u8 umem_2_size[0x20];
+       u8 umem_2_offset[0x40];
+       u8 umem_3_id[0x20];
+       u8 umem_3_size[0x20];
+       u8 umem_3_offset[0x40];
+       u8 reserved_at_300[0x100];
+};
+
+/*
+ * Virtio-net queue object bit layout; embeds the generic virtio_q
+ * context at offset 0x200.
+ */
+struct mlx5_ifc_virtio_net_q_bits {
+       u8 modify_field_select[0x40];
+       u8 reserved_at_40[0x40];
+       u8 tso_ipv4[0x1];
+       u8 tso_ipv6[0x1];
+       u8 tx_csum[0x1];
+       u8 rx_csum[0x1];
+       u8 reserved_at_84[0x6];
+       u8 dirty_bitmap_dump_enable[0x1];
+       u8 vhost_log_page[0x5];
+       u8 reserved_at_90[0xc];
+       u8 state[0x4];
+       u8 error_type[0x8];
+       u8 tisn_or_qpn[0x18];
+       u8 dirty_bitmap_mkey[0x20];
+       u8 dirty_bitmap_size[0x20];
+       u8 dirty_bitmap_addr[0x40];
+       u8 hw_available_index[0x10];
+       u8 hw_used_index[0x10];
+       u8 reserved_at_160[0xa0];
+       struct mlx5_ifc_virtio_q_bits virtio_q_context;
+};
+
+/* Input layout of CREATE_GENERAL_OBJECT for a VIRTQ object. */
+struct mlx5_ifc_create_virtq_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+       struct mlx5_ifc_virtio_net_q_bits virtq;
+};
+
+/*
+ * Output layout of QUERY_GENERAL_OBJECT for a VIRTQ object.
+ * NOTE(review): reuses the *input* cmd header layout; the in and out
+ * headers are the same size (0x80 bits) so the virtq offset is
+ * unaffected, but the out header fields are not addressable — confirm
+ * this is intentional.
+ */
+struct mlx5_ifc_query_virtq_out_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+       struct mlx5_ifc_virtio_net_q_bits virtq;
+};
+
 /* CQE format mask. */
 #define MLX5E_CQE_FORMAT_MASK 0xc