/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

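/*
 * Kick handler: runs in the EAL interrupt thread, drains the guest's kick
 * eventfd and relays the notification to HW by writing the queue index to
 * the device doorbell page.
 */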
static void
mlx5_vdpa_virtq_kick_handler(void *cb_arg)
{
	struct mlx5_vdpa_virtq *virtq = cb_arg;
	struct mlx5_vdpa_priv *priv = virtq->priv;
	uint64_t buf;
	int nbytes;
	int retry;

	if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
		DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
			priv->vid, virtq->index);
		return;
	}
	if (rte_intr_fd_get(virtq->intr_handle) < 0)
		return;
	for (retry = 0; retry < 3; ++retry) {
		nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,
			      sizeof(buf));
		if (nbytes < 0) {
			if (errno == EINTR ||
			    errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			DRV_LOG(ERR, "Failed to read kickfd of virtq %d: %s",
				virtq->index, strerror(errno));
		}
		break;
	}
	if (nbytes < 0)
		return;
	rte_write32(virtq->index, priv->virtq_db_addr);
	if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
		DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
			priv->vid, virtq->index);
		return;
	}
	if (virtq->notifier_state == MLX5_VDPA_NOTIFIER_STATE_DISABLED) {
		if (rte_vhost_host_notifier_ctrl(priv->vid, virtq->index,
						 true))
			virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_ERR;
		else
			virtq->notifier_state =
					      MLX5_VDPA_NOTIFIER_STATE_ENABLED;
		DRV_LOG(INFO, "Virtq %u notifier state is %s.", virtq->index,
			virtq->notifier_state ==
				MLX5_VDPA_NOTIFIER_STATE_ENABLED ? "enabled" :
								   "disabled");
	}
	DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}

/* Release cached VQ resources. */
void
mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
{
	unsigned int i, j;

	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];

		virtq->configured = 0;
		for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
			if (virtq->umems[j].obj) {
				claim_zero(mlx5_glue->devx_umem_dereg
							(virtq->umems[j].obj));
				virtq->umems[j].obj = NULL;
			}
			if (virtq->umems[j].buf) {
				rte_free(virtq->umems[j].buf);
				virtq->umems[j].buf = NULL;
			}
			virtq->umems[j].size = 0;
		}
		if (virtq->eqp.fw_qp)
			mlx5_vdpa_event_qp_destroy(&virtq->eqp);
	}
}

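/*
 * Tear down a single virtq: unregister the kick handler (retrying while the
 * callback is still executing in the interrupt thread and the unregister
 * call keeps returning -EAGAIN), then stop and destroy the HW object.
 */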
static void
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
	int ret = -EAGAIN;

	if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
		while (ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(virtq->intr_handle,
					mlx5_vdpa_virtq_kick_handler, virtq);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
					rte_intr_fd_get(virtq->intr_handle),
					virtq->index);
				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
			}
		}
		rte_intr_fd_set(virtq->intr_handle, -1);
	}
	rte_intr_instance_free(virtq->intr_handle);
	if (virtq->configured) {
		ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
		if (ret)
			DRV_LOG(WARNING, "Failed to stop virtq %d.",
				virtq->index);
		virtq->configured = 0;
		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
	}
	virtq->virtq = NULL;
	virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
}

void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
	int i;

	for (i = 0; i < priv->nr_virtqs; i++)
		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
}

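/* Map the boolean state onto the DevX virtq states: 1 -> RDY, 0 -> SUSPEND. */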
static int
mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
{
	struct mlx5_devx_virtq_attr attr = {
			.mod_fields_bitmap = MLX5_VIRTQ_MODIFY_TYPE_STATE,
			.state = state ? MLX5_VIRTQ_STATE_RDY :
					 MLX5_VIRTQ_STATE_SUSPEND,
			.queue_index = virtq->index,
	};

	return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}

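/*
 * Stopping a virtq suspends it in HW and then queries it, so the final
 * available/used indexes can be pushed back to the vhost library.
 */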
int
mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (virtq->stopped || !virtq->configured)
		return 0;
	ret = mlx5_vdpa_virtq_modify(virtq, 0);
	if (ret)
		return -1;
	virtq->stopped = true;
	DRV_LOG(DEBUG, "vid %u virtq %u was stopped.", priv->vid, index);
	return mlx5_vdpa_virtq_query(priv, index);
}

int
mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_devx_virtq_attr attr = {0};
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
		DRV_LOG(ERR, "Failed to query virtq %d.", index);
		return -1;
	}
	DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
		"hw_used_index=%d", priv->vid, index,
		attr.hw_available_index, attr.hw_used_index);
	ret = rte_vhost_set_vring_base(priv->vid, index,
				       attr.hw_available_index,
				       attr.hw_used_index);
	if (ret) {
		DRV_LOG(ERR, "Failed to set virtq %d base.", index);
		return -1;
	}
	if (attr.state == MLX5_VIRTQ_STATE_ERROR)
		DRV_LOG(WARNING, "vid %d vring %d hw error=%hhu",
			priv->vid, index, attr.error_type);
	return 0;
}

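/*
 * Translate a host virtual address to a guest physical address by scanning
 * the vhost memory regions; returns 0 when the address is not covered by
 * any region.
 */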
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (hva >= reg->host_user_addr &&
		    hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}
	return gpa;
}

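/*
 * Fill the DevX virtq attributes from the negotiated virtio features and the
 * vhost vring, and prepare the virtq sub-resources: event QP, HW counters
 * and the three UMEMs sized by the umem a/b capability coefficients. When a
 * virtq object already exists, the modify bitmap and the UMEM refresh blocks
 * below apply to it.
 */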
static int
mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
		struct mlx5_devx_virtq_attr *attr,
		struct rte_vhost_vring *vq, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	uint64_t gpa;
	int ret;
	unsigned int i;
	uint16_t last_avail_idx = 0;
	uint16_t last_used_idx = 0;

	if (virtq->virtq)
		attr->mod_fields_bitmap = MLX5_VIRTQ_MODIFY_TYPE_STATE |
			MLX5_VIRTQ_MODIFY_TYPE_ADDR |
			MLX5_VIRTQ_MODIFY_TYPE_HW_AVAILABLE_INDEX |
			MLX5_VIRTQ_MODIFY_TYPE_HW_USED_INDEX |
			MLX5_VIRTQ_MODIFY_TYPE_VERSION_1_0 |
			MLX5_VIRTQ_MODIFY_TYPE_Q_TYPE |
			MLX5_VIRTQ_MODIFY_TYPE_Q_MKEY |
			MLX5_VIRTQ_MODIFY_TYPE_QUEUE_FEATURE_BIT_MASK |
			MLX5_VIRTQ_MODIFY_TYPE_EVENT_MODE;
	attr->tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
	attr->tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
	attr->tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
	attr->rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
	attr->virtio_version_1_0 =
		!!(priv->features & (1ULL << VIRTIO_F_VERSION_1));
	attr->q_type =
		(priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
			MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
	/*
	 * No need to create event QPs when the guest works in poll mode or
	 * when the capability allows skipping them.
	 */
	attr->event_mode = vq->callfd != -1 ||
	!(priv->caps.event_mode & (1 << MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
		MLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
	if (attr->event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
		ret = mlx5_vdpa_event_qp_prepare(priv,
				vq->size, vq->callfd, &virtq->eqp);
		if (ret) {
			DRV_LOG(ERR,
				"Failed to create event QPs for virtq %d.",
				index);
			return -1;
		}
		attr->mod_fields_bitmap |= MLX5_VIRTQ_MODIFY_TYPE_EVENT_MODE;
		attr->qp_id = virtq->eqp.fw_qp->id;
	} else {
		DRV_LOG(INFO, "Virtq %d works in poll mode; no need for"
			" event QPs or the event mechanism.", index);
	}
	if (priv->caps.queue_counters_valid) {
		if (!virtq->counters)
			virtq->counters =
				mlx5_devx_cmd_create_virtio_q_counters
							(priv->cdev->ctx);
		if (!virtq->counters) {
			DRV_LOG(ERR, "Failed to create virtq counters for"
				" virtq %d.", index);
			return -1;
		}
		attr->counters_obj_id = virtq->counters->id;
	}
	/* Setup 3 UMEMs for each virtq. */
	if (virtq->virtq) {
		for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
			uint32_t size;
			void *buf;
			struct mlx5dv_devx_umem *obj;

			/* UMEM size is a linear function of the ring size. */
			size = priv->caps.umems[i].a * vq->size +
			       priv->caps.umems[i].b;
			if (virtq->umems[i].size == size &&
			    virtq->umems[i].obj != NULL) {
				/* Reuse registered memory. */
				memset(virtq->umems[i].buf, 0, size);
				goto reuse;
			}
			if (virtq->umems[i].obj)
				claim_zero(mlx5_glue->devx_umem_dereg
						      (virtq->umems[i].obj));
			if (virtq->umems[i].buf)
				rte_free(virtq->umems[i].buf);
			virtq->umems[i].size = 0;
			virtq->umems[i].obj = NULL;
			virtq->umems[i].buf = NULL;
			buf = rte_zmalloc(__func__, size, 4096);
			if (buf == NULL) {
				DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
					" %u.", i, index);
				return -1;
			}
			obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx,
					buf, size, IBV_ACCESS_LOCAL_WRITE);
			if (obj == NULL) {
				DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
					i, index);
				rte_free(buf);
				return -1;
			}
			virtq->umems[i].size = size;
			virtq->umems[i].buf = buf;
			virtq->umems[i].obj = obj;
reuse:
			attr->umems[i].id = virtq->umems[i].obj->umem_id;
			attr->umems[i].offset = 0;
			attr->umems[i].size = virtq->umems[i].size;
		}
	}
	if (attr->q_type == MLX5_VIRTQ_TYPE_SPLIT) {
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq->desc);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
			return -1;
		}
		attr->desc_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq->used);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
			return -1;
		}
		attr->used_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq->avail);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
			return -1;
		}
		attr->available_addr = gpa;
	}
	ret = rte_vhost_get_vring_base(priv->vid,
			index, &last_avail_idx, &last_used_idx);
	if (ret) {
		last_avail_idx = 0;
		last_used_idx = 0;
		DRV_LOG(WARNING, "Couldn't get vring base, indexes are set to 0.");
	} else {
		DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
			"virtq %d.", priv->vid, last_avail_idx,
			last_used_idx, index);
	}
	attr->hw_available_index = last_avail_idx;
	attr->hw_used_index = last_used_idx;
	attr->q_size = vq->size;
	attr->mkey = priv->gpa_mkey_index;
	/* One TIS per queue pair, spread across the LAG ports. */
	attr->tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
	attr->queue_index = index;
	attr->pd = priv->cdev->pdn;
	attr->hw_latency_mode = priv->hw_latency_mode;
	attr->hw_max_latency_us = priv->hw_max_latency_us;
	attr->hw_max_pending_comp = priv->hw_max_pending_comp;
	if (attr->hw_latency_mode || attr->hw_max_latency_us ||
	    attr->hw_max_pending_comp)
		attr->mod_fields_bitmap |= MLX5_VIRTQ_MODIFY_TYPE_QUEUE_PERIOD;
	return 0;
}

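/* Modify-in-place is usable only when FW reports all three modify caps. */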
static bool
mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv)
{
	return priv->caps.vnet_modify_ext &&
	       priv->caps.virtio_net_q_addr_modify &&
	       priv->caps.virtio_q_index_modify;
}

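/*
 * Create and start one virtq: build the attributes, create the DevX virtq
 * object, move it to the RDY state, map the kick fd to the HW doorbell and
 * subscribe to the virtq error event channel.
 */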
static int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	struct rte_vhost_vring vq;
	struct mlx5_devx_virtq_attr attr = {0};
	int ret;
	uint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;
	uint64_t cookie;

	ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
	if (ret)
		return -1;
	if (vq.size == 0)
		return 0;
	ret = mlx5_vdpa_virtq_sub_objs_prepare(priv, &attr,
				&vq, index);
	if (ret) {
		DRV_LOG(ERR, "Failed to set up virtq %d attributes.",
			index);
		goto error;
	}
	virtq->index = index;
	virtq->vq_size = vq.size;
	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx,
						  &attr);
	if (!virtq->virtq)
		goto error;
	attr.mod_fields_bitmap = MLX5_VIRTQ_MODIFY_TYPE_STATE;
	attr.state = MLX5_VIRTQ_STATE_RDY;
	ret = mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify virtq %d.", index);
		goto error;
	}
	claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
	virtq->configured = 1;
	rte_write32(virtq->index, priv->virtq_db_addr);
	/* Setup doorbell mapping. */
	virtq->intr_handle =
		rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
	if (virtq->intr_handle == NULL) {
		DRV_LOG(ERR, "Failed to allocate intr_handle.");
		goto error;
	}
	if (rte_intr_fd_set(virtq->intr_handle, vq.kickfd))
		goto error;
	if (rte_intr_fd_get(virtq->intr_handle) == -1) {
		DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
	} else {
		if (rte_intr_type_set(virtq->intr_handle, RTE_INTR_HANDLE_EXT))
			goto error;
		if (rte_intr_callback_register(virtq->intr_handle,
					       mlx5_vdpa_virtq_kick_handler,
					       virtq)) {
			rte_intr_fd_set(virtq->intr_handle, -1);
			DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
				index);
			goto error;
		}
		DRV_LOG(DEBUG, "Register fd %d interrupt for virtq %d.",
			rte_intr_fd_get(virtq->intr_handle),
			index);
	}
	/* Subscribe virtq error event. */
	virtq->version++;
	/* The cookie carries the object version to filter stale events. */
	cookie = ((uint64_t)virtq->version << 32) + index;
	ret = mlx5_glue->devx_subscribe_devx_event(priv->err_chnl,
						   virtq->virtq->obj,
						   sizeof(event_num),
						   &event_num, cookie);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe device %d virtq %d error event.",
			priv->vid, index);
		rte_errno = errno;
		goto error;
	}
	virtq->stopped = false;
	/* Initial notification to ask Qemu handling completed buffers. */
	if (virtq->eqp.cq.callfd != -1)
		eventfd_write(virtq->eqp.cq.callfd, (eventfd_t)1);
	DRV_LOG(DEBUG, "vid %u virtq %u was created successfully.", priv->vid,
		index);
	return 0;
error:
	mlx5_vdpa_virtq_unset(virtq);
	return -1;
}

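/*
 * Reject feature sets the device cannot honor: each negotiated virtio
 * feature bit must be matched by the corresponding HW/driver capability.
 */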
int
mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
{
	if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		if (!(priv->caps.virtio_queue_type & (1 <<
						     MLX5_VIRTQ_TYPE_PACKED))) {
			DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
				"%d - it was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
		if (!priv->caps.tso_ipv4) {
			DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
		if (!priv->caps.tso_ipv6) {
			DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
		if (!priv->caps.tx_csum) {
			DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
		if (!priv->caps.rx_csum) {
			DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d"
				" - GUEST CSUM was not reported by HW/driver "
				"capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
		if (!priv->caps.virtio_version_1_0) {
			DRV_LOG(ERR, "Failed to enable version 1 for vdev %d -"
				" version 1 was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	return 0;
}

int
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
	int i;
	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);

	if (ret || mlx5_vdpa_features_validate(priv)) {
		DRV_LOG(ERR, "Failed to configure negotiated features.");
		return -1;
	}
	if ((priv->features & (1ULL << VIRTIO_NET_F_CSUM)) == 0 &&
	    ((priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) > 0 ||
	     (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) > 0)) {
		/* Packet may be corrupted if TSO is enabled without CSUM. */
		DRV_LOG(INFO, "TSO is enabled without CSUM, forcing CSUM.");
		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
	}
	if (nr_vring > priv->caps.max_num_virtio_queues) {
		DRV_LOG(ERR, "Cannot support more than %d virtqs (requested %d).",
			(int)priv->caps.max_num_virtio_queues,
			nr_vring);
		return -1;
	}
	priv->nr_virtqs = nr_vring;
	for (i = 0; i < nr_vring; i++)
		if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
			goto error;
	return 0;
error:
	mlx5_vdpa_virtqs_release(priv);
	return -1;
}

static int
mlx5_vdpa_virtq_is_modified(struct mlx5_vdpa_priv *priv,
		struct mlx5_vdpa_virtq *virtq)
{
	struct rte_vhost_vring vq;
	int ret = rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq);

	if (ret)
		return -1;
	if (vq.size != virtq->vq_size || vq.kickfd !=
	    rte_intr_fd_get(virtq->intr_handle))
		return 1;
	if (virtq->eqp.cq.cq_obj.cq) {
		if (vq.callfd != virtq->eqp.cq.callfd)
			return 1;
	} else if (vq.callfd != -1) {
		return 1;
	}
	return 0;
}

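/*
 * A virtq whose ring size, kick fd or call fd changed since creation cannot
 * be reused; it has to be destroyed and re-created.
 */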
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
	if (priv->state == MLX5_VDPA_STATE_PROBED) {
		virtq->enable = !!enable;
		return 0;
	}
	if (virtq->enable == !!enable) {
		if (!enable)
			return 0;
		ret = mlx5_vdpa_virtq_is_modified(priv, virtq);
		if (ret < 0) {
			DRV_LOG(ERR, "Virtq %d modify check failed.", index);
			return -1;
		}
		if (ret == 0)
			return 0;
		DRV_LOG(INFO, "Virtq %d was modified, recreate it.", index);
	}
	if (virtq->configured) {
		virtq->enable = 0;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to disable steering "
					"for virtq %d.", index);
		}
		mlx5_vdpa_virtq_unset(virtq);
	}
	if (enable) {
		ret = mlx5_vdpa_virtq_setup(priv, index);
		if (ret) {
			DRV_LOG(ERR, "Failed to setup virtq %d.", index);
			return -1;
		}
		virtq->enable = 1;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to enable steering "
					"for virtq %d.", index);
		}
	}
	return 0;
}

int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	/* Note: the "couners" spelling comes from the DevX API struct name. */
	struct mlx5_devx_virtio_q_couners_attr *attr = &virtq->stats;
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
			"is invalid.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
		return ret;
	}
	ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
	if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
		.value = attr->received_desc - virtq->reset.received_desc,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
		.value = attr->completed_desc - virtq->reset.completed_desc,
	};
	if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
		.value = attr->bad_desc_errors - virtq->reset.bad_desc_errors,
	};
	if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
		return ret;
	stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
		.value = attr->exceed_max_chain - virtq->reset.exceed_max_chain,
	};
	if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
		return ret;
	stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_INVALID_BUFFER,
		.value = attr->invalid_buffer - virtq->reset.invalid_buffer,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
		.value = attr->error_cqes - virtq->reset.error_cqes,
	};
	return ret;
}

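/*
 * "Reset" is emulated in SW: snapshot the current HW counters into
 * virtq->reset so that subsequent reads report deltas from this point.
 */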
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	int ret;

	if (virtq->counters == NULL) /* VQ not enabled. */
		return 0;
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
						    &virtq->reset);
	if (ret)
		DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
			qid);
	return ret;
}