/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"
static void
mlx5_vdpa_virtq_kick_handler(void *cb_arg)
{
	struct mlx5_vdpa_virtq *virtq = cb_arg;
	struct mlx5_vdpa_priv *priv = virtq->priv;
	uint64_t buf;
	int nbytes;
	int retry;

	pthread_mutex_lock(&virtq->virtq_lock);
	if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
		pthread_mutex_unlock(&virtq->virtq_lock);
		DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
			priv->vid, virtq->index);
		return;
	}
	if (rte_intr_fd_get(virtq->intr_handle) < 0) {
		pthread_mutex_unlock(&virtq->virtq_lock);
		return;
	}
	for (retry = 0; retry < 3; ++retry) {
		nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,
			      sizeof(buf));
		if (nbytes < 0) {
			if (errno == EINTR ||
			    errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			DRV_LOG(ERR, "Failed to read kickfd of virtq %d: %s",
				virtq->index, strerror(errno));
		}
		break;
	}
	if (nbytes < 0) {
		pthread_mutex_unlock(&virtq->virtq_lock);
		return;
	}
	rte_spinlock_lock(&priv->db_lock);
	rte_write32(virtq->index, priv->virtq_db_addr);
	rte_spinlock_unlock(&priv->db_lock);
	pthread_mutex_unlock(&virtq->virtq_lock);
	if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
		DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
			priv->vid, virtq->index);
		return;
	}
	if (virtq->notifier_state == MLX5_VDPA_NOTIFIER_STATE_DISABLED) {
		if (rte_vhost_host_notifier_ctrl(priv->vid, virtq->index, true))
			virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_ERR;
		else
			virtq->notifier_state =
					       MLX5_VDPA_NOTIFIER_STATE_ENABLED;
		DRV_LOG(INFO, "Virtq %u notifier state is %s.", virtq->index,
			virtq->notifier_state ==
				MLX5_VDPA_NOTIFIER_STATE_ENABLED ? "enabled" :
								   "disabled");
	}
	DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}
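
/*
 * Unregister the kick-fd callback, retrying while the interrupt thread is
 * still executing it; the virtq lock is dropped around each retry sleep so
 * the kick handler itself can complete.
 */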
/* Virtq must be locked before calling this function. */
static void
mlx5_vdpa_virtq_unregister_intr_handle(struct mlx5_vdpa_virtq *virtq)
{
	int ret = -EAGAIN;

	if (!virtq->intr_handle)
		return;
	if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
		while (ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(virtq->intr_handle,
					mlx5_vdpa_virtq_kick_handler, virtq);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
					rte_intr_fd_get(virtq->intr_handle),
					virtq->index);
				pthread_mutex_unlock(&virtq->virtq_lock);
				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
				pthread_mutex_lock(&virtq->virtq_lock);
			}
		}
		(void)rte_intr_fd_set(virtq->intr_handle, -1);
	}
	rte_intr_instance_free(virtq->intr_handle);
	virtq->intr_handle = NULL;
}
/* Release cached VQ resources. */
void
mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
{
	unsigned int i, j;

	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];

		pthread_mutex_lock(&virtq->virtq_lock);
		virtq->configured = 0;
		for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
			if (virtq->umems[j].obj) {
				claim_zero(mlx5_glue->devx_umem_dereg
							(virtq->umems[j].obj));
				virtq->umems[j].obj = NULL;
			}
			if (virtq->umems[j].buf) {
				rte_free(virtq->umems[j].buf);
				virtq->umems[j].buf = NULL;
			}
			virtq->umems[j].size = 0;
		}
		if (virtq->eqp.fw_qp)
			mlx5_vdpa_event_qp_destroy(&virtq->eqp);
		pthread_mutex_unlock(&virtq->virtq_lock);
	}
}
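
/* Stop the virtq and destroy its DevX object, keeping the cached resources. */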
static void
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
	int ret;

	mlx5_vdpa_virtq_unregister_intr_handle(virtq);
	if (virtq->configured) {
		ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
		if (ret)
			DRV_LOG(WARNING, "Failed to stop virtq %d.",
				virtq->index);
		virtq->configured = 0;
		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
	}
	virtq->virtq = NULL;
	virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
}
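
/* Unset all the virtqs of the device and reset its negotiated features. */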
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_vdpa_virtq *virtq;
	int i;

	for (i = 0; i < priv->nr_virtqs; i++) {
		virtq = &priv->virtqs[i];
		pthread_mutex_lock(&virtq->virtq_lock);
		mlx5_vdpa_virtq_unset(virtq);
		pthread_mutex_unlock(&virtq->virtq_lock);
	}
	priv->features = 0;
	priv->nr_virtqs = 0;
}
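
/* Set the virtq state (ready or suspended) through a DevX modify command. */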
int
mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
{
	struct mlx5_devx_virtq_attr attr = {
			.mod_fields_bitmap = MLX5_VIRTQ_MODIFY_TYPE_STATE,
			.state = state ? MLX5_VIRTQ_STATE_RDY :
					 MLX5_VIRTQ_STATE_SUSPEND,
			.queue_index = virtq->index,
	};

	return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}
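
/*
 * Suspend the virtq and sync the vhost library ring indexes with the
 * hardware ones so the queue can be resumed or migrated consistently.
 */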
int
mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (virtq->stopped || !virtq->configured)
		return 0;
	ret = mlx5_vdpa_virtq_modify(virtq, 0);
	if (ret)
		return -1;
	virtq->stopped = true;
	DRV_LOG(DEBUG, "vid %u virtq %u was stopped.", priv->vid, index);
	return mlx5_vdpa_virtq_query(priv, index);
}
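
/*
 * Query the hardware available/used indexes of the virtq and propagate
 * them to the vhost library as the new vring base.
 */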
int
mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_devx_virtq_attr attr = {0};
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
		DRV_LOG(ERR, "Failed to query virtq %d.", index);
		return -1;
	}
	DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
		"hw_used_index=%d", priv->vid, index,
		attr.hw_available_index, attr.hw_used_index);
	ret = rte_vhost_set_vring_base(priv->vid, index,
				       attr.hw_available_index,
				       attr.hw_used_index);
	if (ret) {
		DRV_LOG(ERR, "Failed to set virtq %d base.", index);
		return -1;
	}
	if (attr.state == MLX5_VIRTQ_STATE_ERROR)
		DRV_LOG(WARNING, "vid %d vring %d hw error=%hhu",
			priv->vid, index, attr.error_type);
	return 0;
}
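
/*
 * Translate a host virtual address to a guest physical address by walking
 * the vhost memory regions. Returns 0 when the address is not covered by
 * any region.
 */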
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (hva >= reg->host_user_addr &&
		    hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}
	return gpa;
}
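
/*
 * Fill the virtq DevX attributes from the negotiated features and the vhost
 * vring state, and prepare the virtq sub-resources: event QP, counters and
 * the three UMEMs the device requires.
 */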
static int
mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
		struct mlx5_devx_virtq_attr *attr,
		struct rte_vhost_vring *vq, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	uint64_t gpa;
	int ret;
	unsigned int i;
	uint16_t last_avail_idx = 0;
	uint16_t last_used_idx = 0;

	if (virtq->virtq)
		attr->mod_fields_bitmap = MLX5_VIRTQ_MODIFY_TYPE_STATE |
			MLX5_VIRTQ_MODIFY_TYPE_ADDR |
			MLX5_VIRTQ_MODIFY_TYPE_HW_AVAILABLE_INDEX |
			MLX5_VIRTQ_MODIFY_TYPE_HW_USED_INDEX |
			MLX5_VIRTQ_MODIFY_TYPE_VERSION_1_0 |
			MLX5_VIRTQ_MODIFY_TYPE_Q_TYPE |
			MLX5_VIRTQ_MODIFY_TYPE_Q_MKEY |
			MLX5_VIRTQ_MODIFY_TYPE_QUEUE_FEATURE_BIT_MASK |
			MLX5_VIRTQ_MODIFY_TYPE_EVENT_MODE;
	attr->tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
	attr->tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
	attr->tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
	attr->rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
	attr->virtio_version_1_0 =
		!!(priv->features & (1ULL << VIRTIO_F_VERSION_1));
	attr->q_type =
		(priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
			MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
	/*
	 * No need to create event QPs when the guest works in poll mode or
	 * when the capability allows it.
	 */
	attr->event_mode = vq->callfd != -1 ||
	!(priv->caps.event_mode & (1 << MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
		MLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
	if (attr->event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
		ret = mlx5_vdpa_event_qp_prepare(priv,
				vq->size, vq->callfd, virtq);
		if (ret) {
			DRV_LOG(ERR,
				"Failed to create event QPs for virtq %d.",
				index);
			return -1;
		}
		attr->mod_fields_bitmap |= MLX5_VIRTQ_MODIFY_TYPE_EVENT_MODE;
		attr->qp_id = virtq->eqp.fw_qp->id;
	} else {
		DRV_LOG(INFO, "Virtq %d works in poll mode, no need for"
			" event QPs and event mechanism.", index);
	}
	if (priv->caps.queue_counters_valid) {
		if (!virtq->counters)
			virtq->counters =
				mlx5_devx_cmd_create_virtio_q_counters
							(priv->cdev->ctx);
		if (!virtq->counters) {
			DRV_LOG(ERR, "Failed to create virtq counters for virtq"
				" %d.", index);
			return -1;
		}
		attr->counters_obj_id = virtq->counters->id;
	}
	/* Setup 3 UMEMs for each virtq. */
	if (virtq->virtq) {
		for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
			uint32_t size;
			void *buf;
			struct mlx5dv_devx_umem *obj;

			size = priv->caps.umems[i].a * vq->size +
					priv->caps.umems[i].b;
			if (virtq->umems[i].size == size &&
			    virtq->umems[i].obj != NULL) {
				/* Reuse registered memory. */
				memset(virtq->umems[i].buf, 0, size);
				goto reuse;
			}
			if (virtq->umems[i].obj)
				claim_zero(mlx5_glue->devx_umem_dereg
						(virtq->umems[i].obj));
			if (virtq->umems[i].buf)
				rte_free(virtq->umems[i].buf);
			virtq->umems[i].size = 0;
			virtq->umems[i].obj = NULL;
			virtq->umems[i].buf = NULL;
			buf = rte_zmalloc(__func__,
					size, 4096);
			if (buf == NULL) {
				DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
					" %u.", i, index);
				return -1;
			}
			obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx,
					buf, size, IBV_ACCESS_LOCAL_WRITE);
			if (obj == NULL) {
				DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
					i, index);
				rte_free(buf);
				return -1;
			}
			virtq->umems[i].size = size;
			virtq->umems[i].buf = buf;
			virtq->umems[i].obj = obj;
reuse:
			attr->umems[i].id = virtq->umems[i].obj->umem_id;
			attr->umems[i].offset = 0;
			attr->umems[i].size = virtq->umems[i].size;
		}
	}
	if (attr->q_type == MLX5_VIRTQ_TYPE_SPLIT) {
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq->desc);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
			return -1;
		}
		attr->desc_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq->used);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
			return -1;
		}
		attr->used_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq->avail);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
			return -1;
		}
		attr->available_addr = gpa;
	}
	ret = rte_vhost_get_vring_base(priv->vid,
			index, &last_avail_idx, &last_used_idx);
	if (ret) {
		last_avail_idx = 0;
		last_used_idx = 0;
		DRV_LOG(WARNING, "Couldn't get vring base, indexes are set to 0.");
	} else {
		DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
			"virtq %d.", priv->vid, last_avail_idx,
			last_used_idx, index);
	}
	attr->hw_available_index = last_avail_idx;
	attr->hw_used_index = last_used_idx;
	attr->q_size = vq->size;
	attr->mkey = priv->gpa_mkey_index;
	attr->tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
	attr->queue_index = index;
	attr->pd = priv->cdev->pdn;
	attr->hw_latency_mode = priv->hw_latency_mode;
	attr->hw_max_latency_us = priv->hw_max_latency_us;
	attr->hw_max_pending_comp = priv->hw_max_pending_comp;
	if (attr->hw_latency_mode || attr->hw_max_latency_us ||
	    attr->hw_max_pending_comp)
		attr->mod_fields_bitmap |= MLX5_VIRTQ_MODIFY_TYPE_QUEUE_PERIOD;
	return 0;
}
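
/*
 * Check whether the device capabilities allow fully modifying a virtq
 * (state, addresses and indexes), so it can be reused by modification
 * instead of being destroyed and re-created.
 */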
bool
mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv)
{
	return (priv->caps.vnet_modify_ext &&
			priv->caps.virtio_net_q_addr_modify &&
			priv->caps.virtio_q_index_modify) ? true : false;
}
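
/*
 * Create or modify the virtq DevX object for the given index, ring its
 * doorbell, register the guest kick interrupt and subscribe to the virtq
 * error events.
 */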
static int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	struct rte_vhost_vring vq;
	struct mlx5_devx_virtq_attr attr = {0};
	int ret;
	uint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;
	uint64_t cookie;

	ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
	if (ret)
		return -1;
	if (vq.size == 0)
		return 0;
	virtq->priv = priv;
	ret = mlx5_vdpa_virtq_sub_objs_prepare(priv, &attr,
				&vq, index);
	if (ret) {
		DRV_LOG(ERR, "Failed to setup update virtq attr %d.",
			index);
		goto error;
	}
	if (!virtq->virtq) {
		virtq->index = index;
		virtq->vq_size = vq.size;
		virtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx,
			&attr);
		if (!virtq->virtq)
			goto error;
		attr.mod_fields_bitmap = MLX5_VIRTQ_MODIFY_TYPE_STATE;
	}
	attr.state = MLX5_VIRTQ_STATE_RDY;
	ret = mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify virtq %d.", index);
		goto error;
	}
	claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
	virtq->configured = 1;
	rte_spinlock_lock(&priv->db_lock);
	rte_write32(virtq->index, priv->virtq_db_addr);
	rte_spinlock_unlock(&priv->db_lock);
	/* Setup doorbell mapping. */
	virtq->intr_handle =
		rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
	if (virtq->intr_handle == NULL) {
		DRV_LOG(ERR, "Failed to allocate intr_handle.");
		goto error;
	}
	if (rte_intr_fd_set(virtq->intr_handle, vq.kickfd))
		goto error;
	if (rte_intr_fd_get(virtq->intr_handle) == -1) {
		DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
	} else {
		if (rte_intr_type_set(virtq->intr_handle, RTE_INTR_HANDLE_EXT))
			goto error;
		if (rte_intr_callback_register(virtq->intr_handle,
					       mlx5_vdpa_virtq_kick_handler,
					       virtq)) {
			(void)rte_intr_fd_set(virtq->intr_handle, -1);
			DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
				index);
			goto error;
		}
		DRV_LOG(DEBUG, "Register fd %d interrupt for virtq %d.",
			rte_intr_fd_get(virtq->intr_handle),
			index);
	}
	/* Subscribe virtq error event. */
	virtq->version++;
	cookie = ((uint64_t)virtq->version << 32) + index;
	ret = mlx5_glue->devx_subscribe_devx_event(priv->err_chnl,
						   virtq->virtq->obj,
						   sizeof(event_num),
						   &event_num, cookie);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe device %d virtq %d error event.",
			priv->vid, index);
		rte_errno = errno;
		goto error;
	}
	virtq->stopped = false;
	/* Initial notification to ask Qemu to handle completed buffers. */
	if (virtq->eqp.cq.callfd != -1)
		eventfd_write(virtq->eqp.cq.callfd, (eventfd_t)1);
	DRV_LOG(DEBUG, "vid %u virtq %u was created successfully.", priv->vid,
		index);
	return 0;
error:
	mlx5_vdpa_virtq_unset(virtq);
	return -1;
}
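
/* Validate the negotiated virtio features against the device capabilities. */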
int
mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
{
	if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		if (!(priv->caps.virtio_queue_type & (1 <<
		    MLX5_VIRTQ_TYPE_PACKED))) {
			DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
				"%d - it was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
		if (!priv->caps.tso_ipv4) {
			DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
		if (!priv->caps.tso_ipv6) {
			DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
		if (!priv->caps.tx_csum) {
			DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
		if (!priv->caps.rx_csum) {
			DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d -"
				" GUEST CSUM was not reported by HW/driver "
				"capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
		if (!priv->caps.virtio_version_1_0) {
			DRV_LOG(ERR, "Failed to enable version 1 for vdev %d -"
				" version 1 was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	return 0;
}
int
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
	uint32_t i;
	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
	struct mlx5_vdpa_virtq *virtq;

	if (ret || mlx5_vdpa_features_validate(priv)) {
		DRV_LOG(ERR, "Failed to configure negotiated features.");
		return -1;
	}
	if ((priv->features & (1ULL << VIRTIO_NET_F_CSUM)) == 0 &&
	    ((priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) > 0 ||
	     (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) > 0)) {
		/* Packet may be corrupted if TSO is enabled without CSUM. */
		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
	}
	if (nr_vring > priv->caps.max_num_virtio_queues) {
		DRV_LOG(ERR, "Do not support more than %d virtqs (%d).",
			(int)priv->caps.max_num_virtio_queues,
			nr_vring);
		return -1;
	}
	priv->nr_virtqs = nr_vring;
	for (i = 0; i < nr_vring; i++) {
		virtq = &priv->virtqs[i];
		if (virtq->enable) {
			pthread_mutex_lock(&virtq->virtq_lock);
			if (mlx5_vdpa_virtq_setup(priv, i)) {
				pthread_mutex_unlock(&virtq->virtq_lock);
				goto error;
			}
			pthread_mutex_unlock(&virtq->virtq_lock);
		}
	}
	return 0;
error:
	mlx5_vdpa_virtqs_release(priv);
	return -1;
}
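
/*
 * Check whether the vring parameters changed since the virtq was configured:
 * returns 1 when a re-creation is needed, 0 when nothing changed and a
 * negative value on query failure.
 */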
static int
mlx5_vdpa_virtq_is_modified(struct mlx5_vdpa_priv *priv,
		struct mlx5_vdpa_virtq *virtq)
{
	struct rte_vhost_vring vq;
	int ret = rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq);

	if (ret)
		return -1;
	if (vq.size != virtq->vq_size || vq.kickfd !=
	    rte_intr_fd_get(virtq->intr_handle))
		return 1;
	if (virtq->eqp.cq.cq_obj.cq) {
		if (vq.callfd != virtq->eqp.cq.callfd)
			return 1;
	} else if (vq.callfd != -1) {
		return 1;
	}
	return 0;
}
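
/*
 * Enable or disable a virtq at run time. A virtq whose vring parameters
 * changed is unset and set up again, and the RSS steering is updated for
 * receive queues.
 */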
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
	if (priv->state == MLX5_VDPA_STATE_PROBED) {
		virtq->enable = !!enable;
		return 0;
	}
	if (virtq->enable == !!enable) {
		if (!enable)
			return 0;
		ret = mlx5_vdpa_virtq_is_modified(priv, virtq);
		if (ret < 0) {
			DRV_LOG(ERR, "Virtq %d modify check failed.", index);
			return -1;
		}
		if (ret == 0)
			return 0;
		DRV_LOG(INFO, "Virtq %d was modified, recreate it.", index);
	}
	if (virtq->configured) {
		virtq->enable = 0;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to disable steering "
					"for virtq %d.", index);
		}
		mlx5_vdpa_virtq_unset(virtq);
	}
	if (enable) {
		ret = mlx5_vdpa_virtq_setup(priv, index);
		if (ret) {
			DRV_LOG(ERR, "Failed to setup virtq %d.", index);
			return -1;
		}
		virtq->enable = 1;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to enable steering "
					"for virtq %d.", index);
		}
	}
	return 0;
}
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	struct mlx5_devx_virtio_q_couners_attr *attr = &virtq->stats;
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
			"is invalid.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
		return ret;
	}
	ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
	if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
		.value = attr->received_desc - virtq->reset.received_desc,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
		.value = attr->completed_desc - virtq->reset.completed_desc,
	};
	if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
		.value = attr->bad_desc_errors - virtq->reset.bad_desc_errors,
	};
	if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
		return ret;
	stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
		.value = attr->exceed_max_chain - virtq->reset.exceed_max_chain,
	};
	if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
		return ret;
	stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_INVALID_BUFFER,
		.value = attr->invalid_buffer - virtq->reset.invalid_buffer,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
		.value = attr->error_cqes - virtq->reset.error_cqes,
	};
	return ret;
}
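
/*
 * Reset the virtq statistics by caching the current hardware counter values;
 * later reads report the delta against this snapshot.
 */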
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	int ret;

	if (virtq->counters == NULL) /* VQ not enabled. */
		return 0;
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
						    &virtq->reset);
	if (ret)
		DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
			qid);
	return ret;
}