/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

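/* Kick handler: consume the guest kickfd event, ring the HW virtq doorbell
 * and lazily enable the vhost host notifier on the first kick.
 */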
static void
mlx5_vdpa_virtq_kick_handler(void *cb_arg)
{
	struct mlx5_vdpa_virtq *virtq = cb_arg;
	struct mlx5_vdpa_priv *priv = virtq->priv;
	uint64_t buf;
	int nbytes;
	int retry;

	if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
		DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
			priv->vid, virtq->index);
		return;
	}
	if (rte_intr_fd_get(virtq->intr_handle) < 0)
		return;
	for (retry = 0; retry < 3; ++retry) {
		nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,
			      sizeof(buf));
		if (nbytes < 0) {
			if (errno == EINTR ||
			    errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			DRV_LOG(ERR, "Failed to read kickfd of virtq %d: %s",
				virtq->index, strerror(errno));
		}
		break;
	}
	if (nbytes < 0)
		return;
	rte_write32(virtq->index, priv->virtq_db_addr);
	if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
		DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
			priv->vid, virtq->index);
		return;
	}
	if (virtq->notifier_state == MLX5_VDPA_NOTIFIER_STATE_DISABLED) {
		if (rte_vhost_host_notifier_ctrl(priv->vid, virtq->index, true))
			virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_ERR;
		else
			virtq->notifier_state =
					       MLX5_VDPA_NOTIFIER_STATE_ENABLED;
		DRV_LOG(INFO, "Virtq %u notifier state is %s.", virtq->index,
			virtq->notifier_state ==
				MLX5_VDPA_NOTIFIER_STATE_ENABLED ? "enabled" :
								    "disabled");
	}
	DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}

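/* Release the resources of a single virtq: kick interrupt handler, HW virtq
 * object, UMEMs and event QP.
 */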
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
	unsigned int i;
	int ret = -EAGAIN;

	if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
		while (ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(virtq->intr_handle,
					mlx5_vdpa_virtq_kick_handler, virtq);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
					rte_intr_fd_get(virtq->intr_handle),
					virtq->index);
				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
			}
		}
		rte_intr_fd_set(virtq->intr_handle, -1);
	}
	rte_intr_instance_free(virtq->intr_handle);
	if (virtq->virtq) {
		ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
		if (ret)
			DRV_LOG(WARNING, "Failed to stop virtq %d.",
				virtq->index);
		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
	}
	virtq->virtq = NULL;
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		if (virtq->umems[i].obj)
			claim_zero(mlx5_glue->devx_umem_dereg
							 (virtq->umems[i].obj));
		rte_free(virtq->umems[i].buf);
	}
	memset(&virtq->umems, 0, sizeof(virtq->umems));
	if (virtq->eqp.fw_qp)
		mlx5_vdpa_event_qp_destroy(&virtq->eqp);
	virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
	return 0;
}

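/* Release all the device virtqs, including their counter objects, and reset
 * the driver virtq state.
 */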
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
	int i;
	struct mlx5_vdpa_virtq *virtq;

	for (i = 0; i < priv->nr_virtqs; i++) {
		virtq = &priv->virtqs[i];
		mlx5_vdpa_virtq_unset(virtq);
		if (virtq->counters)
			claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
	}
	priv->features = 0;
	memset(priv->virtqs, 0, sizeof(*virtq) * priv->nr_virtqs);
	priv->nr_virtqs = 0;
}

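/* Modify the HW virtq state: RDY when state is nonzero, SUSPEND otherwise. */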
int
mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
{
	struct mlx5_devx_virtq_attr attr = {
			.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
			.state = state ? MLX5_VIRTQ_STATE_RDY :
					 MLX5_VIRTQ_STATE_SUSPEND,
			.queue_index = virtq->index,
	};

	return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}

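/* Suspend the HW virtq and sync the vhost vring base with the HW indexes. */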
int
mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (virtq->stopped)
		return 0;
	ret = mlx5_vdpa_virtq_modify(virtq, 0);
	if (ret)
		return -1;
	virtq->stopped = true;
	DRV_LOG(DEBUG, "vid %u virtq %u was stopped.", priv->vid, index);
	return mlx5_vdpa_virtq_query(priv, index);
}

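/* Query the HW virtq available/used indexes and propagate them to the vhost
 * vring base.
 */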
int
mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_devx_virtq_attr attr = {0};
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
		DRV_LOG(ERR, "Failed to query virtq %d.", index);
		return -1;
	}
	DRV_LOG(INFO, "Query vid %d vring %d: hw_available_index=%d, "
		"hw_used_index=%d", priv->vid, index,
		attr.hw_available_index, attr.hw_used_index);
	ret = rte_vhost_set_vring_base(priv->vid, index,
				       attr.hw_available_index,
				       attr.hw_used_index);
	if (ret) {
		DRV_LOG(ERR, "Failed to set virtq %d base.", index);
		return -1;
	}
	if (attr.state == MLX5_VIRTQ_STATE_ERROR)
		DRV_LOG(WARNING, "vid %d vring %d hw error=%hhu",
			priv->vid, index, attr.error_type);
	return 0;
}

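/* Translate a host virtual address to a guest physical address using the
 * vhost memory regions. Return 0 when the address is not in any region.
 */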
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (hva >= reg->host_user_addr &&
		    hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}
	return gpa;
}

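/* Create and start the HW virtq for the given index: event QP, counters,
 * UMEMs, ring addresses, kickfd interrupt and error event subscription.
 */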
static int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	struct rte_vhost_vring vq;
	struct mlx5_devx_virtq_attr attr = {0};
	uint64_t gpa;
	int ret;
	unsigned int i;
	uint16_t last_avail_idx;
	uint16_t last_used_idx;
	uint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;
	uint64_t cookie;

	ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
	if (ret)
		return -1;
	virtq->index = index;
	virtq->vq_size = vq.size;
	attr.tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
	attr.tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
	attr.tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
	attr.rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
	attr.virtio_version_1_0 = !!(priv->features & (1ULL <<
							VIRTIO_F_VERSION_1));
	attr.type = (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
			MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
	/*
	 * No need to create an event QP when the guest works in poll mode
	 * (no callfd) and the device capability allows the no-MSIX mode.
	 */
	attr.event_mode = vq.callfd != -1 || !(priv->caps.event_mode & (1 <<
					       MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
						      MLX5_VIRTQ_EVENT_MODE_QP :
						  MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
	if (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
		ret = mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd,
						&virtq->eqp);
		if (ret) {
			DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
				index);
			return -1;
		}
		attr.qp_id = virtq->eqp.fw_qp->id;
	} else {
		DRV_LOG(INFO, "Virtq %d works in poll mode, no event QPs or"
			" event mechanism needed.", index);
	}
	if (priv->caps.queue_counters_valid) {
		if (!virtq->counters)
			virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
							      (priv->cdev->ctx);
		if (!virtq->counters) {
			DRV_LOG(ERR, "Failed to create virtq counters for virtq"
				" %d.", index);
			goto error;
		}
		attr.counters_obj_id = virtq->counters->id;
	}
	/* Setup 3 UMEMs for each virtq. */
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
							  priv->caps.umems[i].b;
		virtq->umems[i].buf = rte_zmalloc(__func__,
						  virtq->umems[i].size, 4096);
		if (!virtq->umems[i].buf) {
			DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
				" %u.", i, index);
			goto error;
		}
		virtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx,
							virtq->umems[i].buf,
							virtq->umems[i].size,
							IBV_ACCESS_LOCAL_WRITE);
		if (!virtq->umems[i].obj) {
			DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
				i, index);
			goto error;
		}
		attr.umems[i].id = virtq->umems[i].obj->umem_id;
		attr.umems[i].offset = 0;
		attr.umems[i].size = virtq->umems[i].size;
	}
	if (attr.type == MLX5_VIRTQ_TYPE_SPLIT) {
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.desc);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
			goto error;
		}
		attr.desc_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.used);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
			goto error;
		}
		attr.used_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.avail);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
			goto error;
		}
		attr.available_addr = gpa;
	}
	ret = rte_vhost_get_vring_base(priv->vid, index, &last_avail_idx,
				       &last_used_idx);
	if (ret) {
		last_avail_idx = 0;
		last_used_idx = 0;
		DRV_LOG(WARNING, "Couldn't get vring base, indexes are set to 0.");
	} else {
		DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
				"virtq %d.", priv->vid, last_avail_idx,
				last_used_idx, index);
	}
	attr.hw_available_index = last_avail_idx;
	attr.hw_used_index = last_used_idx;
	attr.q_size = vq.size;
	attr.mkey = priv->gpa_mkey_index;
	attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
	attr.queue_index = index;
	attr.pd = priv->cdev->pdn;
	attr.hw_latency_mode = priv->hw_latency_mode;
	attr.hw_max_latency_us = priv->hw_max_latency_us;
	attr.hw_max_pending_comp = priv->hw_max_pending_comp;
	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);
	virtq->priv = priv;
	if (!virtq->virtq)
		goto error;
	claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
	if (mlx5_vdpa_virtq_modify(virtq, 1))
		goto error;
	rte_write32(virtq->index, priv->virtq_db_addr);
	/* Setup the kickfd interrupt to relay guest doorbells to the HW. */
	virtq->intr_handle =
		rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
	if (virtq->intr_handle == NULL) {
		DRV_LOG(ERR, "Failed to allocate intr_handle.");
		goto error;
	}

	if (rte_intr_fd_set(virtq->intr_handle, vq.kickfd))
		goto error;

	if (rte_intr_fd_get(virtq->intr_handle) == -1) {
		DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
	} else {
		if (rte_intr_type_set(virtq->intr_handle, RTE_INTR_HANDLE_EXT))
			goto error;

		if (rte_intr_callback_register(virtq->intr_handle,
					       mlx5_vdpa_virtq_kick_handler,
					       virtq)) {
			rte_intr_fd_set(virtq->intr_handle, -1);
			DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
				index);
			goto error;
		} else {
			DRV_LOG(DEBUG, "Register fd %d interrupt for virtq %d.",
				rte_intr_fd_get(virtq->intr_handle),
				index);
		}
	}
	/* Subscribe to the virtq error event. */
	virtq->version++;
	cookie = ((uint64_t)virtq->version << 32) + index;
	ret = mlx5_glue->devx_subscribe_devx_event(priv->err_chnl,
						   virtq->virtq->obj,
						   sizeof(event_num),
						   &event_num, cookie);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe device %d virtq %d error event.",
			priv->vid, index);
		rte_errno = errno;
		goto error;
	}
	virtq->stopped = false;
	/* Initial notification to ask Qemu to handle completed buffers. */
	if (virtq->eqp.cq.callfd != -1)
		eventfd_write(virtq->eqp.cq.callfd, (eventfd_t)1);
	DRV_LOG(DEBUG, "vid %u virtq %u was created successfully.", priv->vid,
		index);
	return 0;
error:
	mlx5_vdpa_virtq_unset(virtq);
	return -1;
}

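/* Check that each negotiated virtio feature is backed by the matching
 * HW/driver capability.
 */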
static int
mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
{
	if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		if (!(priv->caps.virtio_queue_type & (1 <<
						     MLX5_VIRTQ_TYPE_PACKED))) {
			DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
				"%d - it was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
		if (!priv->caps.tso_ipv4) {
			DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
		if (!priv->caps.tso_ipv6) {
			DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
		if (!priv->caps.tx_csum) {
			DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
		if (!priv->caps.rx_csum) {
			DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d"
				" - GUEST CSUM was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
		if (!priv->caps.virtio_version_1_0) {
			DRV_LOG(ERR, "Failed to enable version 1 for vdev %d"
				" - version 1 was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	return 0;
}

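/* Validate the negotiated features and create all the enabled virtqs of the
 * device.
 */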
int
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
	uint32_t i;
	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);

	if (ret || mlx5_vdpa_features_validate(priv)) {
		DRV_LOG(ERR, "Failed to configure negotiated features.");
		return -1;
	}
	if ((priv->features & (1ULL << VIRTIO_NET_F_CSUM)) == 0 &&
	    ((priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) > 0 ||
	     (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) > 0)) {
		/* Packets may be corrupted if TSO is enabled without CSUM. */
		DRV_LOG(INFO, "TSO is enabled without CSUM, forcing CSUM.");
		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
	}
	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
		DRV_LOG(ERR, "No support for more than %d virtqs (%d requested).",
			(int)priv->caps.max_num_virtio_queues * 2,
			(int)nr_vring);
		return -1;
	}
	priv->nr_virtqs = nr_vring;
	for (i = 0; i < nr_vring; i++)
		if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
			goto error;
	return 0;
error:
	mlx5_vdpa_virtqs_release(priv);
	return -1;
}

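/* Return 1 when the vhost vring configuration differs from the one the HW
 * virtq was created with, 0 when it matches and -1 on query failure.
 */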
static int
mlx5_vdpa_virtq_is_modified(struct mlx5_vdpa_priv *priv,
			    struct mlx5_vdpa_virtq *virtq)
{
	struct rte_vhost_vring vq;
	int ret = rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq);

	if (ret)
		return -1;
	if (vq.size != virtq->vq_size || vq.kickfd !=
	    rte_intr_fd_get(virtq->intr_handle))
		return 1;
	if (virtq->eqp.cq.cq_obj.cq) {
		if (vq.callfd != virtq->eqp.cq.callfd)
			return 1;
	} else if (vq.callfd != -1) {
		return 1;
	}
	return 0;
}

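/* Enable or disable a virtq, recreating it when its vring configuration was
 * modified, and update the RX steering accordingly.
 */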
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
	if (priv->state == MLX5_VDPA_STATE_PROBED) {
		virtq->enable = !!enable;
		return 0;
	}
	if (virtq->enable == !!enable) {
		if (!enable)
			return 0;
		ret = mlx5_vdpa_virtq_is_modified(priv, virtq);
		if (ret < 0) {
			DRV_LOG(ERR, "Virtq %d modify check failed.", index);
			return -1;
		}
		if (ret == 0)
			return 0;
		DRV_LOG(INFO, "Virtq %d was modified, recreate it.", index);
	}
	if (virtq->virtq) {
		virtq->enable = 0;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to disable steering "
					"for virtq %d.", index);
		}
		mlx5_vdpa_virtq_unset(virtq);
	}
	if (enable) {
		ret = mlx5_vdpa_virtq_setup(priv, index);
		if (ret) {
			DRV_LOG(ERR, "Failed to setup virtq %d.", index);
			return ret;
		}
		virtq->enable = 1;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to enable steering "
					"for virtq %d.", index);
		}
	}
	return 0;
}

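/* Read the HW virtq counters and report them relative to the last reset.
 * Return the number of filled stats entries or a negative errno value.
 */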
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	struct mlx5_devx_virtio_q_couners_attr attr = {0};
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
			"counters are not available.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
		return ret;
	}
	ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
	if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
		.value = attr.received_desc - virtq->reset.received_desc,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
		.value = attr.completed_desc - virtq->reset.completed_desc,
	};
	if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
		.value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
	};
	if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
		return ret;
	stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
		.value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
	};
	if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
		return ret;
	stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_INVALID_BUFFER,
		.value = attr.invalid_buffer - virtq->reset.invalid_buffer,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
		.value = attr.error_cqes - virtq->reset.error_cqes,
	};
	return ret;
}

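/* Reset the virtq statistics by saving the current HW counter values as the
 * new baseline.
 */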
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to reset virtq %d statistics - virtq "
			"counters are not available.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
						    &virtq->reset);
	if (ret)
		DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
			qid);
	return ret;
}