drivers/vdpa/mlx5/mlx5_vdpa_virtq.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

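/*
 * Kick (doorbell) event handler.
 *
 * Drains the guest kickfd and relays the notification to the device by
 * writing the queue index to the doorbell register. On the first kick it
 * also tries to switch the queue to the HW host-notifier mapping so that
 * later kicks go directly to the device.
 */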
static void
mlx5_vdpa_virtq_kick_handler(void *cb_arg)
{
        struct mlx5_vdpa_virtq *virtq = cb_arg;
        struct mlx5_vdpa_priv *priv = virtq->priv;
        uint64_t buf;
        int nbytes;

        if (rte_intr_fd_get(virtq->intr_handle) < 0)
                return;

        do {
                nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,
                              8);
                if (nbytes < 0) {
                        if (errno == EINTR ||
                            errno == EWOULDBLOCK ||
                            errno == EAGAIN)
                                continue;
                        DRV_LOG(ERR, "Failed to read kickfd of virtq %d: %s",
                                virtq->index, strerror(errno));
                }
                break;
        } while (1);
        rte_write32(virtq->index, priv->virtq_db_addr);
        if (virtq->notifier_state == MLX5_VDPA_NOTIFIER_STATE_DISABLED) {
                if (rte_vhost_host_notifier_ctrl(priv->vid, virtq->index, true))
                        virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_ERR;
                else
                        virtq->notifier_state =
                                               MLX5_VDPA_NOTIFIER_STATE_ENABLED;
                DRV_LOG(INFO, "Virtq %u notifier state is %s.", virtq->index,
                        virtq->notifier_state ==
                                MLX5_VDPA_NOTIFIER_STATE_ENABLED ? "enabled" :
                                                                    "disabled");
        }
        DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}

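/*
 * Release all per-virtq resources: unregister the kick interrupt (retrying
 * while the callback is still running), stop and destroy the DevX virtq
 * object, deregister and free the UMEM buffers, and destroy the event QP.
 */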
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
        unsigned int i;
        int ret = -EAGAIN;

        if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
                while (ret == -EAGAIN) {
                        ret = rte_intr_callback_unregister(virtq->intr_handle,
                                        mlx5_vdpa_virtq_kick_handler, virtq);
                        if (ret == -EAGAIN) {
                                DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
                                        rte_intr_fd_get(virtq->intr_handle),
                                        virtq->index);
                                usleep(MLX5_VDPA_INTR_RETRIES_USEC);
                        }
                }
                rte_intr_fd_set(virtq->intr_handle, -1);
        }
        rte_intr_instance_free(virtq->intr_handle);
        if (virtq->virtq) {
                ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
                if (ret)
                        DRV_LOG(WARNING, "Failed to stop virtq %d.",
                                virtq->index);
                claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
        }
        virtq->virtq = NULL;
        for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
                if (virtq->umems[i].obj)
                        claim_zero(mlx5_glue->devx_umem_dereg
                                                         (virtq->umems[i].obj));
                rte_free(virtq->umems[i].buf);
        }
        memset(&virtq->umems, 0, sizeof(virtq->umems));
        if (virtq->eqp.fw_qp)
                mlx5_vdpa_event_qp_destroy(&virtq->eqp);
        virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
        return 0;
}

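/*
 * Release all virtqs of the device together with the shared TIS objects,
 * the transport domain and the doorbell mapping, and reset the negotiated
 * features.
 */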
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
        int i;
        struct mlx5_vdpa_virtq *virtq;

        for (i = 0; i < priv->nr_virtqs; i++) {
                virtq = &priv->virtqs[i];
                mlx5_vdpa_virtq_unset(virtq);
                if (virtq->counters)
                        claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
        }
        for (i = 0; i < priv->num_lag_ports; i++) {
                if (priv->tiss[i]) {
                        claim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));
                        priv->tiss[i] = NULL;
                }
        }
        if (priv->td) {
                claim_zero(mlx5_devx_cmd_destroy(priv->td));
                priv->td = NULL;
        }
        if (priv->virtq_db_addr) {
                claim_zero(munmap(priv->virtq_db_addr, priv->var->length));
                priv->virtq_db_addr = NULL;
        }
        priv->features = 0;
        memset(priv->virtqs, 0, sizeof(*virtq) * priv->nr_virtqs);
        priv->nr_virtqs = 0;
}

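/* Move a virtq between the RDY (state != 0) and SUSPEND (state == 0) states. */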
int
mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
{
        struct mlx5_devx_virtq_attr attr = {
                        .type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
                        .state = state ? MLX5_VIRTQ_STATE_RDY :
                                         MLX5_VIRTQ_STATE_SUSPEND,
                        .queue_index = virtq->index,
        };

        return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}

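/*
 * Suspend a virtq in HW and sync its available/used indexes back to the
 * vhost library via mlx5_vdpa_virtq_query().
 */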
int
mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
{
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
        int ret;

        if (virtq->stopped)
                return 0;
        ret = mlx5_vdpa_virtq_modify(virtq, 0);
        if (ret)
                return -1;
        virtq->stopped = true;
        DRV_LOG(DEBUG, "vid %u virtq %u was stopped.", priv->vid, index);
        return mlx5_vdpa_virtq_query(priv, index);
}

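/*
 * Query the HW available/used indexes of a virtq and propagate them to the
 * vhost library as the new vring base. Also report a HW error state if any.
 */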
int
mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index)
{
        struct mlx5_devx_virtq_attr attr = {0};
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
        int ret;

        if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
                DRV_LOG(ERR, "Failed to query virtq %d.", index);
                return -1;
        }
        DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
                "hw_used_index=%d", priv->vid, index,
                attr.hw_available_index, attr.hw_used_index);
        ret = rte_vhost_set_vring_base(priv->vid, index,
                                       attr.hw_available_index,
                                       attr.hw_used_index);
        if (ret) {
                DRV_LOG(ERR, "Failed to set virtq %d base.", index);
                return -1;
        }
        if (attr.state == MLX5_VIRTQ_STATE_ERROR)
                DRV_LOG(WARNING, "vid %d vring %d hw error=%hhu",
                        priv->vid, index, attr.error_type);
        return 0;
}

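/*
 * Translate a host virtual address to the guest physical address by a
 * linear scan of the vhost memory regions. Returns 0 when the address is
 * not covered by any region.
 */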
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
        struct rte_vhost_mem_region *reg;
        uint32_t i;
        uint64_t gpa = 0;

        for (i = 0; i < mem->nregions; i++) {
                reg = &mem->regions[i];
                if (hva >= reg->host_user_addr &&
                    hva < reg->host_user_addr + reg->size) {
                        gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
                        break;
                }
        }
        return gpa;
}

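/*
 * Create and start a single virtq in HW:
 *   1. Read the vring configuration from the vhost library.
 *   2. Create an event QP (unless the guest is in poll mode and the device
 *      supports the NO_MSIX event mode) and, optionally, a counter set.
 *   3. Allocate and register the three UMEMs required by the device.
 *   4. For split rings, translate the descriptor/used/available ring
 *      addresses to guest physical addresses.
 *   5. Create the DevX virtq object, move it to RDY, register the kickfd
 *      handler and subscribe to the virtq error event.
 */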
static int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
        struct rte_vhost_vring vq;
        struct mlx5_devx_virtq_attr attr = {0};
        uint64_t gpa;
        int ret;
        unsigned int i;
        uint16_t last_avail_idx;
        uint16_t last_used_idx;
        uint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;
        uint64_t cookie;

        ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
        if (ret)
                return -1;
        virtq->index = index;
        virtq->vq_size = vq.size;
        attr.tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
        attr.tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
        attr.tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
        attr.rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
        attr.virtio_version_1_0 = !!(priv->features & (1ULL <<
                                                        VIRTIO_F_VERSION_1));
        attr.type = (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
                        MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
        /*
         * An event QP is not needed when the guest works in poll mode
         * (callfd == -1) and the device supports the NO_MSIX event mode.
         */
        attr.event_mode = vq.callfd != -1 || !(priv->caps.event_mode & (1 <<
                                               MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
                                                      MLX5_VIRTQ_EVENT_MODE_QP :
                                                  MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
        if (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
                ret = mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd,
                                                &virtq->eqp);
                if (ret) {
                        DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
                                index);
                        return -1;
                }
                attr.qp_id = virtq->eqp.fw_qp->id;
        } else {
                DRV_LOG(INFO, "Virtq %d works in poll mode; no event QP or"
                        " event mechanism is needed.", index);
        }
        if (priv->caps.queue_counters_valid) {
                if (!virtq->counters)
                        virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
                                                              (priv->cdev->ctx);
                if (!virtq->counters) {
                        DRV_LOG(ERR, "Failed to create virtq counters for virtq"
                                " %d.", index);
                        goto error;
                }
                attr.counters_obj_id = virtq->counters->id;
        }
        /* Setup 3 UMEMs for each virtq. */
        for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
                virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
                                                          priv->caps.umems[i].b;
                virtq->umems[i].buf = rte_zmalloc(__func__,
                                                  virtq->umems[i].size, 4096);
                if (!virtq->umems[i].buf) {
                        DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
                                " %u.", i, index);
                        goto error;
                }
                virtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx,
                                                        virtq->umems[i].buf,
                                                        virtq->umems[i].size,
                                                        IBV_ACCESS_LOCAL_WRITE);
                if (!virtq->umems[i].obj) {
                        DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
                                i, index);
                        goto error;
                }
                attr.umems[i].id = virtq->umems[i].obj->umem_id;
                attr.umems[i].offset = 0;
                attr.umems[i].size = virtq->umems[i].size;
        }
        if (attr.type == MLX5_VIRTQ_TYPE_SPLIT) {
                gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
                                           (uint64_t)(uintptr_t)vq.desc);
                if (!gpa) {
                        DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
                        goto error;
                }
                attr.desc_addr = gpa;
                gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
                                           (uint64_t)(uintptr_t)vq.used);
                if (!gpa) {
                        DRV_LOG(ERR, "Failed to get GPA for used ring.");
                        goto error;
                }
                attr.used_addr = gpa;
                gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
                                           (uint64_t)(uintptr_t)vq.avail);
                if (!gpa) {
                        DRV_LOG(ERR, "Failed to get GPA for available ring.");
                        goto error;
                }
                attr.available_addr = gpa;
        }
        ret = rte_vhost_get_vring_base(priv->vid, index, &last_avail_idx,
                                 &last_used_idx);
        if (ret) {
                last_avail_idx = 0;
                last_used_idx = 0;
                DRV_LOG(WARNING, "Couldn't get vring base, idx are set to 0");
        } else {
                DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
                                "virtq %d.", priv->vid, last_avail_idx,
                                last_used_idx, index);
        }
        attr.hw_available_index = last_avail_idx;
        attr.hw_used_index = last_used_idx;
        attr.q_size = vq.size;
        attr.mkey = priv->gpa_mkey_index;
        attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
        attr.queue_index = index;
        attr.pd = priv->cdev->pdn;
        attr.hw_latency_mode = priv->hw_latency_mode;
        attr.hw_max_latency_us = priv->hw_max_latency_us;
        attr.hw_max_pending_comp = priv->hw_max_pending_comp;
        virtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);
        virtq->priv = priv;
        if (!virtq->virtq)
                goto error;
        claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
        if (mlx5_vdpa_virtq_modify(virtq, 1))
                goto error;
        rte_write32(virtq->index, priv->virtq_db_addr);
        /* Setup the kick handler to relay guest kicks to the HW doorbell. */
        virtq->intr_handle =
                rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
        if (virtq->intr_handle == NULL) {
                DRV_LOG(ERR, "Failed to allocate intr_handle.");
                goto error;
        }

        if (rte_intr_fd_set(virtq->intr_handle, vq.kickfd))
                goto error;

        if (rte_intr_fd_get(virtq->intr_handle) == -1) {
                DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
        } else {
                if (rte_intr_type_set(virtq->intr_handle, RTE_INTR_HANDLE_EXT))
                        goto error;

                if (rte_intr_callback_register(virtq->intr_handle,
                                               mlx5_vdpa_virtq_kick_handler,
                                               virtq)) {
                        rte_intr_fd_set(virtq->intr_handle, -1);
                        DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
                                index);
                        goto error;
                } else {
                        DRV_LOG(DEBUG, "Registered fd %d interrupt for virtq %d.",
                                rte_intr_fd_get(virtq->intr_handle),
                                index);
                }
        }
        /* Subscribe to the virtq error event. */
        virtq->version++;
        cookie = ((uint64_t)virtq->version << 32) + index;
        ret = mlx5_glue->devx_subscribe_devx_event(priv->err_chnl,
                                                   virtq->virtq->obj,
                                                   sizeof(event_num),
                                                   &event_num, cookie);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe device %d virtq %d error event.",
                        priv->vid, index);
                rte_errno = errno;
                goto error;
        }
        virtq->stopped = false;
        /* Initial notification to ask QEMU to handle completed buffers. */
        if (virtq->eqp.cq.callfd != -1)
                eventfd_write(virtq->eqp.cq.callfd, (eventfd_t)1);
        DRV_LOG(DEBUG, "vid %u virtq %u was created successfully.", priv->vid,
                index);
        return 0;
error:
        mlx5_vdpa_virtq_unset(virtq);
        return -1;
}

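/*
 * Cross-check the features negotiated with the guest against the device
 * capabilities and fail with -ENOTSUP on any feature the HW cannot offload.
 */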
static int
mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
{
        if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
                if (!(priv->caps.virtio_queue_type & (1 <<
                                                     MLX5_VIRTQ_TYPE_PACKED))) {
                        DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
                                "%d - it was not reported by HW/driver"
                                " capability.", priv->vid);
                        return -ENOTSUP;
                }
        }
        if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
                if (!priv->caps.tso_ipv4) {
                        DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
                                " was not reported by HW/driver capability.",
                                priv->vid);
                        return -ENOTSUP;
                }
        }
        if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
                if (!priv->caps.tso_ipv6) {
                        DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
                                " was not reported by HW/driver capability.",
                                priv->vid);
                        return -ENOTSUP;
                }
        }
        if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
                if (!priv->caps.tx_csum) {
                        DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
                                " was not reported by HW/driver capability.",
                                priv->vid);
                        return -ENOTSUP;
                }
        }
        if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
                if (!priv->caps.rx_csum) {
                        DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d"
                                " - GUEST CSUM was not reported by HW/driver "
                                "capability.", priv->vid);
                        return -ENOTSUP;
                }
        }
        if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
                if (!priv->caps.virtio_version_1_0) {
                        DRV_LOG(ERR, "Failed to enable version 1 for vdev %d -"
                                " version 1 was not reported by HW/driver"
                                " capability.", priv->vid);
                        return -ENOTSUP;
                }
        }
        return 0;
}

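/*
 * Prepare all virtqs of the device: validate the negotiated features, map
 * the doorbell page, create the transport domain and one TIS per LAG port,
 * then set up every enabled queue.
 */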
int
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
        struct mlx5_devx_tis_attr tis_attr = {0};
        struct ibv_context *ctx = priv->cdev->ctx;
        uint32_t i;
        uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
        int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);

        if (ret || mlx5_vdpa_features_validate(priv)) {
                DRV_LOG(ERR, "Failed to configure negotiated features.");
                return -1;
        }
        if ((priv->features & (1ULL << VIRTIO_NET_F_CSUM)) == 0 &&
            ((priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) > 0 ||
             (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) > 0)) {
                /* Packet may be corrupted if TSO is enabled without CSUM. */
                DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
                priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
        }
        if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
                DRV_LOG(ERR, "Do not support more than %d virtqs (%d).",
                        (int)priv->caps.max_num_virtio_queues * 2,
                        (int)nr_vring);
                return -1;
        }
        /* Always map the entire page. */
        priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
                                   PROT_WRITE, MAP_SHARED, ctx->cmd_fd,
                                   priv->var->mmap_off);
        if (priv->virtq_db_addr == MAP_FAILED) {
                DRV_LOG(ERR, "Failed to map doorbell page, errno %d.", errno);
                priv->virtq_db_addr = NULL;
                goto error;
        } else {
                DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
                        priv->virtq_db_addr);
        }
        priv->td = mlx5_devx_cmd_create_td(ctx);
        if (!priv->td) {
                DRV_LOG(ERR, "Failed to create transport domain.");
                goto error;
        }
        tis_attr.transport_domain = priv->td->id;
        for (i = 0; i < priv->num_lag_ports; i++) {
                /* 0 is auto affinity, non-zero value to propose port. */
                tis_attr.lag_tx_port_affinity = i + 1;
                priv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);
                if (!priv->tiss[i]) {
                        DRV_LOG(ERR, "Failed to create TIS %u.", i);
                        goto error;
                }
        }
        priv->nr_virtqs = nr_vring;
        for (i = 0; i < nr_vring; i++)
                if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
                        goto error;
        return 0;
error:
        mlx5_vdpa_virtqs_release(priv);
        return -1;
}

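/*
 * Check whether the guest changed the vring configuration (size, kickfd or
 * callfd) since the virtq was created, which would require recreating it.
 */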
static int
mlx5_vdpa_virtq_is_modified(struct mlx5_vdpa_priv *priv,
                            struct mlx5_vdpa_virtq *virtq)
{
        struct rte_vhost_vring vq;
        int ret = rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq);

        if (ret)
                return -1;
        if (vq.size != virtq->vq_size || vq.kickfd !=
            rte_intr_fd_get(virtq->intr_handle))
                return 1;
        if (virtq->eqp.cq.cq_obj.cq) {
                if (vq.callfd != virtq->eqp.cq.callfd)
                        return 1;
        } else if (vq.callfd != -1) {
                return 1;
        }
        return 0;
}

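/*
 * Enable or disable a virtq. When the device is already configured, a
 * modified queue is torn down and recreated, and the RSS steering is
 * updated whenever an RX queue changes state.
 */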
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
        int ret;

        DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
                virtq->enable ? "en" : "dis", enable ? "en" : "dis");
        if (!priv->configured) {
                virtq->enable = !!enable;
                return 0;
        }
        if (virtq->enable == !!enable) {
                if (!enable)
                        return 0;
                ret = mlx5_vdpa_virtq_is_modified(priv, virtq);
                if (ret < 0) {
                        DRV_LOG(ERR, "Virtq %d modify check failed.", index);
                        return -1;
                }
                if (ret == 0)
                        return 0;
                DRV_LOG(INFO, "Virtq %d was modified, recreate it.", index);
        }
        if (virtq->virtq) {
                virtq->enable = 0;
                if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
                        ret = mlx5_vdpa_steer_update(priv);
                        if (ret)
                                DRV_LOG(WARNING, "Failed to disable steering "
                                        "for virtq %d.", index);
                }
                mlx5_vdpa_virtq_unset(virtq);
        }
        if (enable) {
                ret = mlx5_vdpa_virtq_setup(priv, index);
                if (ret) {
                        DRV_LOG(ERR, "Failed to setup virtq %d.", index);
                        return ret;
                }
                virtq->enable = 1;
                if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
                        ret = mlx5_vdpa_steer_update(priv);
                        if (ret)
                                DRV_LOG(WARNING, "Failed to enable steering "
                                        "for virtq %d.", index);
                }
        }
        return 0;
}

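/*
 * Read up to n statistics of a virtq from its HW counter set. Values are
 * reported relative to the snapshot taken by the last stats reset. Returns
 * the number of filled entries or a negative errno value on failure.
 */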
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
                          struct rte_vdpa_stat *stats, unsigned int n)
{
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
        struct mlx5_devx_virtio_q_couners_attr attr = {0};
        int ret;

        if (!virtq->counters) {
                DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
                        "is invalid.", qid);
                return -EINVAL;
        }
        ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
        if (ret) {
                DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
                return ret;
        }
        ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
        if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
                return ret;
        stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
                .value = attr.received_desc - virtq->reset.received_desc,
        };
        if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
                return ret;
        stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
                .value = attr.completed_desc - virtq->reset.completed_desc,
        };
        if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
                return ret;
        stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
                .value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
        };
        if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
                return ret;
        stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
                .value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
        };
        if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
                return ret;
        stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_INVALID_BUFFER,
                .value = attr.invalid_buffer - virtq->reset.invalid_buffer,
        };
        if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
                return ret;
        stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
                .value = attr.error_cqes - virtq->reset.error_cqes,
        };
        return ret;
}

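/*
 * Reset the statistics of a virtq by caching the current HW counter values
 * as the baseline for subsequent mlx5_vdpa_virtq_stats_get() calls.
 */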
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
{
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
        int ret;

        if (!virtq->counters) {
                DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
                        "is invalid.", qid);
                return -EINVAL;
        }
        ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
                                                    &virtq->reset);
        if (ret)
                DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
                        qid);
        return ret;
}