drivers: remove direct access to interrupt handle
dpdk.git: drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

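/*
 * Kick-fd handler: drain the guest's kick eventfd and relay the
 * notification by writing the virtq index to the device doorbell page.
 * On the first kick, also try to enable the vhost host notifier so that
 * later kicks can reach the device doorbell directly.
 */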
static void
mlx5_vdpa_virtq_handler(void *cb_arg)
{
	struct mlx5_vdpa_virtq *virtq = cb_arg;
	struct mlx5_vdpa_priv *priv = virtq->priv;
	uint64_t buf;
	int nbytes;

	do {
		nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf, 8);
		if (nbytes < 0) {
			if (errno == EINTR ||
			    errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			DRV_LOG(ERR, "Failed to read kickfd of virtq %d: %s",
				virtq->index, strerror(errno));
		}
		break;
	} while (1);
	rte_write32(virtq->index, priv->virtq_db_addr);
	if (virtq->notifier_state == MLX5_VDPA_NOTIFIER_STATE_DISABLED) {
		if (rte_vhost_host_notifier_ctrl(priv->vid, virtq->index, true))
			virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_ERR;
		else
			virtq->notifier_state =
					       MLX5_VDPA_NOTIFIER_STATE_ENABLED;
		DRV_LOG(INFO, "Virtq %u notifier state is %s.", virtq->index,
			virtq->notifier_state ==
				MLX5_VDPA_NOTIFIER_STATE_ENABLED ? "enabled" :
								    "disabled");
	}
	DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}

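/*
 * Release all resources of a single virtq: unregister the kick-fd
 * interrupt callback (retrying while the callback is busy), destroy the
 * virtq DevX object, deregister and free the UMEMs, and destroy the
 * event QP if one was created.
 */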
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
	unsigned int i;
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (rte_intr_fd_get(virtq->intr_handle) != -1) {
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(virtq->intr_handle,
							mlx5_vdpa_virtq_handler,
							virtq);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
					"of virtq %d interrupt, retries = %d.",
					rte_intr_fd_get(virtq->intr_handle),
					(int)virtq->index, retries);
				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
			}
		}
		rte_intr_fd_set(virtq->intr_handle, -1);
	}
	rte_intr_instance_free(virtq->intr_handle);
	/* Clear the stale pointer to avoid a double free on a later unset. */
	virtq->intr_handle = NULL;
	if (virtq->virtq) {
		ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
		if (ret)
			DRV_LOG(WARNING, "Failed to stop virtq %d.",
				virtq->index);
		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
	}
	virtq->virtq = NULL;
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		if (virtq->umems[i].obj)
			claim_zero(mlx5_glue->devx_umem_dereg
							 (virtq->umems[i].obj));
		if (virtq->umems[i].buf)
			rte_free(virtq->umems[i].buf);
	}
	memset(&virtq->umems, 0, sizeof(virtq->umems));
	if (virtq->eqp.fw_qp)
		mlx5_vdpa_event_qp_destroy(&virtq->eqp);
	virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
	return 0;
}

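/*
 * Release all virtqs of the device together with the shared objects
 * created at prepare time: per-virtq counters, the TIS objects, the
 * transport domain and the doorbell page mapping.
 */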
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
	int i;
	struct mlx5_vdpa_virtq *virtq;

	for (i = 0; i < priv->nr_virtqs; i++) {
		virtq = &priv->virtqs[i];
		mlx5_vdpa_virtq_unset(virtq);
		if (virtq->counters)
			claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
	}
	for (i = 0; i < priv->num_lag_ports; i++) {
		if (priv->tiss[i]) {
			claim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));
			priv->tiss[i] = NULL;
		}
	}
	if (priv->td) {
		claim_zero(mlx5_devx_cmd_destroy(priv->td));
		priv->td = NULL;
	}
	if (priv->virtq_db_addr) {
		claim_zero(munmap(priv->virtq_db_addr, priv->var->length));
		priv->virtq_db_addr = NULL;
	}
	priv->features = 0;
	memset(priv->virtqs, 0, sizeof(*virtq) * priv->nr_virtqs);
	priv->nr_virtqs = 0;
}

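/* Move a virtq between the RDY (state != 0) and SUSPEND hardware states. */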
int
mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
{
	struct mlx5_devx_virtq_attr attr = {
			.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
			.state = state ? MLX5_VIRTQ_STATE_RDY :
					 MLX5_VIRTQ_STATE_SUSPEND,
			.queue_index = virtq->index,
	};

	return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}

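/*
 * Suspend a virtq and propagate its hardware ring indexes to the vhost
 * library, so a later restart or migration resumes from the right place.
 */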
int
mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (virtq->stopped)
		return 0;
	ret = mlx5_vdpa_virtq_modify(virtq, 0);
	if (ret)
		return -1;
	virtq->stopped = true;
	DRV_LOG(DEBUG, "vid %u virtq %u was stopped.", priv->vid, index);
	return mlx5_vdpa_virtq_query(priv, index);
}

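/*
 * Query the hardware available/used indexes of a virtq, push them to the
 * vhost library as the new vring base and warn if the queue is in the
 * hardware error state.
 */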
int
mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_devx_virtq_attr attr = {0};
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
		DRV_LOG(ERR, "Failed to query virtq %d.", index);
		return -1;
	}
	DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
		"hw_used_index=%d", priv->vid, index,
		attr.hw_available_index, attr.hw_used_index);
	ret = rte_vhost_set_vring_base(priv->vid, index,
				       attr.hw_available_index,
				       attr.hw_used_index);
	if (ret) {
		DRV_LOG(ERR, "Failed to set virtq %d base.", index);
		return -1;
	}
	if (attr.state == MLX5_VIRTQ_STATE_ERROR)
		DRV_LOG(WARNING, "vid %d vring %d hw error=%hhu",
			priv->vid, index, attr.error_type);
	return 0;
}

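/*
 * Translate a host virtual address to a guest physical address by walking
 * the vhost memory regions; returns 0 when no region covers the address.
 */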
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (hva >= reg->host_user_addr &&
		    hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}
	return gpa;
}

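/*
 * Create and start one virtq: build the DevX attributes from the
 * negotiated features, create an event QP unless the guest polls and the
 * device supports the no-MSIX mode, register the UMEMs, translate the
 * ring addresses to GPAs, create the virtq object and move it to ready,
 * then hook the kick-fd interrupt and subscribe to error events.
 */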
static int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	struct rte_vhost_vring vq;
	struct mlx5_devx_virtq_attr attr = {0};
	uint64_t gpa;
	int ret;
	unsigned int i;
	uint16_t last_avail_idx;
	uint16_t last_used_idx;
	uint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;
	uint64_t cookie;

	ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
	if (ret)
		return -1;
	virtq->index = index;
	virtq->vq_size = vq.size;
	attr.tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
	attr.tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
	attr.tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
	attr.rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
	attr.virtio_version_1_0 = !!(priv->features & (1ULL <<
							VIRTIO_F_VERSION_1));
	attr.type = (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
			MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
	/*
	 * No need to create event QPs when the guest works in poll mode and
	 * the capability allows skipping them.
	 */
	attr.event_mode = vq.callfd != -1 || !(priv->caps.event_mode & (1 <<
					       MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
						      MLX5_VIRTQ_EVENT_MODE_QP :
						  MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
	if (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
		ret = mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd,
						&virtq->eqp);
		if (ret) {
			DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
				index);
			return -1;
		}
		attr.qp_id = virtq->eqp.fw_qp->id;
	} else {
		DRV_LOG(INFO, "Virtq %d works in poll mode, no need for event"
			" QPs and event mechanism.", index);
	}
	if (priv->caps.queue_counters_valid) {
		if (!virtq->counters)
			virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
							      (priv->cdev->ctx);
		if (!virtq->counters) {
			DRV_LOG(ERR, "Failed to create virtq counters for virtq"
				" %d.", index);
			goto error;
		}
		attr.counters_obj_id = virtq->counters->id;
	}
	/* Setup 3 UMEMs for each virtq. */
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
							  priv->caps.umems[i].b;
		virtq->umems[i].buf = rte_zmalloc(__func__,
						  virtq->umems[i].size, 4096);
		if (!virtq->umems[i].buf) {
			DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
				" %u.", i, index);
			goto error;
		}
		virtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx,
							virtq->umems[i].buf,
							virtq->umems[i].size,
							IBV_ACCESS_LOCAL_WRITE);
		if (!virtq->umems[i].obj) {
			DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
				i, index);
			goto error;
		}
		attr.umems[i].id = virtq->umems[i].obj->umem_id;
		attr.umems[i].offset = 0;
		attr.umems[i].size = virtq->umems[i].size;
	}
	if (attr.type == MLX5_VIRTQ_TYPE_SPLIT) {
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.desc);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
			goto error;
		}
		attr.desc_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.used);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
			goto error;
		}
		attr.used_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.avail);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
			goto error;
		}
		attr.available_addr = gpa;
	}
	ret = rte_vhost_get_vring_base(priv->vid, index, &last_avail_idx,
				       &last_used_idx);
	if (ret) {
		last_avail_idx = 0;
		last_used_idx = 0;
		DRV_LOG(WARNING, "Couldn't get vring base, indexes are set to 0.");
	} else {
		DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
				"virtq %d.", priv->vid, last_avail_idx,
				last_used_idx, index);
	}
	attr.hw_available_index = last_avail_idx;
	attr.hw_used_index = last_used_idx;
	attr.q_size = vq.size;
	attr.mkey = priv->gpa_mkey_index;
	attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
	attr.queue_index = index;
	attr.pd = priv->cdev->pdn;
	attr.hw_latency_mode = priv->hw_latency_mode;
	attr.hw_max_latency_us = priv->hw_max_latency_us;
	attr.hw_max_pending_comp = priv->hw_max_pending_comp;
	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);
	virtq->priv = priv;
	if (!virtq->virtq)
		goto error;
	claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
	if (mlx5_vdpa_virtq_modify(virtq, 1))
		goto error;
	rte_write32(virtq->index, priv->virtq_db_addr);
	/* Set up the kick-fd interrupt to relay guest doorbells. */
	virtq->intr_handle =
		rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
	if (virtq->intr_handle == NULL) {
		DRV_LOG(ERR, "Failed to allocate intr_handle.");
		goto error;
	}
	if (rte_intr_fd_set(virtq->intr_handle, vq.kickfd))
		goto error;
	if (rte_intr_fd_get(virtq->intr_handle) == -1) {
		DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
	} else {
		if (rte_intr_type_set(virtq->intr_handle, RTE_INTR_HANDLE_EXT))
			goto error;
		if (rte_intr_callback_register(virtq->intr_handle,
					       mlx5_vdpa_virtq_handler,
					       virtq)) {
			rte_intr_fd_set(virtq->intr_handle, -1);
			DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
				index);
			goto error;
		} else {
			DRV_LOG(DEBUG, "Register fd %d interrupt for virtq %d.",
				rte_intr_fd_get(virtq->intr_handle),
				index);
		}
	}
	/* Subscribe virtq error event. */
	virtq->version++;
	cookie = ((uint64_t)virtq->version << 32) + index;
	ret = mlx5_glue->devx_subscribe_devx_event(priv->err_chnl,
						   virtq->virtq->obj,
						   sizeof(event_num),
						   &event_num, cookie);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe device %d virtq %d error event.",
			priv->vid, index);
		rte_errno = errno;
		goto error;
	}
	virtq->stopped = false;
	/* Initial notification to ask QEMU to handle completed buffers. */
	if (virtq->eqp.cq.callfd != -1)
		eventfd_write(virtq->eqp.cq.callfd, (eventfd_t)1);
	DRV_LOG(DEBUG, "vid %u virtq %u was created successfully.", priv->vid,
		index);
	return 0;
error:
	mlx5_vdpa_virtq_unset(virtq);
	return -1;
}

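/*
 * Check every negotiated virtio feature against the reported device
 * capabilities; return -ENOTSUP on the first unsupported feature.
 */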
static int
mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
{
	if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		if (!(priv->caps.virtio_queue_type & (1 <<
						     MLX5_VIRTQ_TYPE_PACKED))) {
			DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
				"%d - it was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
		if (!priv->caps.tso_ipv4) {
			DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
		if (!priv->caps.tso_ipv6) {
			DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
		if (!priv->caps.tx_csum) {
			DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
		if (!priv->caps.rx_csum) {
			DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d -"
				" GUEST CSUM was not reported by HW/driver "
				"capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
		if (!priv->caps.virtio_version_1_0) {
			DRV_LOG(ERR, "Failed to enable version 1 for vdev %d -"
				" version 1 was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	return 0;
}

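/*
 * Device-level preparation: validate the negotiated features, map the
 * doorbell page, create the transport domain and one TIS per LAG port,
 * then set up every enabled virtq.
 */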
int
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_devx_tis_attr tis_attr = {0};
	struct ibv_context *ctx = priv->cdev->ctx;
	uint32_t i;
	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);

	if (ret || mlx5_vdpa_features_validate(priv)) {
		DRV_LOG(ERR, "Failed to configure negotiated features.");
		return -1;
	}
	if ((priv->features & (1ULL << VIRTIO_NET_F_CSUM)) == 0 &&
	    ((priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) > 0 ||
	     (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) > 0)) {
		/* Packet may be corrupted if TSO is enabled without CSUM. */
		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
	}
	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
		DRV_LOG(ERR, "Do not support more than %d virtqs (%d).",
			(int)priv->caps.max_num_virtio_queues * 2,
			(int)nr_vring);
		return -1;
	}
	/* Always map the entire page. */
	priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
				   PROT_WRITE, MAP_SHARED, ctx->cmd_fd,
				   priv->var->mmap_off);
	if (priv->virtq_db_addr == MAP_FAILED) {
		DRV_LOG(ERR, "Failed to map doorbell page %u.", errno);
		priv->virtq_db_addr = NULL;
		goto error;
	} else {
		DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
			priv->virtq_db_addr);
	}
	priv->td = mlx5_devx_cmd_create_td(ctx);
	if (!priv->td) {
		DRV_LOG(ERR, "Failed to create transport domain.");
		return -rte_errno;
	}
	tis_attr.transport_domain = priv->td->id;
	for (i = 0; i < priv->num_lag_ports; i++) {
		/* 0 is auto affinity, non-zero value to propose port. */
		tis_attr.lag_tx_port_affinity = i + 1;
		priv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);
		if (!priv->tiss[i]) {
			DRV_LOG(ERR, "Failed to create TIS %u.", i);
			goto error;
		}
	}
	priv->nr_virtqs = nr_vring;
	for (i = 0; i < nr_vring; i++)
		if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
			goto error;
	return 0;
error:
	mlx5_vdpa_virtqs_release(priv);
	return -1;
}

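/*
 * Compare the current vhost vring configuration with the one the virtq
 * was created from: 1 means it changed, 0 unchanged, -1 on query error.
 */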
static int
mlx5_vdpa_virtq_is_modified(struct mlx5_vdpa_priv *priv,
			    struct mlx5_vdpa_virtq *virtq)
{
	struct rte_vhost_vring vq;
	int ret = rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq);

	if (ret)
		return -1;
	if (vq.size != virtq->vq_size || vq.kickfd !=
	    rte_intr_fd_get(virtq->intr_handle))
		return 1;
	if (virtq->eqp.cq.cq_obj.cq) {
		if (vq.callfd != virtq->eqp.cq.callfd)
			return 1;
	} else if (vq.callfd != -1) {
		return 1;
	}
	return 0;
}

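/*
 * Enable or disable a virtq at run time. An enabled virtq whose vring
 * parameters changed is destroyed and re-created, and RSS steering is
 * refreshed whenever an RX queue changes state.
 */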
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
	if (!priv->configured) {
		virtq->enable = !!enable;
		return 0;
	}
	if (virtq->enable == !!enable) {
		if (!enable)
			return 0;
		ret = mlx5_vdpa_virtq_is_modified(priv, virtq);
		if (ret < 0) {
			DRV_LOG(ERR, "Virtq %d modify check failed.", index);
			return -1;
		}
		if (ret == 0)
			return 0;
		DRV_LOG(INFO, "Virtq %d was modified, recreate it.", index);
	}
	if (virtq->virtq) {
		virtq->enable = 0;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to disable steering "
					"for virtq %d.", index);
		}
		mlx5_vdpa_virtq_unset(virtq);
	}
	if (enable) {
		ret = mlx5_vdpa_virtq_setup(priv, index);
		if (ret) {
			DRV_LOG(ERR, "Failed to setup virtq %d.", index);
			return ret;
		}
		virtq->enable = 1;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to enable steering "
					"for virtq %d.", index);
		}
	}
	return 0;
}

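/*
 * Read the hardware virtio queue counters and fill up to n entries of
 * stats[], each value relative to the snapshot taken at the last reset.
 */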
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	struct mlx5_devx_virtio_q_couners_attr attr = {0};
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
			"is invalid.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
		return ret;
	}
	ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
	if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
		.value = attr.received_desc - virtq->reset.received_desc,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
		.value = attr.completed_desc - virtq->reset.completed_desc,
	};
	if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
		.value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
	};
	if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
		return ret;
	stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
		.value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
	};
	if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
		return ret;
	stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_INVALID_BUFFER,
		.value = attr.invalid_buffer - virtq->reset.invalid_buffer,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
		.value = attr.error_cqes - virtq->reset.error_cqes,
	};
	return ret;
}

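/*
 * Snapshot the current hardware counters as the new statistics baseline
 * for mlx5_vdpa_virtq_stats_get().
 */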
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to reset virtq %d statistics - virtq "
			"is invalid.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
						    &virtq->reset);
	if (ret)
		DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
			qid);
	return ret;
}