drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

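/*
 * Kick event handler: drain the guest's kickfd and ring the HW virtq
 * doorbell, enabling the host notifier on the first kick.
 */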
static void
mlx5_vdpa_virtq_handler(void *cb_arg)
{
	struct mlx5_vdpa_virtq *virtq = cb_arg;
	struct mlx5_vdpa_priv *priv = virtq->priv;
	uint64_t buf;
	int nbytes;

	if (rte_intr_fd_get(virtq->intr_handle) < 0)
		return;

	do {
		nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,
			      8);
		if (nbytes < 0) {
			if (errno == EINTR ||
			    errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			DRV_LOG(ERR, "Failed to read kickfd of virtq %d: %s",
				virtq->index, strerror(errno));
		}
		break;
	} while (1);
	rte_write32(virtq->index, priv->virtq_db_addr);
	if (virtq->notifier_state == MLX5_VDPA_NOTIFIER_STATE_DISABLED) {
		if (rte_vhost_host_notifier_ctrl(priv->vid, virtq->index, true))
			virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_ERR;
		else
			virtq->notifier_state =
					       MLX5_VDPA_NOTIFIER_STATE_ENABLED;
		DRV_LOG(INFO, "Virtq %u notifier state is %s.", virtq->index,
			virtq->notifier_state ==
				MLX5_VDPA_NOTIFIER_STATE_ENABLED ? "enabled" :
								    "disabled");
	}
	DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}

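/* Release a virtq's interrupt handler, HW object, umems and event QP. */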
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
	unsigned int i;
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (rte_intr_fd_get(virtq->intr_handle) != -1) {
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(virtq->intr_handle,
							mlx5_vdpa_virtq_handler,
							virtq);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
				"of virtq %d interrupt, retries = %d.",
				rte_intr_fd_get(virtq->intr_handle),
				(int)virtq->index, retries);

				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
			}
		}
		rte_intr_fd_set(virtq->intr_handle, -1);
	}
	rte_intr_instance_free(virtq->intr_handle);
	if (virtq->virtq) {
		ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
		if (ret)
			DRV_LOG(WARNING, "Failed to stop virtq %d.",
				virtq->index);
		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
	}
	virtq->virtq = NULL;
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		if (virtq->umems[i].obj)
			claim_zero(mlx5_glue->devx_umem_dereg
							 (virtq->umems[i].obj));
		rte_free(virtq->umems[i].buf);
	}
	memset(&virtq->umems, 0, sizeof(virtq->umems));
	if (virtq->eqp.fw_qp)
		mlx5_vdpa_event_qp_destroy(&virtq->eqp);
	virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
	return 0;
}

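/* Release all virtqs, TISes, the transport domain and the doorbell map. */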
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
	int i;
	struct mlx5_vdpa_virtq *virtq;

	for (i = 0; i < priv->nr_virtqs; i++) {
		virtq = &priv->virtqs[i];
		mlx5_vdpa_virtq_unset(virtq);
		if (virtq->counters)
			claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
	}
	for (i = 0; i < priv->num_lag_ports; i++) {
		if (priv->tiss[i]) {
			claim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));
			priv->tiss[i] = NULL;
		}
	}
	if (priv->td) {
		claim_zero(mlx5_devx_cmd_destroy(priv->td));
		priv->td = NULL;
	}
	if (priv->virtq_db_addr) {
		claim_zero(munmap(priv->virtq_db_addr, priv->var->length));
		priv->virtq_db_addr = NULL;
	}
	priv->features = 0;
	memset(priv->virtqs, 0, sizeof(*virtq) * priv->nr_virtqs);
	priv->nr_virtqs = 0;
}

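/* Move a virtq between the RDY (state != 0) and SUSPEND (state == 0) states. */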
int
mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
{
	struct mlx5_devx_virtq_attr attr = {
			.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
			.state = state ? MLX5_VIRTQ_STATE_RDY :
					 MLX5_VIRTQ_STATE_SUSPEND,
			.queue_index = virtq->index,
	};

	return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}

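/* Suspend a virtq and sync the vhost ring base with the HW indexes. */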
int
mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (virtq->stopped)
		return 0;
	ret = mlx5_vdpa_virtq_modify(virtq, 0);
	if (ret)
		return -1;
	virtq->stopped = true;
	DRV_LOG(DEBUG, "vid %u virtq %u was stopped.", priv->vid, index);
	return mlx5_vdpa_virtq_query(priv, index);
}

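/* Query HW ring indexes, push them to vhost and report HW errors. */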
int
mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_devx_virtq_attr attr = {0};
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
		DRV_LOG(ERR, "Failed to query virtq %d.", index);
		return -1;
	}
	DRV_LOG(INFO, "Query vid %d vring %d: hw_available_index=%d, "
		"hw_used_index=%d", priv->vid, index,
		attr.hw_available_index, attr.hw_used_index);
	ret = rte_vhost_set_vring_base(priv->vid, index,
				       attr.hw_available_index,
				       attr.hw_used_index);
	if (ret) {
		DRV_LOG(ERR, "Failed to set virtq %d base.", index);
		return -1;
	}
	if (attr.state == MLX5_VIRTQ_STATE_ERROR)
		DRV_LOG(WARNING, "vid %d vring %d hw error=%hhu",
			priv->vid, index, attr.error_type);
	return 0;
}

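/*
 * Translate a host virtual address to a guest physical address by scanning
 * the vhost memory regions; returns 0 when the HVA is not covered.
 */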
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (hva >= reg->host_user_addr &&
		    hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}
	return gpa;
}

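/*
 * Create the HW virtq object for one vhost ring: event QP and counters
 * setup, umem registration, ring address translation, interrupt
 * registration and error-event subscription.
 */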
static int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	struct rte_vhost_vring vq;
	struct mlx5_devx_virtq_attr attr = {0};
	uint64_t gpa;
	int ret;
	unsigned int i;
	uint16_t last_avail_idx;
	uint16_t last_used_idx;
	uint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;
	uint64_t cookie;

	ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
	if (ret)
		return -1;
	virtq->index = index;
	virtq->vq_size = vq.size;
	attr.tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
	attr.tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
	attr.tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
	attr.rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
	attr.virtio_version_1_0 = !!(priv->features & (1ULL <<
							VIRTIO_F_VERSION_1));
	attr.type = (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
			MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
	/*
	 * There is no need to create event QPs when the guest is in poll
	 * mode or when the capability allows skipping them.
	 */
	attr.event_mode = vq.callfd != -1 || !(priv->caps.event_mode & (1 <<
					       MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
						      MLX5_VIRTQ_EVENT_MODE_QP :
						  MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
	if (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
		ret = mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd,
						&virtq->eqp);
		if (ret) {
			DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
				index);
			return -1;
		}
		attr.qp_id = virtq->eqp.fw_qp->id;
	} else {
		DRV_LOG(INFO, "Virtq %d works in poll mode; no event QPs or"
			" event mechanism needed.", index);
	}
	if (priv->caps.queue_counters_valid) {
		if (!virtq->counters)
			virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
							      (priv->cdev->ctx);
		if (!virtq->counters) {
			DRV_LOG(ERR, "Failed to create virtq counters for virtq"
				" %d.", index);
			goto error;
		}
		attr.counters_obj_id = virtq->counters->id;
	}
	/* Setup 3 UMEMs for each virtq. */
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
							  priv->caps.umems[i].b;
		virtq->umems[i].buf = rte_zmalloc(__func__,
						  virtq->umems[i].size, 4096);
		if (!virtq->umems[i].buf) {
			DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
				" %u.", i, index);
			goto error;
		}
		virtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx,
							virtq->umems[i].buf,
							virtq->umems[i].size,
							IBV_ACCESS_LOCAL_WRITE);
		if (!virtq->umems[i].obj) {
			DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
				i, index);
			goto error;
		}
		attr.umems[i].id = virtq->umems[i].obj->umem_id;
		attr.umems[i].offset = 0;
		attr.umems[i].size = virtq->umems[i].size;
	}
	if (attr.type == MLX5_VIRTQ_TYPE_SPLIT) {
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.desc);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
			goto error;
		}
		attr.desc_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.used);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
			goto error;
		}
		attr.used_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.avail);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
			goto error;
		}
		attr.available_addr = gpa;
	}
	ret = rte_vhost_get_vring_base(priv->vid, index, &last_avail_idx,
				 &last_used_idx);
	if (ret) {
		last_avail_idx = 0;
		last_used_idx = 0;
		DRV_LOG(WARNING, "Couldn't get vring base, indexes set to 0.");
	} else {
		DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
				"virtq %d.", priv->vid, last_avail_idx,
				last_used_idx, index);
	}
	attr.hw_available_index = last_avail_idx;
	attr.hw_used_index = last_used_idx;
	attr.q_size = vq.size;
	attr.mkey = priv->gpa_mkey_index;
	attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
	attr.queue_index = index;
	attr.pd = priv->cdev->pdn;
	attr.hw_latency_mode = priv->hw_latency_mode;
	attr.hw_max_latency_us = priv->hw_max_latency_us;
	attr.hw_max_pending_comp = priv->hw_max_pending_comp;
	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);
	virtq->priv = priv;
	if (!virtq->virtq)
		goto error;
	claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
	if (mlx5_vdpa_virtq_modify(virtq, 1))
		goto error;
	rte_write32(virtq->index, priv->virtq_db_addr);
	/* Set up the kickfd interrupt handler to relay guest kicks. */
	virtq->intr_handle =
		rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
	if (virtq->intr_handle == NULL) {
		DRV_LOG(ERR, "Failed to allocate intr_handle.");
		goto error;
	}

	if (rte_intr_fd_set(virtq->intr_handle, vq.kickfd))
		goto error;

	if (rte_intr_fd_get(virtq->intr_handle) == -1) {
		DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
	} else {
		if (rte_intr_type_set(virtq->intr_handle, RTE_INTR_HANDLE_EXT))
			goto error;

		if (rte_intr_callback_register(virtq->intr_handle,
					       mlx5_vdpa_virtq_handler,
					       virtq)) {
			rte_intr_fd_set(virtq->intr_handle, -1);
			DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
				index);
			goto error;
		} else {
			DRV_LOG(DEBUG, "Register fd %d interrupt for virtq %d.",
				rte_intr_fd_get(virtq->intr_handle),
				index);
		}
	}
	/* Subscribe virtq error event. */
	virtq->version++;
	cookie = ((uint64_t)virtq->version << 32) + index;
	ret = mlx5_glue->devx_subscribe_devx_event(priv->err_chnl,
						   virtq->virtq->obj,
						   sizeof(event_num),
						   &event_num, cookie);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe device %d virtq %d error event.",
			priv->vid, index);
		rte_errno = errno;
		goto error;
	}
	virtq->stopped = false;
	/* Initial notification to ask QEMU to handle completed buffers. */
	if (virtq->eqp.cq.callfd != -1)
		eventfd_write(virtq->eqp.cq.callfd, (eventfd_t)1);
	DRV_LOG(DEBUG, "vid %u virtq %u was created successfully.", priv->vid,
		index);
	return 0;
error:
	mlx5_vdpa_virtq_unset(virtq);
	return -1;
}

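/* Check that each negotiated virtio feature is backed by a device capability. */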
static int
mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
{
	if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		if (!(priv->caps.virtio_queue_type & (1 <<
						     MLX5_VIRTQ_TYPE_PACKED))) {
			DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
				"%d - it was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
		if (!priv->caps.tso_ipv4) {
			DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
		if (!priv->caps.tso_ipv6) {
			DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
		if (!priv->caps.tx_csum) {
			DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
		if (!priv->caps.rx_csum) {
			DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d -"
				" GUEST CSUM was not reported by HW/driver "
				"capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
		if (!priv->caps.virtio_version_1_0) {
			DRV_LOG(ERR, "Failed to enable version 1 for vdev %d -"
				" version 1 was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	return 0;
}

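/*
 * Prepare the device virtqs: validate features, map the doorbell page,
 * create TD/TIS objects and set up each enabled virtq.
 */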
int
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_devx_tis_attr tis_attr = {0};
	struct ibv_context *ctx = priv->cdev->ctx;
	uint32_t i;
	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);

	if (ret || mlx5_vdpa_features_validate(priv)) {
		DRV_LOG(ERR, "Failed to configure negotiated features.");
		return -1;
	}
	if ((priv->features & (1ULL << VIRTIO_NET_F_CSUM)) == 0 &&
	    ((priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) > 0 ||
	     (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) > 0)) {
		/* Packets may be corrupted if TSO is enabled without CSUM. */
		DRV_LOG(INFO, "TSO is enabled without CSUM, forcing CSUM.");
		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
	}
	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
		DRV_LOG(ERR, "Cannot support more than %d virtqs (%d).",
			(int)priv->caps.max_num_virtio_queues * 2,
			(int)nr_vring);
		return -1;
	}
	/* Always map the entire page. */
	priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
				   PROT_WRITE, MAP_SHARED, ctx->cmd_fd,
				   priv->var->mmap_off);
	if (priv->virtq_db_addr == MAP_FAILED) {
		DRV_LOG(ERR, "Failed to map doorbell page %u.", errno);
		priv->virtq_db_addr = NULL;
		goto error;
	} else {
		DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
			priv->virtq_db_addr);
	}
	priv->td = mlx5_devx_cmd_create_td(ctx);
	if (!priv->td) {
		DRV_LOG(ERR, "Failed to create transport domain.");
		return -rte_errno;
	}
	tis_attr.transport_domain = priv->td->id;
	for (i = 0; i < priv->num_lag_ports; i++) {
		/* 0 means auto affinity; a non-zero value proposes a port. */
		tis_attr.lag_tx_port_affinity = i + 1;
		priv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);
		if (!priv->tiss[i]) {
			DRV_LOG(ERR, "Failed to create TIS %u.", i);
			goto error;
		}
	}
	priv->nr_virtqs = nr_vring;
	for (i = 0; i < nr_vring; i++)
		if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
			goto error;
	return 0;
error:
	mlx5_vdpa_virtqs_release(priv);
	return -1;
}

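/* Return 1 if the vring configuration changed since the virtq was set up. */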
static int
mlx5_vdpa_virtq_is_modified(struct mlx5_vdpa_priv *priv,
			    struct mlx5_vdpa_virtq *virtq)
{
	struct rte_vhost_vring vq;
	int ret = rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq);

	if (ret)
		return -1;
	if (vq.size != virtq->vq_size || vq.kickfd !=
	    rte_intr_fd_get(virtq->intr_handle))
		return 1;
	if (virtq->eqp.cq.cq_obj.cq) {
		if (vq.callfd != virtq->eqp.cq.callfd)
			return 1;
	} else if (vq.callfd != -1) {
		return 1;
	}
	return 0;
}

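/*
 * Enable or disable a virtq, recreating it if its vhost configuration was
 * modified, and update steering for RX virtqs.
 */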
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
	if (!priv->configured) {
		virtq->enable = !!enable;
		return 0;
	}
	if (virtq->enable == !!enable) {
		if (!enable)
			return 0;
		ret = mlx5_vdpa_virtq_is_modified(priv, virtq);
		if (ret < 0) {
			DRV_LOG(ERR, "Virtq %d modify check failed.", index);
			return -1;
		}
		if (ret == 0)
			return 0;
		DRV_LOG(INFO, "Virtq %d was modified, recreate it.", index);
	}
	if (virtq->virtq) {
		virtq->enable = 0;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to disable steering "
					"for virtq %d.", index);
		}
		mlx5_vdpa_virtq_unset(virtq);
	}
	if (enable) {
		ret = mlx5_vdpa_virtq_setup(priv, index);
		if (ret) {
			DRV_LOG(ERR, "Failed to setup virtq %d.", index);
			return ret;
		}
		virtq->enable = 1;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to enable steering "
					"for virtq %d.", index);
		}
	}
	return 0;
}

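/* Read HW counters for one virtq and fill up to n entries of the stats array. */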
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	struct mlx5_devx_virtio_q_couners_attr attr = {0};
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
			"is invalid.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
		return ret;
	}
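	/*
	 * Fill stats entries up to the smaller of the caller's array size
	 * and the driver's stats count; each check below stops the filling
	 * once the array is full. Counters are reported relative to the
	 * values saved at the last reset.
	 */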
	ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
	if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
		.value = attr.received_desc - virtq->reset.received_desc,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
		.value = attr.completed_desc - virtq->reset.completed_desc,
	};
	if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
		.value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
	};
	if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
		return ret;
	stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
		.value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
	};
	if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
		return ret;
	stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_INVALID_BUFFER,
		.value = attr.invalid_buffer - virtq->reset.invalid_buffer,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
		.value = attr.error_cqes - virtq->reset.error_cqes,
	};
	return ret;
}

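/* Snapshot the HW counters as the new baseline for relative stats reporting. */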
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
			"is invalid.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
						    &virtq->reset);
	if (ret)
		DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
			qid);
	return ret;
}