vdpa/mlx5: reuse event queues
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


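/*
 * Guest kickfd handler: drain the kick eventfd and ring the HW virtq
 * doorbell. On the first kick, also try to enable the vhost host notifier
 * so that later kicks reach the device doorbell directly.
 */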
static void
mlx5_vdpa_virtq_kick_handler(void *cb_arg)
{
        struct mlx5_vdpa_virtq *virtq = cb_arg;
        struct mlx5_vdpa_priv *priv = virtq->priv;
        uint64_t buf;
        int nbytes;
        int retry;

        if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
                DRV_LOG(ERR,  "device %d queue %d down, skip kick handling",
                        priv->vid, virtq->index);
                return;
        }
        if (rte_intr_fd_get(virtq->intr_handle) < 0)
                return;
        for (retry = 0; retry < 3; ++retry) {
                nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,
                              8);
                if (nbytes < 0) {
                        if (errno == EINTR ||
                            errno == EWOULDBLOCK ||
                            errno == EAGAIN)
                                continue;
                        DRV_LOG(ERR,  "Failed to read kickfd of virtq %d: %s",
                                virtq->index, strerror(errno));
                }
                break;
        }
        if (nbytes < 0)
                return;
        rte_write32(virtq->index, priv->virtq_db_addr);
        if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
                DRV_LOG(ERR,  "device %d queue %d down, skip kick handling",
                        priv->vid, virtq->index);
                return;
        }
        if (virtq->notifier_state == MLX5_VDPA_NOTIFIER_STATE_DISABLED) {
                if (rte_vhost_host_notifier_ctrl(priv->vid, virtq->index, true))
                        virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_ERR;
                else
                        virtq->notifier_state =
                                               MLX5_VDPA_NOTIFIER_STATE_ENABLED;
                DRV_LOG(INFO, "Virtq %u notifier state is %s.", virtq->index,
                        virtq->notifier_state ==
                                MLX5_VDPA_NOTIFIER_STATE_ENABLED ? "enabled" :
                                                                    "disabled");
        }
        DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}

/* Release cached VQ resources. */
void
mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
{
        unsigned int i, j;

        for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
                struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];

                for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
                        if (virtq->umems[j].obj) {
                                claim_zero(mlx5_glue->devx_umem_dereg
                                                        (virtq->umems[j].obj));
                                virtq->umems[j].obj = NULL;
                        }
                        if (virtq->umems[j].buf) {
                                rte_free(virtq->umems[j].buf);
                                virtq->umems[j].buf = NULL;
                        }
                        virtq->umems[j].size = 0;
                }
                if (virtq->eqp.fw_qp)
                        mlx5_vdpa_event_qp_destroy(&virtq->eqp);
        }
}

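/* Unregister the kick interrupt, stop the HW virtq and destroy its object. */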
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
        int ret = -EAGAIN;

        if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
                while (ret == -EAGAIN) {
                        ret = rte_intr_callback_unregister(virtq->intr_handle,
                                        mlx5_vdpa_virtq_kick_handler, virtq);
                        if (ret == -EAGAIN) {
                                DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
                                        rte_intr_fd_get(virtq->intr_handle),
                                        virtq->index);
                                usleep(MLX5_VDPA_INTR_RETRIES_USEC);
                        }
                }
                rte_intr_fd_set(virtq->intr_handle, -1);
        }
        rte_intr_instance_free(virtq->intr_handle);
        if (virtq->virtq) {
                ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
                if (ret)
                        DRV_LOG(WARNING, "Failed to stop virtq %d.",
                                virtq->index);
                claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
        }
        virtq->virtq = NULL;
        virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
        return 0;
}

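/* Release all virtqs of the device and reset the negotiated features. */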
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
        int i;

        for (i = 0; i < priv->nr_virtqs; i++)
                mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
        priv->features = 0;
        priv->nr_virtqs = 0;
}

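/* Move the HW virtq object to the READY (state != 0) or SUSPEND state. */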
int
mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
{
        struct mlx5_devx_virtq_attr attr = {
                        .type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
                        .state = state ? MLX5_VIRTQ_STATE_RDY :
                                         MLX5_VIRTQ_STATE_SUSPEND,
                        .queue_index = virtq->index,
        };

        return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}

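/* Suspend the HW virtq and sync its indexes back to the vhost library. */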
int
mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
{
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
        int ret;

        if (virtq->stopped)
                return 0;
        ret = mlx5_vdpa_virtq_modify(virtq, 0);
        if (ret)
                return -1;
        virtq->stopped = true;
        DRV_LOG(DEBUG, "vid %u virtq %u was stopped.", priv->vid, index);
        return mlx5_vdpa_virtq_query(priv, index);
}

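/* Query the HW virtq indexes and propagate them to the vhost vring base. */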
int
mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index)
{
        struct mlx5_devx_virtq_attr attr = {0};
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
        int ret;

        if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
                DRV_LOG(ERR, "Failed to query virtq %d.", index);
                return -1;
        }
        DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
                "hw_used_index=%d", priv->vid, index,
                attr.hw_available_index, attr.hw_used_index);
        ret = rte_vhost_set_vring_base(priv->vid, index,
                                       attr.hw_available_index,
                                       attr.hw_used_index);
        if (ret) {
                DRV_LOG(ERR, "Failed to set virtq %d base.", index);
                return -1;
        }
        if (attr.state == MLX5_VIRTQ_STATE_ERROR)
                DRV_LOG(WARNING, "vid %d vring %d hw error=%hhu",
                        priv->vid, index, attr.error_type);
        return 0;
}

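/* Translate a host virtual address to a guest physical address. */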
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
        struct rte_vhost_mem_region *reg;
        uint32_t i;
        uint64_t gpa = 0;

        for (i = 0; i < mem->nregions; i++) {
                reg = &mem->regions[i];
                if (hva >= reg->host_user_addr &&
                    hva < reg->host_user_addr + reg->size) {
                        gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
                        break;
                }
        }
        return gpa;
}

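/*
 * Create and start the HW virtq object for the given vring: event QP,
 * counters, UMEMs, ring addresses, kickfd interrupt and error event
 * subscription.
 */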
static int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
        struct rte_vhost_vring vq;
        struct mlx5_devx_virtq_attr attr = {0};
        uint64_t gpa;
        int ret;
        unsigned int i;
        uint16_t last_avail_idx;
        uint16_t last_used_idx;
        uint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;
        uint64_t cookie;

        ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
        if (ret)
                return -1;
        if (vq.size == 0)
                return 0;
        virtq->index = index;
        virtq->vq_size = vq.size;
        attr.tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
        attr.tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
        attr.tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
        attr.rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
        attr.virtio_version_1_0 = !!(priv->features & (1ULL <<
                                                        VIRTIO_F_VERSION_1));
        attr.type = (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
                        MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
        /*
         * There is no need to create event QPs when the guest works in poll
         * mode and the capability allows it.
         */
        attr.event_mode = vq.callfd != -1 || !(priv->caps.event_mode & (1 <<
                                               MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
                                                      MLX5_VIRTQ_EVENT_MODE_QP :
                                                  MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
        if (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
                ret = mlx5_vdpa_event_qp_prepare(priv, vq.size, vq.callfd,
                                                &virtq->eqp);
                if (ret) {
                        DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
                                index);
                        return -1;
                }
                attr.qp_id = virtq->eqp.fw_qp->id;
        } else {
                DRV_LOG(INFO, "Virtq %d works in poll mode, no need for"
                        " event QPs and event mechanism.", index);
        }
        if (priv->caps.queue_counters_valid) {
                if (!virtq->counters)
                        virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
                                                              (priv->cdev->ctx);
                if (!virtq->counters) {
                        DRV_LOG(ERR, "Failed to create virtq counters for virtq"
                                " %d.", index);
                        goto error;
                }
                attr.counters_obj_id = virtq->counters->id;
        }
        /* Setup 3 UMEMs for each virtq. */
        for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
                uint32_t size;
                void *buf;
                struct mlx5dv_devx_umem *obj;

                size = priv->caps.umems[i].a * vq.size + priv->caps.umems[i].b;
                if (virtq->umems[i].size == size &&
                    virtq->umems[i].obj != NULL) {
                        /* Reuse registered memory. */
                        memset(virtq->umems[i].buf, 0, size);
                        goto reuse;
                }
                if (virtq->umems[i].obj)
                        claim_zero(mlx5_glue->devx_umem_dereg
                                   (virtq->umems[i].obj));
                if (virtq->umems[i].buf)
                        rte_free(virtq->umems[i].buf);
                virtq->umems[i].size = 0;
                virtq->umems[i].obj = NULL;
                virtq->umems[i].buf = NULL;
                buf = rte_zmalloc(__func__, size, 4096);
                if (buf == NULL) {
                        DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
                                " %u.", i, index);
                        goto error;
                }
                obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx, buf, size,
                                               IBV_ACCESS_LOCAL_WRITE);
                if (obj == NULL) {
                        DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
                                i, index);
                        goto error;
                }
                virtq->umems[i].size = size;
                virtq->umems[i].buf = buf;
                virtq->umems[i].obj = obj;
reuse:
                attr.umems[i].id = virtq->umems[i].obj->umem_id;
                attr.umems[i].offset = 0;
                attr.umems[i].size = virtq->umems[i].size;
        }
        if (attr.type == MLX5_VIRTQ_TYPE_SPLIT) {
                gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
                                           (uint64_t)(uintptr_t)vq.desc);
                if (!gpa) {
                        DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
                        goto error;
                }
                attr.desc_addr = gpa;
                gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
                                           (uint64_t)(uintptr_t)vq.used);
                if (!gpa) {
                        DRV_LOG(ERR, "Failed to get GPA for used ring.");
                        goto error;
                }
                attr.used_addr = gpa;
                gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
                                           (uint64_t)(uintptr_t)vq.avail);
                if (!gpa) {
                        DRV_LOG(ERR, "Failed to get GPA for available ring.");
                        goto error;
                }
                attr.available_addr = gpa;
        }
        ret = rte_vhost_get_vring_base(priv->vid, index, &last_avail_idx,
                                 &last_used_idx);
        if (ret) {
                last_avail_idx = 0;
                last_used_idx = 0;
                DRV_LOG(WARNING, "Couldn't get vring base, indexes are set to 0.");
        } else {
                DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
                                "virtq %d.", priv->vid, last_avail_idx,
                                last_used_idx, index);
        }
        attr.hw_available_index = last_avail_idx;
        attr.hw_used_index = last_used_idx;
        attr.q_size = vq.size;
        attr.mkey = priv->gpa_mkey_index;
        attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
        attr.queue_index = index;
        attr.pd = priv->cdev->pdn;
        attr.hw_latency_mode = priv->hw_latency_mode;
        attr.hw_max_latency_us = priv->hw_max_latency_us;
        attr.hw_max_pending_comp = priv->hw_max_pending_comp;
        virtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);
        virtq->priv = priv;
        if (!virtq->virtq)
                goto error;
        claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
        if (mlx5_vdpa_virtq_modify(virtq, 1))
                goto error;
        virtq->priv = priv;
        rte_write32(virtq->index, priv->virtq_db_addr);
        /* Setup doorbell mapping. */
        virtq->intr_handle =
                rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
        if (virtq->intr_handle == NULL) {
                DRV_LOG(ERR, "Fail to allocate intr_handle");
                goto error;
        }

        if (rte_intr_fd_set(virtq->intr_handle, vq.kickfd))
                goto error;

        if (rte_intr_fd_get(virtq->intr_handle) == -1) {
                DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
        } else {
                if (rte_intr_type_set(virtq->intr_handle, RTE_INTR_HANDLE_EXT))
                        goto error;

                if (rte_intr_callback_register(virtq->intr_handle,
                                               mlx5_vdpa_virtq_kick_handler,
                                               virtq)) {
                        rte_intr_fd_set(virtq->intr_handle, -1);
                        DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
                                index);
                        goto error;
                } else {
                        DRV_LOG(DEBUG, "Register fd %d interrupt for virtq %d.",
                                rte_intr_fd_get(virtq->intr_handle),
                                index);
                }
        }
        /* Subscribe virtq error event. */
        virtq->version++;
        cookie = ((uint64_t)virtq->version << 32) + index;
        ret = mlx5_glue->devx_subscribe_devx_event(priv->err_chnl,
                                                   virtq->virtq->obj,
                                                   sizeof(event_num),
                                                   &event_num, cookie);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe device %d virtq %d error event.",
                        priv->vid, index);
                rte_errno = errno;
                goto error;
        }
        virtq->stopped = false;
        /* Initial notification to ask Qemu to handle completed buffers. */
        if (virtq->eqp.cq.callfd != -1)
                eventfd_write(virtq->eqp.cq.callfd, (eventfd_t)1);
        DRV_LOG(DEBUG, "vid %u virtq %u was created successfully.", priv->vid,
                index);
        return 0;
error:
        mlx5_vdpa_virtq_unset(virtq);
        return -1;
}

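/* Check that each negotiated virtio feature is supported by the HW caps. */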
static int
mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
{
        if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
                if (!(priv->caps.virtio_queue_type & (1 <<
                                                     MLX5_VIRTQ_TYPE_PACKED))) {
                        DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
                                "%d - it was not reported by HW/driver"
                                " capability.", priv->vid);
                        return -ENOTSUP;
                }
        }
        if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
                if (!priv->caps.tso_ipv4) {
                        DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
                                " was not reported by HW/driver capability.",
                                priv->vid);
                        return -ENOTSUP;
                }
        }
        if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
                if (!priv->caps.tso_ipv6) {
                        DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
                                " was not reported by HW/driver capability.",
                                priv->vid);
                        return -ENOTSUP;
                }
        }
        if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
                if (!priv->caps.tx_csum) {
                        DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
                                " was not reported by HW/driver capability.",
                                priv->vid);
                        return -ENOTSUP;
                }
        }
        if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
                if (!priv->caps.rx_csum) {
                        DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d -"
                                " GUEST CSUM was not reported by HW/driver "
                                "capability.", priv->vid);
                        return -ENOTSUP;
                }
        }
        if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
                if (!priv->caps.virtio_version_1_0) {
                        DRV_LOG(ERR, "Failed to enable version 1 for vdev %d -"
                                " version 1 was not reported by HW/driver"
                                " capability.", priv->vid);
                        return -ENOTSUP;
                }
        }
        return 0;
}

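/* Validate the negotiated features and create all the enabled virtqs. */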
int
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
        uint32_t i;
        uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
        int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);

        if (ret || mlx5_vdpa_features_validate(priv)) {
                DRV_LOG(ERR, "Failed to configure negotiated features.");
                return -1;
        }
        if ((priv->features & (1ULL << VIRTIO_NET_F_CSUM)) == 0 &&
            ((priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) > 0 ||
             (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) > 0)) {
                /* Packet may be corrupted if TSO is enabled without CSUM. */
                DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
                priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
        }
        if (nr_vring > priv->caps.max_num_virtio_queues) {
                DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
                        (int)priv->caps.max_num_virtio_queues,
                        (int)nr_vring);
                return -1;
        }
        priv->nr_virtqs = nr_vring;
        for (i = 0; i < nr_vring; i++)
                if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
                        goto error;
        return 0;
error:
        mlx5_vdpa_virtqs_release(priv);
        return -1;
}

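/* Check whether the vring configuration changed since the virtq creation. */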
static int
mlx5_vdpa_virtq_is_modified(struct mlx5_vdpa_priv *priv,
                            struct mlx5_vdpa_virtq *virtq)
{
        struct rte_vhost_vring vq;
        int ret = rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq);

        if (ret)
                return -1;
        if (vq.size != virtq->vq_size || vq.kickfd !=
            rte_intr_fd_get(virtq->intr_handle))
                return 1;
        if (virtq->eqp.cq.cq_obj.cq) {
                if (vq.callfd != virtq->eqp.cq.callfd)
                        return 1;
        } else if (vq.callfd != -1) {
                return 1;
        }
        return 0;
}

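/*
 * Enable or disable a virtq: recreate or destroy its HW object and update
 * the RX steering when a receive queue is touched.
 */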
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
        int ret;

        DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
                virtq->enable ? "en" : "dis", enable ? "en" : "dis");
        if (priv->state == MLX5_VDPA_STATE_PROBED) {
                virtq->enable = !!enable;
                return 0;
        }
        if (virtq->enable == !!enable) {
                if (!enable)
                        return 0;
                ret = mlx5_vdpa_virtq_is_modified(priv, virtq);
                if (ret < 0) {
                        DRV_LOG(ERR, "Virtq %d modify check failed.", index);
                        return -1;
                }
                if (ret == 0)
                        return 0;
                DRV_LOG(INFO, "Virtq %d was modified, recreate it.", index);
        }
        if (virtq->virtq) {
                virtq->enable = 0;
                if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
                        ret = mlx5_vdpa_steer_update(priv);
                        if (ret)
                                DRV_LOG(WARNING, "Failed to disable steering "
                                        "for virtq %d.", index);
                }
                mlx5_vdpa_virtq_unset(virtq);
        }
        if (enable) {
                ret = mlx5_vdpa_virtq_setup(priv, index);
                if (ret) {
                        DRV_LOG(ERR, "Failed to setup virtq %d.", index);
                        return ret;
                }
                virtq->enable = 1;
                if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
                        ret = mlx5_vdpa_steer_update(priv);
                        if (ret)
                                DRV_LOG(WARNING, "Failed to enable steering "
                                        "for virtq %d.", index);
                }
        }
        return 0;
}

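/*
 * Read the virtq HW counters and fill the vdpa statistics array, relative
 * to the last counters reset.
 */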
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
                          struct rte_vdpa_stat *stats, unsigned int n)
{
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
        struct mlx5_devx_virtio_q_couners_attr *attr = &virtq->stats;
        int ret;

        if (!virtq->counters) {
                DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
                        "is invalid.", qid);
                return -EINVAL;
        }
        ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, attr);
        if (ret) {
                DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
                return ret;
        }
        ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
        if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
                return ret;
        stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
                .value = attr->received_desc - virtq->reset.received_desc,
        };
        if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
                return ret;
        stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
                .value = attr->completed_desc - virtq->reset.completed_desc,
        };
        if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
                return ret;
        stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
                .value = attr->bad_desc_errors - virtq->reset.bad_desc_errors,
        };
        if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
                return ret;
        stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
                .value = attr->exceed_max_chain - virtq->reset.exceed_max_chain,
        };
        if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
                return ret;
        stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_INVALID_BUFFER,
                .value = attr->invalid_buffer - virtq->reset.invalid_buffer,
        };
        if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
                return ret;
        stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
                .id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
                .value = attr->error_cqes - virtq->reset.error_cqes,
        };
        return ret;
}

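/* Snapshot the current HW counters as the new statistics baseline. */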
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
{
        struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
        int ret;

        if (virtq->counters == NULL) /* VQ not enabled. */
                return 0;
        ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
                                                    &virtq->reset);
        if (ret)
                DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
                        qid);
        return ret;
}