common/mlx5: share protection domain object
[dpdk.git] / drivers / vdpa / mlx5 / mlx5_vdpa_virtq.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 Mellanox Technologies, Ltd
3  */
4 #include <string.h>
5 #include <unistd.h>
6 #include <sys/mman.h>
7
8 #include <rte_malloc.h>
9 #include <rte_errno.h>
10 #include <rte_io.h>
11
12 #include <mlx5_common.h>
13
14 #include "mlx5_vdpa_utils.h"
15 #include "mlx5_vdpa.h"
16
17
18 static void
19 mlx5_vdpa_virtq_handler(void *cb_arg)
20 {
21         struct mlx5_vdpa_virtq *virtq = cb_arg;
22         struct mlx5_vdpa_priv *priv = virtq->priv;
23         uint64_t buf;
24         int nbytes;
25
26         do {
27                 nbytes = read(virtq->intr_handle.fd, &buf, 8);
28                 if (nbytes < 0) {
29                         if (errno == EINTR ||
30                             errno == EWOULDBLOCK ||
31                             errno == EAGAIN)
32                                 continue;
33                         DRV_LOG(ERR,  "Failed to read kickfd of virtq %d: %s",
34                                 virtq->index, strerror(errno));
35                 }
36                 break;
37         } while (1);
38         rte_write32(virtq->index, priv->virtq_db_addr);
39         if (virtq->notifier_state == MLX5_VDPA_NOTIFIER_STATE_DISABLED) {
40                 if (rte_vhost_host_notifier_ctrl(priv->vid, virtq->index, true))
41                         virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_ERR;
42                 else
43                         virtq->notifier_state =
44                                                MLX5_VDPA_NOTIFIER_STATE_ENABLED;
45                 DRV_LOG(INFO, "Virtq %u notifier state is %s.", virtq->index,
46                         virtq->notifier_state ==
47                                 MLX5_VDPA_NOTIFIER_STATE_ENABLED ? "enabled" :
48                                                                     "disabled");
49         }
50         DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
51 }
52
/*
 * Release all resources attached to one virtq: the kick-fd interrupt
 * callback, the virtq DevX object (stopping it first), the three UMEMs
 * and the event QP.  Resets the notifier state to DISABLED.
 * Always returns 0.
 */
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
	unsigned int i;
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (virtq->intr_handle.fd != -1) {
		/*
		 * Unregister can transiently fail with -EAGAIN while the
		 * callback is executing; retry a bounded number of times
		 * with a small sleep between attempts.
		 */
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(&virtq->intr_handle,
							mlx5_vdpa_virtq_handler,
							virtq);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
					"of virtq %d interrupt, retries = %d.",
					virtq->intr_handle.fd,
					(int)virtq->index, retries);
				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
			}
		}
		virtq->intr_handle.fd = -1;
	}
	if (virtq->virtq) {
		/* Suspend the queue before destroying its DevX object. */
		ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
		if (ret)
			DRV_LOG(WARNING, "Failed to stop virtq %d.",
				virtq->index);
		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
	}
	virtq->virtq = NULL;
	/* Deregister and free the UMEM buffers backing the virtq object. */
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		if (virtq->umems[i].obj)
			claim_zero(mlx5_glue->devx_umem_dereg
							 (virtq->umems[i].obj));
		if (virtq->umems[i].buf)
			rte_free(virtq->umems[i].buf);
	}
	memset(&virtq->umems, 0, sizeof(virtq->umems));
	if (virtq->eqp.fw_qp)
		mlx5_vdpa_event_qp_destroy(&virtq->eqp);
	virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
	return 0;
}
96
97 void
98 mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
99 {
100         int i;
101         struct mlx5_vdpa_virtq *virtq;
102
103         for (i = 0; i < priv->nr_virtqs; i++) {
104                 virtq = &priv->virtqs[i];
105                 mlx5_vdpa_virtq_unset(virtq);
106                 if (virtq->counters)
107                         claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
108         }
109         for (i = 0; i < priv->num_lag_ports; i++) {
110                 if (priv->tiss[i]) {
111                         claim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));
112                         priv->tiss[i] = NULL;
113                 }
114         }
115         if (priv->td) {
116                 claim_zero(mlx5_devx_cmd_destroy(priv->td));
117                 priv->td = NULL;
118         }
119         if (priv->virtq_db_addr) {
120                 claim_zero(munmap(priv->virtq_db_addr, priv->var->length));
121                 priv->virtq_db_addr = NULL;
122         }
123         priv->features = 0;
124         memset(priv->virtqs, 0, sizeof(*virtq) * priv->nr_virtqs);
125         priv->nr_virtqs = 0;
126 }
127
128 int
129 mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
130 {
131         struct mlx5_devx_virtq_attr attr = {
132                         .type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
133                         .state = state ? MLX5_VIRTQ_STATE_RDY :
134                                          MLX5_VIRTQ_STATE_SUSPEND,
135                         .queue_index = virtq->index,
136         };
137
138         return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
139 }
140
141 int
142 mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
143 {
144         struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
145         int ret;
146
147         if (virtq->stopped)
148                 return 0;
149         ret = mlx5_vdpa_virtq_modify(virtq, 0);
150         if (ret)
151                 return -1;
152         virtq->stopped = true;
153         DRV_LOG(DEBUG, "vid %u virtq %u was stopped.", priv->vid, index);
154         return mlx5_vdpa_virtq_query(priv, index);
155 }
156
157 int
158 mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index)
159 {
160         struct mlx5_devx_virtq_attr attr = {0};
161         struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
162         int ret;
163
164         if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
165                 DRV_LOG(ERR, "Failed to query virtq %d.", index);
166                 return -1;
167         }
168         DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
169                 "hw_used_index=%d", priv->vid, index,
170                 attr.hw_available_index, attr.hw_used_index);
171         ret = rte_vhost_set_vring_base(priv->vid, index,
172                                        attr.hw_available_index,
173                                        attr.hw_used_index);
174         if (ret) {
175                 DRV_LOG(ERR, "Failed to set virtq %d base.", index);
176                 return -1;
177         }
178         if (attr.state == MLX5_VIRTQ_STATE_ERROR)
179                 DRV_LOG(WARNING, "vid %d vring %d hw error=%hhu",
180                         priv->vid, index, attr.error_type);
181         return 0;
182 }
183
184 static uint64_t
185 mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
186 {
187         struct rte_vhost_mem_region *reg;
188         uint32_t i;
189         uint64_t gpa = 0;
190
191         for (i = 0; i < mem->nregions; i++) {
192                 reg = &mem->regions[i];
193                 if (hva >= reg->host_user_addr &&
194                     hva < reg->host_user_addr + reg->size) {
195                         gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
196                         break;
197                 }
198         }
199         return gpa;
200 }
201
202 static int
203 mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
204 {
205         struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
206         struct rte_vhost_vring vq;
207         struct mlx5_devx_virtq_attr attr = {0};
208         uint64_t gpa;
209         int ret;
210         unsigned int i;
211         uint16_t last_avail_idx;
212         uint16_t last_used_idx;
213         uint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;
214         uint64_t cookie;
215
216         ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
217         if (ret)
218                 return -1;
219         virtq->index = index;
220         virtq->vq_size = vq.size;
221         attr.tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
222         attr.tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
223         attr.tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
224         attr.rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
225         attr.virtio_version_1_0 = !!(priv->features & (1ULL <<
226                                                         VIRTIO_F_VERSION_1));
227         attr.type = (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
228                         MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
229         /*
230          * No need event QPs creation when the guest in poll mode or when the
231          * capability allows it.
232          */
233         attr.event_mode = vq.callfd != -1 || !(priv->caps.event_mode & (1 <<
234                                                MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
235                                                       MLX5_VIRTQ_EVENT_MODE_QP :
236                                                   MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
237         if (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
238                 ret = mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd,
239                                                 &virtq->eqp);
240                 if (ret) {
241                         DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
242                                 index);
243                         return -1;
244                 }
245                 attr.qp_id = virtq->eqp.fw_qp->id;
246         } else {
247                 DRV_LOG(INFO, "Virtq %d is, for sure, working by poll mode, no"
248                         " need event QPs and event mechanism.", index);
249         }
250         if (priv->caps.queue_counters_valid) {
251                 if (!virtq->counters)
252                         virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
253                                                               (priv->cdev->ctx);
254                 if (!virtq->counters) {
255                         DRV_LOG(ERR, "Failed to create virtq couners for virtq"
256                                 " %d.", index);
257                         goto error;
258                 }
259                 attr.counters_obj_id = virtq->counters->id;
260         }
261         /* Setup 3 UMEMs for each virtq. */
262         for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
263                 virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
264                                                           priv->caps.umems[i].b;
265                 virtq->umems[i].buf = rte_zmalloc(__func__,
266                                                   virtq->umems[i].size, 4096);
267                 if (!virtq->umems[i].buf) {
268                         DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
269                                 " %u.", i, index);
270                         goto error;
271                 }
272                 virtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx,
273                                                         virtq->umems[i].buf,
274                                                         virtq->umems[i].size,
275                                                         IBV_ACCESS_LOCAL_WRITE);
276                 if (!virtq->umems[i].obj) {
277                         DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
278                                 i, index);
279                         goto error;
280                 }
281                 attr.umems[i].id = virtq->umems[i].obj->umem_id;
282                 attr.umems[i].offset = 0;
283                 attr.umems[i].size = virtq->umems[i].size;
284         }
285         if (attr.type == MLX5_VIRTQ_TYPE_SPLIT) {
286                 gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
287                                            (uint64_t)(uintptr_t)vq.desc);
288                 if (!gpa) {
289                         DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
290                         goto error;
291                 }
292                 attr.desc_addr = gpa;
293                 gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
294                                            (uint64_t)(uintptr_t)vq.used);
295                 if (!gpa) {
296                         DRV_LOG(ERR, "Failed to get GPA for used ring.");
297                         goto error;
298                 }
299                 attr.used_addr = gpa;
300                 gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
301                                            (uint64_t)(uintptr_t)vq.avail);
302                 if (!gpa) {
303                         DRV_LOG(ERR, "Failed to get GPA for available ring.");
304                         goto error;
305                 }
306                 attr.available_addr = gpa;
307         }
308         ret = rte_vhost_get_vring_base(priv->vid, index, &last_avail_idx,
309                                  &last_used_idx);
310         if (ret) {
311                 last_avail_idx = 0;
312                 last_used_idx = 0;
313                 DRV_LOG(WARNING, "Couldn't get vring base, idx are set to 0");
314         } else {
315                 DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
316                                 "virtq %d.", priv->vid, last_avail_idx,
317                                 last_used_idx, index);
318         }
319         attr.hw_available_index = last_avail_idx;
320         attr.hw_used_index = last_used_idx;
321         attr.q_size = vq.size;
322         attr.mkey = priv->gpa_mkey_index;
323         attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
324         attr.queue_index = index;
325         attr.pd = priv->cdev->pdn;
326         attr.hw_latency_mode = priv->hw_latency_mode;
327         attr.hw_max_latency_us = priv->hw_max_latency_us;
328         attr.hw_max_pending_comp = priv->hw_max_pending_comp;
329         virtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);
330         virtq->priv = priv;
331         if (!virtq->virtq)
332                 goto error;
333         claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
334         if (mlx5_vdpa_virtq_modify(virtq, 1))
335                 goto error;
336         virtq->priv = priv;
337         rte_write32(virtq->index, priv->virtq_db_addr);
338         /* Setup doorbell mapping. */
339         virtq->intr_handle.fd = vq.kickfd;
340         if (virtq->intr_handle.fd == -1) {
341                 DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
342         } else {
343                 virtq->intr_handle.type = RTE_INTR_HANDLE_EXT;
344                 if (rte_intr_callback_register(&virtq->intr_handle,
345                                                mlx5_vdpa_virtq_handler,
346                                                virtq)) {
347                         virtq->intr_handle.fd = -1;
348                         DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
349                                 index);
350                         goto error;
351                 } else {
352                         DRV_LOG(DEBUG, "Register fd %d interrupt for virtq %d.",
353                                 virtq->intr_handle.fd, index);
354                 }
355         }
356         /* Subscribe virtq error event. */
357         virtq->version++;
358         cookie = ((uint64_t)virtq->version << 32) + index;
359         ret = mlx5_glue->devx_subscribe_devx_event(priv->err_chnl,
360                                                    virtq->virtq->obj,
361                                                    sizeof(event_num),
362                                                    &event_num, cookie);
363         if (ret) {
364                 DRV_LOG(ERR, "Failed to subscribe device %d virtq %d error event.",
365                         priv->vid, index);
366                 rte_errno = errno;
367                 goto error;
368         }
369         virtq->stopped = false;
370         DRV_LOG(DEBUG, "vid %u virtq %u was created successfully.", priv->vid,
371                 index);
372         return 0;
373 error:
374         mlx5_vdpa_virtq_unset(virtq);
375         return -1;
376 }
377
378 static int
379 mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
380 {
381         if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
382                 if (!(priv->caps.virtio_queue_type & (1 <<
383                                                      MLX5_VIRTQ_TYPE_PACKED))) {
384                         DRV_LOG(ERR, "Failed to configur PACKED mode for vdev "
385                                 "%d - it was not reported by HW/driver"
386                                 " capability.", priv->vid);
387                         return -ENOTSUP;
388                 }
389         }
390         if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
391                 if (!priv->caps.tso_ipv4) {
392                         DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
393                                 " was not reported by HW/driver capability.",
394                                 priv->vid);
395                         return -ENOTSUP;
396                 }
397         }
398         if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
399                 if (!priv->caps.tso_ipv6) {
400                         DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
401                                 " was not reported by HW/driver capability.",
402                                 priv->vid);
403                         return -ENOTSUP;
404                 }
405         }
406         if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
407                 if (!priv->caps.tx_csum) {
408                         DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
409                                 " was not reported by HW/driver capability.",
410                                 priv->vid);
411                         return -ENOTSUP;
412                 }
413         }
414         if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
415                 if (!priv->caps.rx_csum) {
416                         DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d"
417                                 " GUEST CSUM was not reported by HW/driver "
418                                 "capability.", priv->vid);
419                         return -ENOTSUP;
420                 }
421         }
422         if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
423                 if (!priv->caps.virtio_version_1_0) {
424                         DRV_LOG(ERR, "Failed to enable version 1 for vdev %d "
425                                 "version 1 was not reported by HW/driver"
426                                 " capability.", priv->vid);
427                         return -ENOTSUP;
428                 }
429         }
430         return 0;
431 }
432
433 int
434 mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
435 {
436         struct mlx5_devx_tis_attr tis_attr = {0};
437         struct ibv_context *ctx = priv->cdev->ctx;
438         uint32_t i;
439         uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
440         int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
441
442         if (ret || mlx5_vdpa_features_validate(priv)) {
443                 DRV_LOG(ERR, "Failed to configure negotiated features.");
444                 return -1;
445         }
446         if ((priv->features & (1ULL << VIRTIO_NET_F_CSUM)) == 0 &&
447             ((priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) > 0 ||
448              (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) > 0)) {
449                 /* Packet may be corrupted if TSO is enabled without CSUM. */
450                 DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
451                 priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
452         }
453         if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
454                 DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
455                         (int)priv->caps.max_num_virtio_queues * 2,
456                         (int)nr_vring);
457                 return -1;
458         }
459         /* Always map the entire page. */
460         priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
461                                    PROT_WRITE, MAP_SHARED, ctx->cmd_fd,
462                                    priv->var->mmap_off);
463         if (priv->virtq_db_addr == MAP_FAILED) {
464                 DRV_LOG(ERR, "Failed to map doorbell page %u.", errno);
465                 priv->virtq_db_addr = NULL;
466                 goto error;
467         } else {
468                 DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
469                         priv->virtq_db_addr);
470         }
471         priv->td = mlx5_devx_cmd_create_td(ctx);
472         if (!priv->td) {
473                 DRV_LOG(ERR, "Failed to create transport domain.");
474                 return -rte_errno;
475         }
476         tis_attr.transport_domain = priv->td->id;
477         for (i = 0; i < priv->num_lag_ports; i++) {
478                 /* 0 is auto affinity, non-zero value to propose port. */
479                 tis_attr.lag_tx_port_affinity = i + 1;
480                 priv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);
481                 if (!priv->tiss[i]) {
482                         DRV_LOG(ERR, "Failed to create TIS %u.", i);
483                         goto error;
484                 }
485         }
486         priv->nr_virtqs = nr_vring;
487         for (i = 0; i < nr_vring; i++)
488                 if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
489                         goto error;
490         return 0;
491 error:
492         mlx5_vdpa_virtqs_release(priv);
493         return -1;
494 }
495
496 static int
497 mlx5_vdpa_virtq_is_modified(struct mlx5_vdpa_priv *priv,
498                             struct mlx5_vdpa_virtq *virtq)
499 {
500         struct rte_vhost_vring vq;
501         int ret = rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq);
502
503         if (ret)
504                 return -1;
505         if (vq.size != virtq->vq_size || vq.kickfd != virtq->intr_handle.fd)
506                 return 1;
507         if (virtq->eqp.cq.cq_obj.cq) {
508                 if (vq.callfd != virtq->eqp.cq.callfd)
509                         return 1;
510         } else if (vq.callfd != -1) {
511                 return 1;
512         }
513         return 0;
514 }
515
/*
 * Enable or disable virtq @index following a vhost state change.
 * Before the device is fully configured only the desired state is
 * recorded.  Re-enabling an already-enabled queue recreates it only if
 * the vhost ring parameters changed.  RX queues additionally trigger a
 * steering update so forwarding matches the set of enabled queues.
 * Returns 0 on success, negative on failure.
 */
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
	if (!priv->configured) {
		/* Device not ready yet: just remember the desired state. */
		virtq->enable = !!enable;
		return 0;
	}
	if (virtq->enable == !!enable) {
		if (!enable)
			return 0;
		/* Re-enable: rebuild only if the vhost ring changed. */
		ret = mlx5_vdpa_virtq_is_modified(priv, virtq);
		if (ret < 0) {
			DRV_LOG(ERR, "Virtq %d modify check failed.", index);
			return -1;
		}
		if (ret == 0)
			return 0;
		DRV_LOG(INFO, "Virtq %d was modified, recreate it.", index);
	}
	if (virtq->virtq) {
		/* Tear down the existing queue before any state change. */
		virtq->enable = 0;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to disable steering "
					"for virtq %d.", index);
		}
		mlx5_vdpa_virtq_unset(virtq);
	}
	if (enable) {
		ret = mlx5_vdpa_virtq_setup(priv, index);
		if (ret) {
			DRV_LOG(ERR, "Failed to setup virtq %d.", index);
			return ret;
		}
		virtq->enable = 1;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to enable steering "
					"for virtq %d.", index);
		}
	}
	return 0;
}
566
/*
 * Fill @stats with up to @n HW counter deltas for virtq @qid, relative
 * to the snapshot taken at the last stats reset.
 * Returns the number of stats written, or a negative error code.
 *
 * The stats are emitted in MLX5_VDPA_STATS_* order; each "if (ret == X)
 * return ret;" guard stops as soon as the caller-provided capacity is
 * reached (assumes the enum values are consecutive from 0 — the guards
 * rely on that ordering).
 */
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	struct mlx5_devx_virtio_q_couners_attr attr = {0};
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
			"is invalid.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
		return ret;
	}
	/* Cap the output count to the supported number of stats. */
	ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
	if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
		.value = attr.received_desc - virtq->reset.received_desc,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
		.value = attr.completed_desc - virtq->reset.completed_desc,
	};
	if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
		.value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
	};
	if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
		return ret;
	stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
		.value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
	};
	if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
		return ret;
	stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_INVALID_BUFFER,
		.value = attr.invalid_buffer - virtq->reset.invalid_buffer,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
		.value = attr.error_cqes - virtq->reset.error_cqes,
	};
	return ret;
}
624
625 int
626 mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
627 {
628         struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
629         int ret;
630
631         if (!virtq->counters) {
632                 DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
633                         "is invalid.", qid);
634                 return -EINVAL;
635         }
636         ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
637                                                     &virtq->reset);
638         if (ret)
639                 DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
640                         qid);
641         return ret;
642 }