vdpa/mlx5: fix completion queue assertion
[dpdk.git] drivers/vdpa/mlx5/mlx5_vdpa_event.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

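/* Release the global event resources: free the UAR, drain and destroy
 * the DEVX event channel, and reset the cached EQ number.
 */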
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
        if (priv->uar) {
                mlx5_glue->devx_free_uar(priv->uar);
                priv->uar = NULL;
        }
#ifdef HAVE_IBV_DEVX_EVENT
        if (priv->eventc) {
                union {
                        struct mlx5dv_devx_async_event_hdr event_resp;
                        uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
                                                                         + 128];
                } out;

                /* Clean all pending events. */
                while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                       sizeof(out.buf)) >=
                       (ssize_t)sizeof(out.event_resp.cookie))
                        ;
                mlx5_glue->devx_destroy_event_channel(priv->eventc);
                priv->eventc = NULL;
        }
#endif
        priv->eqn = 0;
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
        int flags, ret;

        if (priv->eventc)
                return 0;
        if (mlx5_glue->devx_query_eqn(priv->ctx, 0, &priv->eqn)) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
                return -1;
        }
        priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
                           MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!priv->eventc) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create event channel %d.",
                        rte_errno);
                goto error;
        }
        flags = fcntl(priv->eventc->fd, F_GETFL);
        ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
                DRV_LOG(ERR, "Failed to change event channel FD.");
                goto error;
        }
        priv->uar = mlx5_glue->devx_alloc_uar(priv->ctx, 0);
        if (!priv->uar) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to allocate UAR.");
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_event_qp_global_release(priv);
        return -1;
}

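/* Release a single CQ: the DevX CQ object, its registered umem and the
 * umem buffer itself.
 */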
static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
        if (cq->cq)
                claim_zero(mlx5_devx_cmd_destroy(cq->cq));
        if (cq->umem_obj)
                claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
        if (cq->umem_buf)
                rte_free((void *)(uintptr_t)cq->umem_buf);
        memset(cq, 0, sizeof(*cq));
}

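/* Arm the CQ for the next completion event: update the arm doorbell
 * record, then write the CQ doorbell register in the UAR page. On
 * 32-bit architectures the 64-bit doorbell is written as two ordered
 * 32-bit stores.
 */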
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
        uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
        uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
        uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
        uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq->id;
        uint64_t db_be = rte_cpu_to_be_64(doorbell);
        uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

        rte_io_wmb();
        cq->db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
        rte_wmb();
#ifdef RTE_ARCH_64
        *(uint64_t *)addr = db_be;
#else
        *(uint32_t *)addr = db_be;
        rte_io_wmb();
        *((uint32_t *)addr + 1) = db_be >> 32;
#endif
        cq->arm_sn++;
        cq->armed = 1;
}

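/* Create a CQ for one event QP: allocate and register the CQE and
 * doorbell buffer, create the DevX CQ with use_first_only set, and
 * subscribe it to the event channel with the CQ pointer as the cookie.
 */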
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
                    int callfd, struct mlx5_vdpa_cq *cq)
{
        struct mlx5_devx_cq_attr attr = {0};
        size_t pgsize = sysconf(_SC_PAGESIZE);
        uint32_t umem_size;
        uint16_t event_nums[1] = {0};
        uint16_t cq_size = 1 << log_desc_n;
        int ret;

        cq->log_desc_n = log_desc_n;
        umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
        cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
        if (!cq->umem_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for CQ.");
                rte_errno = ENOMEM;
                return -ENOMEM;
        }
        cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
                                                (void *)(uintptr_t)cq->umem_buf,
                                                umem_size,
                                                IBV_ACCESS_LOCAL_WRITE);
        if (!cq->umem_obj) {
                DRV_LOG(ERR, "Failed to register umem for CQ.");
                goto error;
        }
        attr.q_umem_valid = 1;
        attr.db_umem_valid = 1;
        attr.use_first_only = 1;
        attr.overrun_ignore = 0;
        attr.uar_page_id = priv->uar->page_id;
        attr.q_umem_id = cq->umem_obj->umem_id;
        attr.q_umem_offset = 0;
        attr.db_umem_id = cq->umem_obj->umem_id;
        attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
        attr.eqn = priv->eqn;
        attr.log_cq_size = log_desc_n;
        attr.log_page_size = rte_log2_u32(pgsize);
        cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
        if (!cq->cq)
                goto error;
        cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
        cq->cq_ci = 0;
        rte_spinlock_init(&cq->sl);
        /* Subscribe CQ event to the event channel controlled by the driver. */
        ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
                                                   sizeof(event_nums),
                                                   event_nums,
                                                   (uint64_t)(uintptr_t)cq);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe CQE event.");
                rte_errno = errno;
                goto error;
        }
        cq->callfd = callfd;
        /* Initialize the first CQE to all ones so HW owns it at start. */
        cq->cqes[0].op_own = MLX5_CQE_OWNER_MASK;
        cq->cqes[0].wqe_counter = rte_cpu_to_be_16(cq_size - 1);
        /* First arming. */
        mlx5_vdpa_cq_arm(priv, cq);
        return 0;
error:
        mlx5_vdpa_cq_destroy(cq);
        return -1;
}

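/* Poll the CQ. With use_first_only set, HW writes each completion into
 * the first CQE, so the number of new completions is derived from the
 * CQE WQE counter relative to the consumer index. When progress is
 * made, both the CQ and the SW QP doorbell records are rung.
 */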
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
        struct mlx5_vdpa_event_qp *eqp =
                                container_of(cq, struct mlx5_vdpa_event_qp, cq);
        const unsigned int cq_size = 1 << cq->log_desc_n;
        const unsigned int cq_mask = cq_size - 1;
        union {
                struct {
                        uint16_t wqe_counter;
                        uint8_t rsvd5;
                        uint8_t op_own;
                };
                uint32_t word;
        } last_word;
        uint16_t next_wqe_counter = cq->cq_ci & cq_mask;
        uint16_t cur_wqe_counter;
        uint16_t comp;

        last_word.word = rte_read32(&cq->cqes[0].wqe_counter);
        cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
        comp = (cur_wqe_counter + 1u - next_wqe_counter) & cq_mask;
        if (comp) {
                cq->cq_ci += comp;
                MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
                            MLX5_CQE_INVALID);
                /* cq->errors counts completions with a non-error opcode. */
                if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
                               MLX5_CQE_RESP_ERR ||
                               MLX5_CQE_OPCODE(last_word.op_own) ==
                               MLX5_CQE_REQ_ERR)))
                        cq->errors++;
                rte_io_wmb();
                /* Ring CQ doorbell record. */
                cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
                rte_io_wmb();
                /* Ring SW QP doorbell record. */
                eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
        }
        return comp;
}

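/* Arm all the created and not-yet-armed CQs. */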
static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
        struct mlx5_vdpa_cq *cq;
        int i;

        for (i = 0; i < priv->nr_virtqs; i++) {
                cq = &priv->virtqs[i].eqp.cq;
                if (cq->cq && !cq->armed)
                        mlx5_vdpa_cq_arm(priv, cq);
        }
}

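/* Adjust the polling delay and sleep: in dynamic-timer mode the delay
 * grows by event_us when nothing completed and is divided by the burst
 * size otherwise.
 */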
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
        if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
                switch (max) {
                case 0:
                        priv->timer_delay_us += priv->event_us;
                        break;
                case 1:
                        break;
                default:
                        priv->timer_delay_us /= max;
                        break;
                }
        }
        usleep(priv->timer_delay_us);
}

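/* Polling thread routine: wait until the timer is switched on, then
 * poll all the virtq CQs in a loop, notify the guest on completions and
 * switch back to interrupt mode when no traffic is seen for
 * no_traffic_time_s seconds.
 */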
static void *
mlx5_vdpa_poll_handle(void *arg)
{
        struct mlx5_vdpa_priv *priv = arg;
        int i;
        struct mlx5_vdpa_cq *cq;
        uint32_t max;
        uint64_t current_tic;

        pthread_mutex_lock(&priv->timer_lock);
        while (!priv->timer_on)
                pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
        pthread_mutex_unlock(&priv->timer_lock);
        priv->timer_delay_us = priv->event_mode ==
                                            MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
                                              MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
                                                                 priv->event_us;
        while (1) {
                max = 0;
                pthread_mutex_lock(&priv->vq_config_lock);
                for (i = 0; i < priv->nr_virtqs; i++) {
                        cq = &priv->virtqs[i].eqp.cq;
                        if (cq->cq && !cq->armed) {
                                uint32_t comp = mlx5_vdpa_cq_poll(cq);

                                if (comp) {
                                        /* Notify guest about consumed descriptors. */
                                        if (cq->callfd != -1)
                                                eventfd_write(cq->callfd,
                                                              (eventfd_t)1);
                                        if (comp > max)
                                                max = comp;
                                }
                        }
                }
                current_tic = rte_rdtsc();
                if (!max) {
                        /* No traffic? Stop the timer and switch to interrupt mode. */
                        if (current_tic - priv->last_traffic_tic >=
                            rte_get_timer_hz() * priv->no_traffic_time_s) {
                                DRV_LOG(DEBUG, "Device %s traffic was stopped.",
                                        priv->vdev->device->name);
                                mlx5_vdpa_arm_all_cqs(priv);
                                pthread_mutex_unlock(&priv->vq_config_lock);
                                pthread_mutex_lock(&priv->timer_lock);
                                priv->timer_on = 0;
                                while (!priv->timer_on)
                                        pthread_cond_wait(&priv->timer_cond,
                                                          &priv->timer_lock);
                                pthread_mutex_unlock(&priv->timer_lock);
                                priv->timer_delay_us = priv->event_mode ==
                                            MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
                                              MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
                                                                 priv->event_us;
                                continue;
                        }
                } else {
                        priv->last_traffic_tic = current_tic;
                }
                pthread_mutex_unlock(&priv->vq_config_lock);
                mlx5_vdpa_timer_sleep(priv, max);
        }
        return NULL;
}

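/* Event channel interrupt handler: drain all pending CQ events, poll
 * each signaled CQ and notify the guest, then either re-arm the CQ
 * (pure interrupt mode) or hand control back to the timer thread.
 */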
static void
mlx5_vdpa_interrupt_handler(void *cb_arg)
{
        struct mlx5_vdpa_priv *priv = cb_arg;
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;

        pthread_mutex_lock(&priv->vq_config_lock);
        while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                                         sizeof(out.buf)) >=
                                       (ssize_t)sizeof(out.event_resp.cookie)) {
                struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
                                               (uintptr_t)out.event_resp.cookie;
                struct mlx5_vdpa_event_qp *eqp = container_of(cq,
                                                 struct mlx5_vdpa_event_qp, cq);
                struct mlx5_vdpa_virtq *virtq = container_of(eqp,
                                                   struct mlx5_vdpa_virtq, eqp);

                if (!virtq->enable)
                        continue;
                mlx5_vdpa_cq_poll(cq);
                /* Notify guest about consumed descriptors. */
                if (cq->callfd != -1)
                        eventfd_write(cq->callfd, (eventfd_t)1);
                if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
                        mlx5_vdpa_cq_arm(priv, cq);
                        pthread_mutex_unlock(&priv->vq_config_lock);
                        return;
                }
                /* Don't arm again - the timer will take control. */
                DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
                        " Timer is %s, cq ci is %u.",
                        priv->vdev->device->name,
                        (int)virtq->index, cq->cq->id,
                        priv->timer_on ? "on" : "off", cq->cq_ci);
                cq->armed = 0;
        }
#endif

        /* Traffic detected: make sure the timer is on. */
        priv->last_traffic_tic = rte_rdtsc();
        pthread_mutex_lock(&priv->timer_lock);
        if (!priv->timer_on) {
                priv->timer_on = 1;
                pthread_cond_signal(&priv->timer_cond);
        }
        pthread_mutex_unlock(&priv->timer_lock);
        pthread_mutex_unlock(&priv->vq_config_lock);
}

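/* Set up CQE events: spawn the polling timer thread unless the driver
 * runs in pure interrupt mode, then register the event channel FD in
 * the interrupt framework.
 */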
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;

        if (!priv->eventc)
                /* All virtqs are in poll mode. */
                return 0;
        if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
                pthread_mutex_init(&priv->timer_lock, NULL);
                pthread_cond_init(&priv->timer_cond, NULL);
                priv->timer_on = 0;
                ret = pthread_create(&priv->timer_tid, NULL,
                                     mlx5_vdpa_poll_handle, (void *)priv);
                if (ret) {
                        DRV_LOG(ERR, "Failed to create timer thread.");
                        return -1;
                }
        }
        priv->intr_handle.fd = priv->eventc->fd;
        priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
        if (rte_intr_callback_register(&priv->intr_handle,
                                       mlx5_vdpa_interrupt_handler, priv)) {
                priv->intr_handle.fd = 0;
                DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_cqe_event_unset(priv);
        return -1;
}

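/* Tear down CQE events: unregister the interrupt callback, retrying
 * while it returns -EAGAIN, then cancel and join the timer thread if it
 * was created.
 */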
void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
        int retries = MLX5_VDPA_INTR_RETRIES;
        int ret = -EAGAIN;
        void *status;

        if (priv->intr_handle.fd) {
                while (retries-- && ret == -EAGAIN) {
                        ret = rte_intr_callback_unregister(&priv->intr_handle,
                                                    mlx5_vdpa_interrupt_handler,
                                                    priv);
                        if (ret == -EAGAIN) {
                                DRV_LOG(DEBUG, "Try again to unregister fd %d "
                                        "of CQ interrupt, retries = %d.",
                                        priv->intr_handle.fd, retries);
                                rte_pause();
                        }
                }
                memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
        }
        if (priv->timer_tid) {
                pthread_cancel(priv->timer_tid);
                pthread_join(priv->timer_tid, &status);
        }
        priv->timer_tid = 0;
}

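/* Release all the resources of an event QP: the SW QP, its umem, the FW
 * QP and the associated CQ.
 */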
void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
        if (eqp->sw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
        if (eqp->umem_obj)
                claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
        if (eqp->umem_buf)
                rte_free(eqp->umem_buf);
        if (eqp->fw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
        mlx5_vdpa_cq_destroy(&eqp->cq);
        memset(eqp, 0, sizeof(*eqp));
}

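/* Move both QPs, each connected to the other, through the
 * RST -> INIT -> RTR -> RTS state transitions.
 */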
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        return 0;
}

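/* Create an event QP: prepare the global resources, create the CQ and
 * the FW QP, allocate and register the SW QP buffer, create the SW QP
 * attached to the CQ, move both QPs to RTS and ring the SW QP doorbell
 * once to make the whole RQ available to HW.
 */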
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
                          int callfd, struct mlx5_vdpa_event_qp *eqp)
{
        struct mlx5_devx_qp_attr attr = {0};
        uint16_t log_desc_n = rte_log2_u32(desc_n);
        uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
                                                       sizeof(*eqp->db_rec) * 2;

        if (mlx5_vdpa_event_qp_global_prepare(priv))
                return -1;
        if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
                return -1;
        attr.pd = priv->pdn;
        eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
        if (!eqp->fw_qp) {
                DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
                goto error;
        }
        eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
        if (!eqp->umem_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
                rte_errno = ENOMEM;
                goto error;
        }
        eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
                                               (void *)(uintptr_t)eqp->umem_buf,
                                               umem_size,
                                               IBV_ACCESS_LOCAL_WRITE);
        if (!eqp->umem_obj) {
                DRV_LOG(ERR, "Failed to register umem for SW QP.");
                goto error;
        }
        attr.uar_index = priv->uar->page_id;
        attr.cqn = eqp->cq.cq->id;
        attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
        attr.rq_size = 1 << log_desc_n;
        attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
        attr.sq_size = 0; /* No SQ is needed. */
        attr.dbr_umem_valid = 1;
        attr.wq_umem_id = eqp->umem_obj->umem_id;
        attr.wq_umem_offset = 0;
        attr.dbr_umem_id = eqp->umem_obj->umem_id;
        attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
        eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
        if (!eqp->sw_qp) {
                DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
                goto error;
        }
        eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
        if (mlx5_vdpa_qps2rts(eqp))
                goto error;
        /* First doorbell ringing: make the whole RQ available to HW. */
        rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
        return 0;
error:
        mlx5_vdpa_event_qp_destroy(eqp);
        return -1;
}