/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

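/*
 * Release the global resources shared by all the event queue pairs:
 * the UAR, the DEVX event channel (after draining any events still
 * pending on it) and the cached EQ number.
 */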
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
        if (priv->uar) {
                mlx5_glue->devx_free_uar(priv->uar);
                priv->uar = NULL;
        }
#ifdef HAVE_IBV_DEVX_EVENT
        if (priv->eventc) {
                union {
                        struct mlx5dv_devx_async_event_hdr event_resp;
                        uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
                                                                         + 128];
                } out;

                /* Clean all pending events. */
                while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                       sizeof(out.buf)) >=
                       (ssize_t)sizeof(out.event_resp.cookie))
                        ;
                mlx5_glue->devx_destroy_event_channel(priv->eventc);
                priv->eventc = NULL;
        }
#endif
        priv->eqn = 0;
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
        uint32_t lcore;

        if (priv->eventc)
                return 0;
        lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
        if (mlx5_glue->devx_query_eqn(priv->ctx, lcore, &priv->eqn)) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
                return -1;
        }
        priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
                           MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!priv->eventc) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create event channel %d.",
                        rte_errno);
                goto error;
        }
        priv->uar = mlx5_glue->devx_alloc_uar(priv->ctx, 0);
        if (!priv->uar) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to allocate UAR.");
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_event_qp_global_release(priv);
        return -1;
}

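/*
 * Destroy a CQ and release its umem resources. Safe to call on a
 * partially created CQ.
 */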
static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
        if (cq->cq)
                claim_zero(mlx5_devx_cmd_destroy(cq->cq));
        if (cq->umem_obj)
                claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
        if (cq->umem_buf)
                rte_free((void *)(uintptr_t)cq->umem_buf);
        memset(cq, 0, sizeof(*cq));
}

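/*
 * Arm the CQ for the next completion event: write the arm sequence
 * number and the current consumer index to the arm doorbell record,
 * then ring the UAR doorbell. On 32-bit systems the 64-bit doorbell
 * value is written as two ordered 32-bit stores.
 */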
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
        uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
        uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
        uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
        uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq->id;
        uint64_t db_be = rte_cpu_to_be_64(doorbell);
        uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

        rte_io_wmb();
        cq->db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
        rte_wmb();
#ifdef RTE_ARCH_64
        *(uint64_t *)addr = db_be;
#else
        *(uint32_t *)addr = db_be;
        rte_io_wmb();
        *((uint32_t *)addr + 1) = db_be >> 32;
#endif
        cq->arm_sn++;
        cq->armed = 1;
}

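/*
 * Create a collapsed CQ (use_first_only) over a single umem that holds
 * the CQE array followed by the two doorbell records. The CQ is
 * subscribed to the driver event channel and, unless the device works
 * in interrupt-only mode, its events are also delivered directly to
 * the virtq callfd.
 */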
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
                    int callfd, struct mlx5_vdpa_cq *cq)
{
        struct mlx5_devx_cq_attr attr = {0};
        size_t pgsize = sysconf(_SC_PAGESIZE);
        uint32_t umem_size;
        uint16_t event_nums[1] = {0};
        uint16_t cq_size = 1 << log_desc_n;
        int ret;

        cq->log_desc_n = log_desc_n;
        umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
        cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
        if (!cq->umem_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for CQ.");
                rte_errno = ENOMEM;
                return -ENOMEM;
        }
        cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
                                                (void *)(uintptr_t)cq->umem_buf,
                                                umem_size,
                                                IBV_ACCESS_LOCAL_WRITE);
        if (!cq->umem_obj) {
                DRV_LOG(ERR, "Failed to register umem for CQ.");
                goto error;
        }
        attr.q_umem_valid = 1;
        attr.db_umem_valid = 1;
        attr.use_first_only = 1;
        attr.overrun_ignore = 0;
        attr.uar_page_id = priv->uar->page_id;
        attr.q_umem_id = cq->umem_obj->umem_id;
        attr.q_umem_offset = 0;
        attr.db_umem_id = cq->umem_obj->umem_id;
        attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
        attr.eqn = priv->eqn;
        attr.log_cq_size = log_desc_n;
        attr.log_page_size = rte_log2_u32(pgsize);
        cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
        if (!cq->cq)
                goto error;
        cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
        cq->cq_ci = 0;
        rte_spinlock_init(&cq->sl);
        /* Subscribe CQ event to the event channel controlled by the driver. */
        ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
                                                   sizeof(event_nums),
                                                   event_nums,
                                                   (uint64_t)(uintptr_t)cq);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe CQE event.");
                rte_errno = errno;
                goto error;
        }
        if (callfd != -1 &&
            priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
                ret = mlx5_glue->devx_subscribe_devx_event_fd(priv->eventc,
                                                              callfd,
                                                              cq->cq->obj, 0);
                if (ret) {
                        DRV_LOG(ERR, "Failed to subscribe CQE event fd.");
                        rte_errno = errno;
                        goto error;
                }
        }
        cq->callfd = callfd;
        /* Init the first CQE to HW ownership at start. */
        cq->cqes[0].op_own = MLX5_CQE_OWNER_MASK;
        cq->cqes[0].wqe_counter = rte_cpu_to_be_16(cq_size - 1);
        /* First arming. */
        mlx5_vdpa_cq_arm(priv, cq);
        return 0;
error:
        mlx5_vdpa_cq_destroy(cq);
        return -1;
}

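/*
 * Poll a collapsed CQ: HW overwrites only cqes[0], so a single read of
 * its wqe_counter gives the index of the last completion. The number
 * of new completions is the distance from the previous consumer index,
 * modulo the CQ size; the CQ and SW QP doorbell records are updated
 * accordingly.
 */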
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
        struct mlx5_vdpa_event_qp *eqp =
                                container_of(cq, struct mlx5_vdpa_event_qp, cq);
        const unsigned int cq_size = 1 << cq->log_desc_n;
        const unsigned int cq_mask = cq_size - 1;
        union {
                struct {
                        uint16_t wqe_counter;
                        uint8_t rsvd5;
                        uint8_t op_own;
                };
                uint32_t word;
        } last_word;
        uint16_t next_wqe_counter = cq->cq_ci & cq_mask;
        uint16_t cur_wqe_counter;
        uint16_t comp;

        last_word.word = rte_read32(&cq->cqes[0].wqe_counter);
        cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
        comp = (cur_wqe_counter + 1u - next_wqe_counter) & cq_mask;
        if (comp) {
                cq->cq_ci += comp;
                MLX5_ASSERT(!!(cq->cq_ci & cq_size) ==
                            MLX5_CQE_OWNER(last_word.op_own));
                MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
                            MLX5_CQE_INVALID);
                /* Count error CQEs. */
                if (unlikely(MLX5_CQE_OPCODE(last_word.op_own) ==
                             MLX5_CQE_RESP_ERR ||
                             MLX5_CQE_OPCODE(last_word.op_own) ==
                             MLX5_CQE_REQ_ERR))
                        cq->errors++;
                rte_io_wmb();
                /* Ring CQ doorbell record. */
                cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
                rte_io_wmb();
                /* Ring SW QP doorbell record. */
                eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
        }
        return comp;
}

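/* Arm all the created and unarmed CQs so they raise events again. */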
static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
        struct mlx5_vdpa_cq *cq;
        int i;

        for (i = 0; i < priv->nr_virtqs; i++) {
                cq = &priv->virtqs[i].eqp.cq;
                if (cq->cq && !cq->armed)
                        mlx5_vdpa_cq_arm(priv, cq);
        }
}

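/*
 * Sleep between poll iterations. In dynamic-timer mode the delay is
 * adaptive: an idle cycle stretches it by event_us, while a burst of
 * completions shrinks it proportionally to the burst size.
 */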
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
        if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
                switch (max) {
                case 0:
                        priv->timer_delay_us += priv->event_us;
                        break;
                case 1:
                        break;
                default:
                        priv->timer_delay_us /= max;
                        break;
                }
        }
        usleep(priv->timer_delay_us);
}

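/*
 * Timer thread: blocks on the timer condition variable while there is
 * no traffic, then polls all the virtq CQs in a loop. After
 * no_traffic_time_s seconds without completions it arms all the CQs,
 * hands control back to interrupt mode and blocks again.
 */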
static void *
mlx5_vdpa_poll_handle(void *arg)
{
        struct mlx5_vdpa_priv *priv = arg;
        int i;
        struct mlx5_vdpa_cq *cq;
        uint32_t max;
        uint64_t current_tic;

        pthread_mutex_lock(&priv->timer_lock);
        while (!priv->timer_on)
                pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
        pthread_mutex_unlock(&priv->timer_lock);
        priv->timer_delay_us = priv->event_mode ==
                                            MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
                                              MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
                                                                 priv->event_us;
        while (1) {
                max = 0;
                for (i = 0; i < priv->nr_virtqs; i++) {
                        cq = &priv->virtqs[i].eqp.cq;
                        if (cq->cq && !cq->armed) {
                                uint32_t comp = mlx5_vdpa_cq_poll(cq);

                                if (comp) {
                                        /* Notify guest of consumed descs. */
                                        if (cq->callfd != -1)
                                                eventfd_write(cq->callfd,
                                                              (eventfd_t)1);
                                        if (comp > max)
                                                max = comp;
                                }
                        }
                }
                current_tic = rte_rdtsc();
                if (!max) {
                        /* No traffic? Stop the timer, fall back to events. */
                        if (current_tic - priv->last_traffic_tic >=
                            rte_get_timer_hz() * priv->no_traffic_time_s) {
                                DRV_LOG(DEBUG, "Device %s traffic was stopped.",
                                        priv->vdev->device->name);
                                mlx5_vdpa_arm_all_cqs(priv);
                                pthread_mutex_lock(&priv->timer_lock);
                                priv->timer_on = 0;
                                while (!priv->timer_on)
                                        pthread_cond_wait(&priv->timer_cond,
                                                          &priv->timer_lock);
                                pthread_mutex_unlock(&priv->timer_lock);
                                priv->timer_delay_us = priv->event_mode ==
                                            MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
                                              MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
                                                                 priv->event_us;
                                continue;
                        }
                } else {
                        priv->last_traffic_tic = current_tic;
                }
                mlx5_vdpa_timer_sleep(priv, max);
        }
        return NULL;
}

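/*
 * Event channel handler: drains all pending CQ events. In
 * interrupt-only mode each CQ is polled, re-armed and the guest is
 * notified directly; in the timer modes the CQ is left unarmed and the
 * timer thread is signaled to take over the polling.
 */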
static void
mlx5_vdpa_interrupt_handler(void *cb_arg)
{
        struct mlx5_vdpa_priv *priv = cb_arg;
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;

        while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                                         sizeof(out.buf)) >=
                                       (ssize_t)sizeof(out.event_resp.cookie)) {
                struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
                                               (uintptr_t)out.event_resp.cookie;
                struct mlx5_vdpa_event_qp *eqp = container_of(cq,
                                                 struct mlx5_vdpa_event_qp, cq);
                struct mlx5_vdpa_virtq *virtq = container_of(eqp,
                                                   struct mlx5_vdpa_virtq, eqp);

                mlx5_vdpa_cq_poll(cq);
                if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
                        mlx5_vdpa_cq_arm(priv, cq);
                        /* Notify guest of consumed descs. */
                        if (cq->callfd != -1)
                                eventfd_write(cq->callfd, (eventfd_t)1);
                        return;
                }
                /* Don't arm again - the timer will take control. */
                DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
                        " Timer is %s, cq ci is %u.",
                        priv->vdev->device->name,
                        (int)virtq->index, cq->cq->id,
                        priv->timer_on ? "on" : "off", cq->cq_ci);
                cq->armed = 0;
        }
#endif

        /* Traffic detected: make sure the timer is on. */
        priv->last_traffic_tic = rte_rdtsc();
        pthread_mutex_lock(&priv->timer_lock);
        if (!priv->timer_on) {
                priv->timer_on = 1;
                pthread_cond_signal(&priv->timer_cond);
        }
        pthread_mutex_unlock(&priv->timer_lock);
}

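/*
 * Set up CQE event handling: start the timer thread (unless all the
 * events are interrupt driven), switch the event channel FD to
 * non-blocking mode and register it with the interrupt framework.
 */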
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
        int flags;
        int ret;

        if (!priv->eventc)
                /* All virtqs are in poll mode. */
                return 0;
        if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
                pthread_mutex_init(&priv->timer_lock, NULL);
                pthread_cond_init(&priv->timer_cond, NULL);
                priv->timer_on = 0;
                ret = pthread_create(&priv->timer_tid, NULL,
                                     mlx5_vdpa_poll_handle, (void *)priv);
                if (ret) {
                        DRV_LOG(ERR, "Failed to create timer thread.");
                        return -1;
                }
        }
        flags = fcntl(priv->eventc->fd, F_GETFL);
        ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
                DRV_LOG(ERR, "Failed to change event channel FD.");
                goto error;
        }
        priv->intr_handle.fd = priv->eventc->fd;
        priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
        if (rte_intr_callback_register(&priv->intr_handle,
                                       mlx5_vdpa_interrupt_handler, priv)) {
                priv->intr_handle.fd = 0;
                DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_cqe_event_unset(priv);
        return -1;
}

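/*
 * Tear down CQE event handling: unregister the interrupt callback,
 * retrying for as long as it is busy, then cancel and join the timer
 * thread.
 */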
void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
        int retries = MLX5_VDPA_INTR_RETRIES;
        int ret = -EAGAIN;
        void *status;

        if (priv->intr_handle.fd) {
                while (retries-- && ret == -EAGAIN) {
                        ret = rte_intr_callback_unregister(&priv->intr_handle,
                                                    mlx5_vdpa_interrupt_handler,
                                                    priv);
                        if (ret == -EAGAIN) {
                                DRV_LOG(DEBUG, "Try again to unregister fd %d "
                                        "of CQ interrupt, retries = %d.",
                                        priv->intr_handle.fd, retries);
                                rte_pause();
                        }
                }
                memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
        }
        if (priv->timer_tid) {
                pthread_cancel(priv->timer_tid);
                pthread_join(priv->timer_tid, &status);
        }
        priv->timer_tid = 0;
}

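/* Destroy the two QPs of an event QP along with their CQ and umems. */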
void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
        if (eqp->sw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
        if (eqp->umem_obj)
                claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
        if (eqp->umem_buf)
                rte_free(eqp->umem_buf);
        if (eqp->fw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
        mlx5_vdpa_cq_destroy(&eqp->cq);
        memset(eqp, 0, sizeof(*eqp));
}

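/*
 * Move the FW QP and the SW QP through the RST->INIT->RTR->RTS state
 * machine, each QP taking the other as its remote peer, so that the
 * pair forms a loopback connection.
 */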
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        return 0;
}

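/*
 * Create an event QP: a FW QP and a SW QP connected back to back. The
 * SW QP has only an RQ whose single umem holds the WQE buffer followed
 * by the two doorbell records; completions on its CQ are the virtq
 * events.
 */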
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
                          int callfd, struct mlx5_vdpa_event_qp *eqp)
{
        struct mlx5_devx_qp_attr attr = {0};
        uint16_t log_desc_n = rte_log2_u32(desc_n);
        uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
                                                       sizeof(*eqp->db_rec) * 2;

        if (mlx5_vdpa_event_qp_global_prepare(priv))
                return -1;
        if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
                return -1;
        attr.pd = priv->pdn;
        eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
        if (!eqp->fw_qp) {
                DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
                goto error;
        }
        eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
        if (!eqp->umem_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
                rte_errno = ENOMEM;
                goto error;
        }
        eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
                                               (void *)(uintptr_t)eqp->umem_buf,
                                               umem_size,
                                               IBV_ACCESS_LOCAL_WRITE);
        if (!eqp->umem_obj) {
                DRV_LOG(ERR, "Failed to register umem for SW QP.");
                goto error;
        }
        attr.uar_index = priv->uar->page_id;
        attr.cqn = eqp->cq.cq->id;
        attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
        attr.rq_size = 1 << log_desc_n;
        attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
        attr.sq_size = 0; /* No SQ is needed. */
        attr.dbr_umem_valid = 1;
        attr.wq_umem_id = eqp->umem_obj->umem_id;
        attr.wq_umem_offset = 0;
        attr.dbr_umem_id = eqp->umem_obj->umem_id;
        attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
        eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
        if (!eqp->sw_qp) {
                DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
                goto error;
        }
        eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
        if (mlx5_vdpa_qps2rts(eqp))
                goto error;
        /* First doorbell ring: post all the RQ WQEs to HW. */
        rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
        return 0;
error:
        mlx5_vdpa_event_qp_destroy(eqp);
        return -1;
}