drivers/vdpa/mlx5/mlx5_vdpa_event.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_ERROR_TIME_SEC 3u

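/*
 * Release the global resources shared by all event queues:
 * the UAR and the DevX event channel (after draining pending events).
 */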
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
	if (priv->uar) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
#ifdef HAVE_IBV_DEVX_EVENT
	if (priv->eventc) {
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
								    + 128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
		       sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
		mlx5_os_devx_destroy_event_channel(priv->eventc);
		priv->eventc = NULL;
	}
#endif
	priv->eqn = 0;
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
	int flags, ret;

	if (priv->eventc)
		return 0;
	if (mlx5_glue->devx_query_eqn(priv->ctx, 0, &priv->eqn)) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
		return -1;
	}
	priv->eventc = mlx5_os_devx_create_event_channel(priv->ctx,
			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!priv->eventc) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.",
			rte_errno);
		goto error;
	}
	flags = fcntl(priv->eventc->fd, F_GETFL);
	ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change event channel FD.");
		goto error;
	}
	/*
	 * This PMD always issues a write memory barrier before UAR
	 * register writes, so it is safe to allocate the UAR with any
	 * memory mapping type.
	 */
	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
	if (!priv->uar) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate UAR.");
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_event_qp_global_release(priv);
	return -1;
}

static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		rte_free((void *)(uintptr_t)cq->umem_buf);
	memset(cq, 0, sizeof(*cq));
}

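/*
 * Arm the CQ for the next completion event: update the CQ arm doorbell
 * record and then write the 64-bit doorbell value to the UAR page.
 */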
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq->id;
	uint64_t db_be = rte_cpu_to_be_64(doorbell);
	uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

	rte_io_wmb();
	cq->db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	cq->arm_sn++;
	cq->armed = 1;
}

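/*
 * Create the completion queue of an event QP: allocate and register the
 * CQE/doorbell umem, create the CQ DevX object, subscribe it to the CQE
 * event channel and arm it for the first time.
 */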
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
		    int callfd, struct mlx5_vdpa_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {0};
	size_t pgsize = sysconf(_SC_PAGESIZE);
	uint32_t umem_size;
	uint16_t event_nums[1] = {0};
	uint16_t cq_size = 1 << log_desc_n;
	int ret;

	cq->log_desc_n = log_desc_n;
	umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
	cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!cq->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
						(void *)(uintptr_t)cq->umem_buf,
						umem_size,
						IBV_ACCESS_LOCAL_WRITE);
	if (!cq->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	attr.q_umem_valid = 1;
	attr.db_umem_valid = 1;
	attr.use_first_only = 1;
	attr.overrun_ignore = 0;
	attr.uar_page_id = priv->uar->page_id;
	attr.q_umem_id = cq->umem_obj->umem_id;
	attr.q_umem_offset = 0;
	attr.db_umem_id = cq->umem_obj->umem_id;
	attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
	attr.eqn = priv->eqn;
	attr.log_cq_size = log_desc_n;
	attr.log_page_size = rte_log2_u32(pgsize);
	cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
	if (!cq->cq)
		goto error;
	cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
	cq->cq_ci = 0;
	rte_spinlock_init(&cq->sl);
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_os_devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
						sizeof(event_nums),
						event_nums,
						(uint64_t)(uintptr_t)cq);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		goto error;
	}
	cq->callfd = callfd;
	/* Initialize the first CQE so that HW owns the CQ from the start. */
	cq->cqes[0].op_own = MLX5_CQE_OWNER_MASK;
	cq->cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
	/* First arming. */
	mlx5_vdpa_cq_arm(priv, cq);
	return 0;
error:
	mlx5_vdpa_cq_destroy(cq);
	return -1;
}

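/*
 * Poll the collapsed (use_first_only) CQ: HW overwrites CQE[0], so the number
 * of new completions is derived from the WQE counter in that CQE. The CQ
 * doorbell record and the SW QP doorbell record are then updated accordingly.
 */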
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
	struct mlx5_vdpa_event_qp *eqp =
				container_of(cq, struct mlx5_vdpa_event_qp, cq);
	const unsigned int cq_size = 1 << cq->log_desc_n;
	union {
		struct {
			uint16_t wqe_counter;
			uint8_t rsvd5;
			uint8_t op_own;
		};
		uint32_t word;
	} last_word;
	uint16_t next_wqe_counter = cq->cq_ci;
	uint16_t cur_wqe_counter;
	uint16_t comp;

	last_word.word = rte_read32(&cq->cqes[0].wqe_counter);
	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
	comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
	if (comp) {
		cq->cq_ci += comp;
		MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
			    MLX5_CQE_INVALID);
		/* Count error completions only. */
		if (unlikely(MLX5_CQE_OPCODE(last_word.op_own) ==
			     MLX5_CQE_RESP_ERR ||
			     MLX5_CQE_OPCODE(last_word.op_own) ==
			     MLX5_CQE_REQ_ERR))
			cq->errors++;
		rte_io_wmb();
		/* Ring CQ doorbell record. */
		cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
		rte_io_wmb();
		/* Ring SW QP doorbell record. */
		eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
	}
	return comp;
}

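/* Arm all created CQs that are not currently armed. */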
static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_vdpa_cq *cq;
	int i;

	for (i = 0; i < priv->nr_virtqs; i++) {
		cq = &priv->virtqs[i].eqp.cq;
		if (cq->cq && !cq->armed)
			mlx5_vdpa_cq_arm(priv, cq);
	}
}

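/*
 * Sleep between polling iterations. In dynamic timer mode the delay grows by
 * event_us when nothing was completed and shrinks in proportion to the
 * largest completion burst otherwise.
 */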
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
	if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
		switch (max) {
		case 0:
			priv->timer_delay_us += priv->event_us;
			break;
		case 1:
			break;
		default:
			priv->timer_delay_us /= max;
			break;
		}
	}
	usleep(priv->timer_delay_us);
}

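/*
 * Polling thread: wait until the timer is switched on, then poll all virtq
 * CQs in a loop. When no traffic is seen for no_traffic_time_s seconds, arm
 * the CQs, stop the timer and block until an interrupt switches it on again.
 */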
static void *
mlx5_vdpa_poll_handle(void *arg)
{
	struct mlx5_vdpa_priv *priv = arg;
	int i;
	struct mlx5_vdpa_cq *cq;
	uint32_t max;
	uint64_t current_tic;

	pthread_mutex_lock(&priv->timer_lock);
	while (!priv->timer_on)
		pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
	pthread_mutex_unlock(&priv->timer_lock);
	priv->timer_delay_us = priv->event_mode ==
					    MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
					      MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
								 priv->event_us;
	while (1) {
		max = 0;
		pthread_mutex_lock(&priv->vq_config_lock);
		for (i = 0; i < priv->nr_virtqs; i++) {
			cq = &priv->virtqs[i].eqp.cq;
			if (cq->cq && !cq->armed) {
				uint32_t comp = mlx5_vdpa_cq_poll(cq);

				if (comp) {
					/* Notify guest about consumed descriptors. */
					if (cq->callfd != -1)
						eventfd_write(cq->callfd,
							      (eventfd_t)1);
					if (comp > max)
						max = comp;
				}
			}
		}
		current_tic = rte_rdtsc();
		if (!max) {
			/* No traffic? Stop the timer and switch to interrupt mode. */
			if (current_tic - priv->last_traffic_tic >=
			    rte_get_timer_hz() * priv->no_traffic_time_s) {
				DRV_LOG(DEBUG, "Device %s traffic was stopped.",
					priv->vdev->device->name);
				mlx5_vdpa_arm_all_cqs(priv);
				pthread_mutex_unlock(&priv->vq_config_lock);
				pthread_mutex_lock(&priv->timer_lock);
				priv->timer_on = 0;
				while (!priv->timer_on)
					pthread_cond_wait(&priv->timer_cond,
							  &priv->timer_lock);
				pthread_mutex_unlock(&priv->timer_lock);
				priv->timer_delay_us = priv->event_mode ==
					    MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
					      MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
								 priv->event_us;
				continue;
			}
		} else {
			priv->last_traffic_tic = current_tic;
		}
		pthread_mutex_unlock(&priv->vq_config_lock);
		mlx5_vdpa_timer_sleep(priv, max);
	}
	return NULL;
}

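/*
 * CQ event channel handler: drain all pending CQE events, poll the matching
 * CQs, notify the guest and let the polling timer take over (or re-arm the
 * CQ immediately in pure interrupt mode).
 */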
static void
mlx5_vdpa_interrupt_handler(void *cb_arg)
{
	struct mlx5_vdpa_priv *priv = cb_arg;
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;

	pthread_mutex_lock(&priv->vq_config_lock);
	while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
					       (uintptr_t)out.event_resp.cookie;
		struct mlx5_vdpa_event_qp *eqp = container_of(cq,
						 struct mlx5_vdpa_event_qp, cq);
		struct mlx5_vdpa_virtq *virtq = container_of(eqp,
						   struct mlx5_vdpa_virtq, eqp);

		if (!virtq->enable)
			continue;
		mlx5_vdpa_cq_poll(cq);
		/* Notify guest about consumed descriptors. */
		if (cq->callfd != -1)
			eventfd_write(cq->callfd, (eventfd_t)1);
		if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
			mlx5_vdpa_cq_arm(priv, cq);
			pthread_mutex_unlock(&priv->vq_config_lock);
			return;
		}
		/* Don't arm again - the timer will take control. */
		DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
			" Timer is %s, cq ci is %u.\n",
			priv->vdev->device->name,
			(int)virtq->index, cq->cq->id,
			priv->timer_on ? "on" : "off", cq->cq_ci);
		cq->armed = 0;
	}
#endif

	/* Traffic detected: make sure the timer is on. */
	priv->last_traffic_tic = rte_rdtsc();
	pthread_mutex_lock(&priv->timer_lock);
	if (!priv->timer_on) {
		priv->timer_on = 1;
		pthread_cond_signal(&priv->timer_cond);
	}
	pthread_mutex_unlock(&priv->timer_lock);
	pthread_mutex_unlock(&priv->vq_config_lock);
}

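/*
 * Error event channel handler: for each virtq error event, stop the virtq,
 * query the error info and try to re-enable it, giving up when errors keep
 * repeating within MLX5_VDPA_ERROR_TIME_SEC seconds.
 */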
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
	struct mlx5_vdpa_priv *priv = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	uint32_t vq_index, i, version;
	struct mlx5_vdpa_virtq *virtq;
	uint64_t sec;

	pthread_mutex_lock(&priv->vq_config_lock);
	while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		vq_index = out.event_resp.cookie & UINT32_MAX;
		version = out.event_resp.cookie >> 32;
		if (vq_index >= priv->nr_virtqs) {
			DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
				priv->vdev->device->name, vq_index);
			continue;
		}
		virtq = &priv->virtqs[vq_index];
		if (!virtq->enable || virtq->version != version)
			continue;
		if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
			continue;
		virtq->stopped = true;
		/* Query error info. */
		if (mlx5_vdpa_virtq_query(priv, vq_index))
			goto log;
		/* Disable vq. */
		if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
			DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
			goto log;
		}
		/*
		 * Retry only if errors are not repeating too fast, i.e. the
		 * oldest logged error is older than MLX5_VDPA_ERROR_TIME_SEC.
		 */
		sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
		if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
			/* Retry. */
			if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
				DRV_LOG(ERR, "Failed to enable virtq %d.",
					vq_index);
			else
				DRV_LOG(WARNING, "Recover virtq %d: %u.",
					vq_index, ++virtq->n_retry);
		} else {
			/* Errors repeating too fast - give up. */
			DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
				priv->vdev->device->name, vq_index);
		}
log:
		/* Shift the error time log and append the current time. */
		for (i = 1; i < RTE_DIM(virtq->err_time); i++)
			virtq->err_time[i - 1] = virtq->err_time[i];
		virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
	}
	pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

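/*
 * Create the device error event channel, make its FD non-blocking and
 * register the error interrupt handler on it.
 */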
int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;
	int flags;

	/* Setup device event channel. */
	priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->ctx, 0);
	if (!priv->err_chnl) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create device event channel %d.",
			rte_errno);
		goto error;
	}
	flags = fcntl(priv->err_chnl->fd, F_GETFL);
	ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change device event channel FD.");
		goto error;
	}
	priv->err_intr_handle.fd = priv->err_chnl->fd;
	priv->err_intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->err_intr_handle,
				       mlx5_vdpa_err_interrupt_handler,
				       priv)) {
		priv->err_intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
			priv->vid);
		goto error;
	} else {
		DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
			priv->vid);
	}
	return 0;
error:
	mlx5_vdpa_err_event_unset(priv);
	return -1;
}

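/*
 * Unregister the error interrupt handler, drain any pending error events and
 * destroy the error event channel.
 */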
void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (!priv->err_intr_handle.fd)
		return;
	while (retries-- && ret == -EAGAIN) {
		ret = rte_intr_callback_unregister(&priv->err_intr_handle,
					    mlx5_vdpa_err_interrupt_handler,
					    priv);
		if (ret == -EAGAIN) {
			DRV_LOG(DEBUG, "Try again to unregister fd %d "
				"of error interrupt, retries = %d.",
				priv->err_intr_handle.fd, retries);
			rte_pause();
		}
	}
	memset(&priv->err_intr_handle, 0, sizeof(priv->err_intr_handle));
	if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
				    128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->err_chnl,
		       &out.event_resp, sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
#endif
		mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
		priv->err_chnl = NULL;
	}
}

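/*
 * Set up CQE event handling: start the polling timer thread (unless running
 * in pure interrupt mode) and register the CQE interrupt handler on the
 * event channel FD.
 */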
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;

	if (!priv->eventc)
		/* All virtqs are in poll mode. */
		return 0;
	if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
		pthread_mutex_init(&priv->timer_lock, NULL);
		pthread_cond_init(&priv->timer_cond, NULL);
		priv->timer_on = 0;
		ret = pthread_create(&priv->timer_tid, NULL,
				     mlx5_vdpa_poll_handle, (void *)priv);
		if (ret) {
			DRV_LOG(ERR, "Failed to create timer thread.");
			return -1;
		}
	}
	priv->intr_handle.fd = priv->eventc->fd;
	priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->intr_handle,
				       mlx5_vdpa_interrupt_handler, priv)) {
		priv->intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_cqe_event_unset(priv);
	return -1;
}

void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;
	void *status;

	if (priv->intr_handle.fd) {
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(&priv->intr_handle,
						    mlx5_vdpa_interrupt_handler,
						    priv);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
					"of CQ interrupt, retries = %d.",
					priv->intr_handle.fd, retries);
				rte_pause();
			}
		}
		memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
	}
	if (priv->timer_tid) {
		pthread_cancel(priv->timer_tid);
		pthread_join(priv->timer_tid, &status);
	}
	priv->timer_tid = 0;
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
	if (eqp->sw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
	if (eqp->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
	if (eqp->umem_buf)
		rte_free(eqp->umem_buf);
	if (eqp->fw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
	mlx5_vdpa_cq_destroy(&eqp->cq);
	memset(eqp, 0, sizeof(*eqp));
}

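/*
 * Move the FW QP and the SW QP through the RST->INIT->RTR->RTS transitions,
 * each QP using the other one as its remote peer.
 */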
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}

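/*
 * Create the event QP of a virtq: a CQ, a FW-owned QP and a SW-owned RQ-only
 * QP connected to each other, then move both QPs to RTS and ring the initial
 * RQ doorbell.
 */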
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			  int callfd, struct mlx5_vdpa_event_qp *eqp)
{
	struct mlx5_devx_qp_attr attr = {0};
	uint16_t log_desc_n = rte_log2_u32(desc_n);
	uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
						       sizeof(*eqp->db_rec) * 2;

	if (mlx5_vdpa_event_qp_global_prepare(priv))
		return -1;
	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
		return -1;
	attr.pd = priv->pdn;
	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->fw_qp) {
		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
		goto error;
	}
	eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!eqp->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
		rte_errno = ENOMEM;
		goto error;
	}
	eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
					       (void *)(uintptr_t)eqp->umem_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!eqp->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SW QP.");
		goto error;
	}
	attr.uar_index = priv->uar->page_id;
	attr.cqn = eqp->cq.cq->id;
	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
	attr.rq_size = 1 << log_desc_n;
	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
	attr.sq_size = 0; /* No SQ is needed. */
	attr.dbr_umem_valid = 1;
	attr.wq_umem_id = eqp->umem_obj->umem_id;
	attr.wq_umem_offset = 0;
	attr.dbr_umem_id = eqp->umem_obj->umem_id;
	attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
	eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->sw_qp) {
		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
		goto error;
	}
	eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
	if (mlx5_vdpa_qps2rts(eqp))
		goto error;
	/* First ringing: post the whole RQ to HW. */
	rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
	return 0;
error:
	mlx5_vdpa_event_qp_destroy(eqp);
	return -1;
}