vdpa/mlx5: fix polling threads scheduling
[dpdk.git] drivers/vdpa/mlx5/mlx5_vdpa_event.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 Mellanox Technologies, Ltd
3  */
4 #include <unistd.h>
5 #include <stdint.h>
6 #include <fcntl.h>
7 #include <sys/eventfd.h>
8
9 #include <rte_malloc.h>
10 #include <rte_memory.h>
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_atomic.h>
14 #include <rte_common.h>
15 #include <rte_io.h>
16 #include <rte_alarm.h>
17
18 #include <mlx5_common.h>
19 #include <mlx5_common_os.h>
20 #include <mlx5_common_devx.h>
21 #include <mlx5_glue.h>
22
23 #include "mlx5_vdpa_utils.h"
24 #include "mlx5_vdpa.h"
25
26
27 #define MLX5_VDPA_ERROR_TIME_SEC 3u
28
29 void
30 mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
31 {
32         if (priv->uar) {
33                 mlx5_glue->devx_free_uar(priv->uar);
34                 priv->uar = NULL;
35         }
36 #ifdef HAVE_IBV_DEVX_EVENT
37         if (priv->eventc) {
38                 union {
39                         struct mlx5dv_devx_async_event_hdr event_resp;
40                         uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
41                                                                          + 128];
42                 } out;
43
44                 /* Clean all pending events. */
45                 while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
46                        sizeof(out.buf)) >=
47                        (ssize_t)sizeof(out.event_resp.cookie))
48                         ;
49                 mlx5_os_devx_destroy_event_channel(priv->eventc);
50                 priv->eventc = NULL;
51         }
52 #endif
53 }
54
55 /* Prepare all the global resources for all the event objects. */
56 static int
57 mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
58 {
59         int flags, ret;
60
61         if (priv->eventc)
62                 return 0;
63         priv->eventc = mlx5_os_devx_create_event_channel(priv->ctx,
64                            MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
65         if (!priv->eventc) {
66                 rte_errno = errno;
67                 DRV_LOG(ERR, "Failed to create event channel %d.",
68                         rte_errno);
69                 goto error;
70         }
71         flags = fcntl(priv->eventc->fd, F_GETFL);
72         ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
73         if (ret) {
74                 DRV_LOG(ERR, "Failed to change event channel FD.");
75                 goto error;
76         }
77         /*
78          * This PMD always issues a write memory barrier before writing
79          * to UAR registers, so it is safe to allocate the UAR with any
80          * memory mapping type.
81          */
82         priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
83         if (!priv->uar) {
84                 rte_errno = errno;
85                 DRV_LOG(ERR, "Failed to allocate UAR.");
86                 goto error;
87         }
88         return 0;
89 error:
90         mlx5_vdpa_event_qp_global_release(priv);
91         return -1;
92 }
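
/*
 * Note (added for clarity): both the CQ event channel created here and the
 * error channel created in mlx5_vdpa_err_event_setup() are switched to
 * O_NONBLOCK, so the devx_get_event() drain loops in the release/unset paths
 * and in the interrupt handlers return as soon as no event is pending
 * instead of blocking the caller.
 */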
93
94 static void
95 mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
96 {
97         mlx5_devx_cq_destroy(&cq->cq_obj);
98         memset(cq, 0, sizeof(*cq));
99 }
100
101 static inline void __rte_unused
102 mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
103 {
104         uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
105         uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
106         uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
107         uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
108         uint64_t db_be = rte_cpu_to_be_64(doorbell);
109         uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);
110
111         rte_io_wmb();
112         cq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
113         rte_wmb();
114 #ifdef RTE_ARCH_64
115         *(uint64_t *)addr = db_be;
116 #else
117         *(uint32_t *)addr = db_be;
118         rte_io_wmb();
119         *((uint32_t *)addr + 1) = db_be >> 32;
120 #endif
121         cq->arm_sn++;
122         cq->armed = 1;
123 }
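
/*
 * Doorbell layout note (derived from the code above): the high 32 bits of
 * the UAR write combine the arm sequence number (shifted by
 * MLX5_CQ_SQN_OFFSET), the "arm all" command and the current CQ consumer
 * index, while the low 32 bits hold the CQ number.  The same high word is
 * stored in the CQ arm doorbell record before the UAR write so the device
 * can match the arm request against the consumer index.
 */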
124
125 static int
126 mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
127                     int callfd, struct mlx5_vdpa_cq *cq)
128 {
129         struct mlx5_devx_cq_attr attr = {
130                 .use_first_only = 1,
131                 .uar_page_id = priv->uar->page_id,
132         };
133         uint16_t event_nums[1] = {0};
134         int ret;
135
136         ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, log_desc_n, &attr,
137                                   SOCKET_ID_ANY);
138         if (ret)
139                 goto error;
140         cq->cq_ci = 0;
141         cq->log_desc_n = log_desc_n;
142         rte_spinlock_init(&cq->sl);
143         /* Subscribe CQ event to the event channel controlled by the driver. */
144         ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
145                                                 cq->cq_obj.cq->obj,
146                                                 sizeof(event_nums), event_nums,
147                                                 (uint64_t)(uintptr_t)cq);
148         if (ret) {
149                 DRV_LOG(ERR, "Failed to subscribe CQE event.");
150                 rte_errno = errno;
151                 goto error;
152         }
153         cq->callfd = callfd;
154         /* Initialize the CQE as HW-owned with no completions reported yet. */
155         cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
156         cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
157         /* First arming. */
158         mlx5_vdpa_cq_arm(priv, cq);
159         return 0;
160 error:
161         mlx5_vdpa_cq_destroy(cq);
162         return -1;
163 }
164
165 static inline uint32_t
166 mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
167 {
168         struct mlx5_vdpa_event_qp *eqp =
169                                 container_of(cq, struct mlx5_vdpa_event_qp, cq);
170         const unsigned int cq_size = 1 << cq->log_desc_n;
171         union {
172                 struct {
173                         uint16_t wqe_counter;
174                         uint8_t rsvd5;
175                         uint8_t op_own;
176                 };
177                 uint32_t word;
178         } last_word;
179         uint16_t next_wqe_counter = cq->cq_ci;
180         uint16_t cur_wqe_counter;
181         uint16_t comp;
182
183         last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
184         cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
185         comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
186         if (comp) {
187                 cq->cq_ci += comp;
188                 MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
189                             MLX5_CQE_INVALID);
190                 if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
191                                MLX5_CQE_RESP_ERR ||
192                                MLX5_CQE_OPCODE(last_word.op_own) ==
193                                MLX5_CQE_REQ_ERR)))
194                         cq->errors++;
195                 rte_io_wmb();
196                 /* Ring CQ doorbell record. */
197                 cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
198                 rte_io_wmb();
199                 /* Ring SW QP doorbell record. */
200                 eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
201         }
202         return comp;
203 }
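
/*
 * Worked example for the collapsed-CQ accounting above (illustrative only):
 * the CQ is created with use_first_only, so the device keeps overwriting
 * CQE 0 and only its 16-bit wqe_counter advances.  If cq_ci is 0xFFFE and
 * the last reported wqe_counter is 0x0001, then
 * comp = 0x0001 + 1 - 0xFFFE = 4 (modulo 2^16), i.e. four new completions;
 * the wrap-around is handled by the unsigned 16-bit arithmetic.
 */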
204
205 static void
206 mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
207 {
208         struct mlx5_vdpa_cq *cq;
209         int i;
210
211         for (i = 0; i < priv->nr_virtqs; i++) {
212                 cq = &priv->virtqs[i].eqp.cq;
213                 if (cq->cq_obj.cq && !cq->armed)
214                         mlx5_vdpa_cq_arm(priv, cq);
215         }
216 }
217
218 static void
219 mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
220 {
221         if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
222                 switch (max) {
223                 case 0:
224                         priv->timer_delay_us += priv->event_us;
225                         break;
226                 case 1:
227                         break;
228                 default:
229                         priv->timer_delay_us /= max;
230                         break;
231                 }
232         }
233         if (priv->timer_delay_us)
234                 usleep(priv->timer_delay_us);
235         else
236                 /* Give up the CPU to improve polling threads scheduling. */
237                 pthread_yield();
238 }
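
/*
 * Example of the dynamic-timer behaviour above, assuming event_us == 100 and
 * a current delay of 1000 us: a poll round with no completions (max == 0)
 * raises the delay to 1100 us, a round with max == 1 keeps it at 1000 us,
 * and a busy round with max == 10 shrinks it to 100 us.  A delay of zero
 * skips usleep() entirely and only yields the CPU.
 */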
239
240 static void *
241 mlx5_vdpa_poll_handle(void *arg)
242 {
243         struct mlx5_vdpa_priv *priv = arg;
244         int i;
245         struct mlx5_vdpa_cq *cq;
246         uint32_t max;
247         uint64_t current_tic;
248
249         pthread_mutex_lock(&priv->timer_lock);
250         while (!priv->timer_on)
251                 pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
252         pthread_mutex_unlock(&priv->timer_lock);
253         priv->timer_delay_us = priv->event_mode ==
254                                             MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
255                                               MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
256                                                                  priv->event_us;
257         while (1) {
258                 max = 0;
259                 pthread_mutex_lock(&priv->vq_config_lock);
260                 for (i = 0; i < priv->nr_virtqs; i++) {
261                         cq = &priv->virtqs[i].eqp.cq;
262                         if (cq->cq_obj.cq && !cq->armed) {
263                                 uint32_t comp = mlx5_vdpa_cq_poll(cq);
264
265                                 if (comp) {
266                                         /* Notify the guest about consumed descriptors. */
267                                         if (cq->callfd != -1)
268                                                 eventfd_write(cq->callfd,
269                                                               (eventfd_t)1);
270                                         if (comp > max)
271                                                 max = comp;
272                                 }
273                         }
274                 }
275                 current_tic = rte_rdtsc();
276                 if (!max) {
277                         /* No traffic? Stop the timer and switch to interrupts. */
278                         if (current_tic - priv->last_traffic_tic >=
279                             rte_get_timer_hz() * priv->no_traffic_time_s) {
280                                 DRV_LOG(DEBUG, "Device %s traffic was stopped.",
281                                         priv->vdev->device->name);
282                                 mlx5_vdpa_arm_all_cqs(priv);
283                                 pthread_mutex_unlock(&priv->vq_config_lock);
284                                 pthread_mutex_lock(&priv->timer_lock);
285                                 priv->timer_on = 0;
286                                 while (!priv->timer_on)
287                                         pthread_cond_wait(&priv->timer_cond,
288                                                           &priv->timer_lock);
289                                 pthread_mutex_unlock(&priv->timer_lock);
290                                 priv->timer_delay_us = priv->event_mode ==
291                                             MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
292                                               MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
293                                                                  priv->event_us;
294                                 continue;
295                         }
296                 } else {
297                         priv->last_traffic_tic = current_tic;
298                 }
299                 pthread_mutex_unlock(&priv->vq_config_lock);
300                 mlx5_vdpa_timer_sleep(priv, max);
301         }
302         return NULL;
303 }
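
/*
 * Life cycle of the polling thread above: it sleeps on timer_cond until the
 * interrupt handler reports traffic, then polls every virtq CQ under
 * vq_config_lock.  When no completion is seen for no_traffic_time_s seconds
 * it re-arms all CQs, turns the timer off and goes back to waiting on the
 * condition variable, so interrupts take over until traffic resumes.
 */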
304
305 static void
306 mlx5_vdpa_interrupt_handler(void *cb_arg)
307 {
308         struct mlx5_vdpa_priv *priv = cb_arg;
309 #ifdef HAVE_IBV_DEVX_EVENT
310         union {
311                 struct mlx5dv_devx_async_event_hdr event_resp;
312                 uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
313         } out;
314
315         pthread_mutex_lock(&priv->vq_config_lock);
316         while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
317                                          sizeof(out.buf)) >=
318                                        (ssize_t)sizeof(out.event_resp.cookie)) {
319                 struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
320                                                (uintptr_t)out.event_resp.cookie;
321                 struct mlx5_vdpa_event_qp *eqp = container_of(cq,
322                                                  struct mlx5_vdpa_event_qp, cq);
323                 struct mlx5_vdpa_virtq *virtq = container_of(eqp,
324                                                    struct mlx5_vdpa_virtq, eqp);
325
326                 if (!virtq->enable)
327                         continue;
328                 mlx5_vdpa_cq_poll(cq);
329                 /* Notify the guest about consumed descriptors. */
330                 if (cq->callfd != -1)
331                         eventfd_write(cq->callfd, (eventfd_t)1);
332                 if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
333                         mlx5_vdpa_cq_arm(priv, cq);
334                         pthread_mutex_unlock(&priv->vq_config_lock);
335                         return;
336                 }
337                 /* Don't arm again - timer will take control. */
338                 DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
339                         " Timer is %s, cq ci is %u.",
340                         priv->vdev->device->name,
341                         (int)virtq->index, cq->cq_obj.cq->id,
342                         priv->timer_on ? "on" : "off", cq->cq_ci);
343                 cq->armed = 0;
344         }
345 #endif
346
347         /* Traffic detected: make sure timer is on. */
348         priv->last_traffic_tic = rte_rdtsc();
349         pthread_mutex_lock(&priv->timer_lock);
350         if (!priv->timer_on) {
351                 priv->timer_on = 1;
352                 pthread_cond_signal(&priv->timer_cond);
353         }
354         pthread_mutex_unlock(&priv->timer_lock);
355         pthread_mutex_unlock(&priv->vq_config_lock);
356 }
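
/*
 * Hand-off note: in the timer modes the handler above drains the event
 * channel, services each signaled CQ once, clears cq->armed and wakes the
 * polling thread, which then keeps polling without further interrupts until
 * traffic stops.  In MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT the CQ is re-armed
 * immediately instead and no polling thread is involved.
 */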
357
358 static void
359 mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
360 {
361 #ifdef HAVE_IBV_DEVX_EVENT
362         struct mlx5_vdpa_priv *priv = cb_arg;
363         union {
364                 struct mlx5dv_devx_async_event_hdr event_resp;
365                 uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
366         } out;
367         uint32_t vq_index, i, version;
368         struct mlx5_vdpa_virtq *virtq;
369         uint64_t sec;
370
371         pthread_mutex_lock(&priv->vq_config_lock);
372         while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
373                                          sizeof(out.buf)) >=
374                                        (ssize_t)sizeof(out.event_resp.cookie)) {
375                 vq_index = out.event_resp.cookie & UINT32_MAX;
376                 version = out.event_resp.cookie >> 32;
377                 if (vq_index >= priv->nr_virtqs) {
378                         DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
379                                 priv->vdev->device->name, vq_index);
380                         continue;
381                 }
382                 virtq = &priv->virtqs[vq_index];
383                 if (!virtq->enable || virtq->version != version)
384                         continue;
385                 if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
386                         continue;
387                 virtq->stopped = true;
388                 /* Query error info. */
389                 if (mlx5_vdpa_virtq_query(priv, vq_index))
390                         goto log;
391                 /* Disable vq. */
392                 if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
393                         DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
394                         goto log;
395                 }
396                 /* Retry if error happens less than N times in 3 seconds. */
397                 sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
398                 if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
399                         /* Retry. */
400                         if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
401                                 DRV_LOG(ERR, "Failed to enable virtq %d.",
402                                         vq_index);
403                         else
404                                 DRV_LOG(WARNING, "Recover virtq %d: %u.",
405                                         vq_index, ++virtq->n_retry);
406                 } else {
407                         /* Too many errors in a short time, give up. */
408                         DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
409                                 priv->vdev->device->name, vq_index);
410                 }
411 log:
412                 /* Shift the error-time log and append the current time. */
413                 for (i = 1; i < RTE_DIM(virtq->err_time); i++)
414                         virtq->err_time[i - 1] = virtq->err_time[i];
415                 virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
416         }
417         pthread_mutex_unlock(&priv->vq_config_lock);
418 #endif
419 }
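
/*
 * Error-event cookie layout as used above: the virtq index is stored in the
 * low 32 bits of the subscription cookie and the virtq version in the high
 * 32 bits, so stale events from a re-created virtq are dropped.  The
 * err_time[] array is a sliding window of the last error timestamps; the
 * virtq is restarted only if the oldest recorded error is older than
 * MLX5_VDPA_ERROR_TIME_SEC, otherwise recovery is abandoned.
 */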
420
421 int
422 mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
423 {
424         int ret;
425         int flags;
426
427         /* Setup device event channel. */
428         priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->ctx, 0);
429         if (!priv->err_chnl) {
430                 rte_errno = errno;
431                 DRV_LOG(ERR, "Failed to create device event channel %d.",
432                         rte_errno);
433                 goto error;
434         }
435         flags = fcntl(priv->err_chnl->fd, F_GETFL);
436         ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
437         if (ret) {
438                 DRV_LOG(ERR, "Failed to change device event channel FD.");
439                 goto error;
440         }
441         priv->err_intr_handle.fd = priv->err_chnl->fd;
442         priv->err_intr_handle.type = RTE_INTR_HANDLE_EXT;
443         if (rte_intr_callback_register(&priv->err_intr_handle,
444                                        mlx5_vdpa_err_interrupt_handler,
445                                        priv)) {
446                 priv->err_intr_handle.fd = 0;
447                 DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
448                         priv->vid);
449                 goto error;
450         } else {
451                 DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
452                         priv->vid);
453         }
454         return 0;
455 error:
456         mlx5_vdpa_err_event_unset(priv);
457         return -1;
458 }
459
460 void
461 mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
462 {
463         int retries = MLX5_VDPA_INTR_RETRIES;
464         int ret = -EAGAIN;
465
466         if (!priv->err_intr_handle.fd)
467                 return;
468         while (retries-- && ret == -EAGAIN) {
469                 ret = rte_intr_callback_unregister(&priv->err_intr_handle,
470                                             mlx5_vdpa_err_interrupt_handler,
471                                             priv);
472                 if (ret == -EAGAIN) {
473                         DRV_LOG(DEBUG, "Try again to unregister fd %d "
474                                 "of error interrupt, retries = %d.",
475                                 priv->err_intr_handle.fd, retries);
476                         rte_pause();
477                 }
478         }
479         memset(&priv->err_intr_handle, 0, sizeof(priv->err_intr_handle));
480         if (priv->err_chnl) {
481 #ifdef HAVE_IBV_DEVX_EVENT
482                 union {
483                         struct mlx5dv_devx_async_event_hdr event_resp;
484                         uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
485                                     128];
486                 } out;
487
488                 /* Clean all pending events. */
489                 while (mlx5_glue->devx_get_event(priv->err_chnl,
490                        &out.event_resp, sizeof(out.buf)) >=
491                        (ssize_t)sizeof(out.event_resp.cookie))
492                         ;
493 #endif
494                 mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
495                 priv->err_chnl = NULL;
496         }
497 }
498
499 int
500 mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
501 {
502         int ret;
503         rte_cpuset_t cpuset;
504         pthread_attr_t attr;
505         char name[16];
506         const struct sched_param sp = {
507                 .sched_priority = sched_get_priority_max(SCHED_RR),
508         };
509
510         if (!priv->eventc)
511                 /* All virtqs are in poll mode. */
512                 return 0;
513         if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
514                 pthread_mutex_init(&priv->timer_lock, NULL);
515                 pthread_cond_init(&priv->timer_cond, NULL);
516                 priv->timer_on = 0;
517                 pthread_attr_init(&attr);
518                 CPU_ZERO(&cpuset);
519                 if (priv->event_core != -1)
520                         CPU_SET(priv->event_core, &cpuset);
521                 else
522                         cpuset = rte_lcore_cpuset(rte_get_main_lcore());
523                 ret = pthread_attr_setaffinity_np(&attr, sizeof(cpuset),
524                                                   &cpuset);
525                 if (ret) {
526                         DRV_LOG(ERR, "Failed to set thread affinity.");
527                         return -1;
528                 }
529                 ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
530                 if (ret) {
531                         DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
532                         return -1;
533                 }
534                 ret = pthread_attr_setschedparam(&attr, &sp);
535                 if (ret) {
536                         DRV_LOG(ERR, "Failed to set thread priority.");
537                         return -1;
538                 }
539                 ret = pthread_create(&priv->timer_tid, &attr,
540                                      mlx5_vdpa_poll_handle, (void *)priv);
541                 if (ret) {
542                         DRV_LOG(ERR, "Failed to create timer thread.");
543                         return -1;
544                 }
545                 snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
546                 ret = pthread_setname_np(priv->timer_tid, name);
547                 if (ret) {
548                         DRV_LOG(ERR, "Failed to set timer thread name.");
549                         return -1;
550                 }
551         }
552         priv->intr_handle.fd = priv->eventc->fd;
553         priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
554         if (rte_intr_callback_register(&priv->intr_handle,
555                                        mlx5_vdpa_interrupt_handler, priv)) {
556                 priv->intr_handle.fd = 0;
557                 DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
558                 goto error;
559         }
560         return 0;
561 error:
562         mlx5_vdpa_cqe_event_unset(priv);
563         return -1;
564 }
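
/*
 * Setup summary (added for clarity): except in interrupt-only mode, a
 * dedicated real-time (SCHED_RR, maximum priority) polling thread named
 * "vDPA-mlx5-<vid>" is created and pinned either to the core configured by
 * the event_core devarg or to the main lcore.  The CQE interrupt handler is
 * registered in all modes so that the first completion after an idle period
 * can wake the polling thread.
 */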
565
566 void
567 mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
568 {
569         int retries = MLX5_VDPA_INTR_RETRIES;
570         int ret = -EAGAIN;
571         void *status;
572
573         if (priv->intr_handle.fd) {
574                 while (retries-- && ret == -EAGAIN) {
575                         ret = rte_intr_callback_unregister(&priv->intr_handle,
576                                                     mlx5_vdpa_interrupt_handler,
577                                                     priv);
578                         if (ret == -EAGAIN) {
579                                 DRV_LOG(DEBUG, "Try again to unregister fd %d "
580                                         "of CQ interrupt, retries = %d.",
581                                         priv->intr_handle.fd, retries);
582                                 rte_pause();
583                         }
584                 }
585                 memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
586         }
587         if (priv->timer_tid) {
588                 pthread_cancel(priv->timer_tid);
589                 pthread_join(priv->timer_tid, &status);
590         }
591         priv->timer_tid = 0;
592 }
593
594 void
595 mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
596 {
597         if (eqp->sw_qp)
598                 claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
599         if (eqp->umem_obj)
600                 claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
601         if (eqp->umem_buf)
602                 rte_free(eqp->umem_buf);
603         if (eqp->fw_qp)
604                 claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
605         mlx5_vdpa_cq_destroy(&eqp->cq);
606         memset(eqp, 0, sizeof(*eqp));
607 }
608
609 static int
610 mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
611 {
612         if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
613                                           eqp->sw_qp->id)) {
614                 DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
615                         rte_errno);
616                 return -1;
617         }
618         if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
619                                           eqp->fw_qp->id)) {
620                 DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
621                         rte_errno);
622                 return -1;
623         }
624         if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
625                                           eqp->sw_qp->id)) {
626                 DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
627                         rte_errno);
628                 return -1;
629         }
630         if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
631                                           eqp->fw_qp->id)) {
632                 DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
633                         rte_errno);
634                 return -1;
635         }
636         if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
637                                           eqp->sw_qp->id)) {
638                 DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
639                         rte_errno);
640                 return -1;
641         }
642         if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
643                                           eqp->fw_qp->id)) {
644                 DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
645                         rte_errno);
646                 return -1;
647         }
648         return 0;
649 }
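
/*
 * The two QPs handled above form a loopback pair: the FW QP and the SW QP
 * are moved through the usual RST -> INIT -> RTR -> RTS sequence with each
 * one using the other's QP number as its remote, so that completions
 * produced on behalf of the virtq surface on the SW QP and hence on the CQ
 * polled by this driver (see mlx5_vdpa_event_qp_create() below).
 */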
650
651 int
652 mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
653                           int callfd, struct mlx5_vdpa_event_qp *eqp)
654 {
655         struct mlx5_devx_qp_attr attr = {0};
656         uint16_t log_desc_n = rte_log2_u32(desc_n);
657         uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
658                                                        sizeof(*eqp->db_rec) * 2;
659
660         if (mlx5_vdpa_event_qp_global_prepare(priv))
661                 return -1;
662         if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
663                 return -1;
664         attr.pd = priv->pdn;
665         eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
666         if (!eqp->fw_qp) {
667                 DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
668                 goto error;
669         }
670         eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
671         if (!eqp->umem_buf) {
672                 DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
673                 rte_errno = ENOMEM;
674                 goto error;
675         }
676         eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
677                                                (void *)(uintptr_t)eqp->umem_buf,
678                                                umem_size,
679                                                IBV_ACCESS_LOCAL_WRITE);
680         if (!eqp->umem_obj) {
681                 DRV_LOG(ERR, "Failed to register umem for SW QP.");
682                 goto error;
683         }
684         attr.uar_index = priv->uar->page_id;
685         attr.cqn = eqp->cq.cq_obj.cq->id;
686         attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
687         attr.rq_size = 1 << log_desc_n;
688         attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
689         attr.sq_size = 0; /* No SQ is needed. */
690         attr.dbr_umem_valid = 1;
691         attr.wq_umem_id = eqp->umem_obj->umem_id;
692         attr.wq_umem_offset = 0;
693         attr.dbr_umem_id = eqp->umem_obj->umem_id;
694         attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
695         eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
696         if (!eqp->sw_qp) {
697                 DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
698                 goto error;
699         }
700         eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
701         if (mlx5_vdpa_qps2rts(eqp))
702                 goto error;
703         /* First doorbell ringing: make all receive WQEs available. */
704         rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
705         return 0;
706 error:
707         mlx5_vdpa_event_qp_destroy(eqp);
708         return -1;
709 }
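
/*
 * Memory layout of the SW QP umem registered above: (1 << log_desc_n)
 * receive WQEs of MLX5_WSEG_SIZE bytes each, immediately followed by the two
 * doorbell-record words (dbr_address points right past the WQ area).  The
 * final rte_write32() posts the whole receive ring to hardware in one shot.
 * Illustrative usage sketch (the real callers live in the virtq setup code
 * and may pass different sizes; idx, vq_size and callfd are placeholders):
 *
 *	struct mlx5_vdpa_event_qp *eqp = &priv->virtqs[idx].eqp;
 *
 *	if (mlx5_vdpa_event_qp_create(priv, vq_size, callfd, eqp))
 *		return -1;
 *	// eqp->sw_qp->id is then used as the virtq event QPN.
 *	mlx5_vdpa_event_qp_destroy(eqp);
 */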