dpdk.git / drivers/vdpa/mlx5/mlx5_vdpa_event.c (010543cb6a247f1f956fc46ae6574975a85878a8)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_ERROR_TIME_SEC 3u

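/* Free the UAR, drain pending events and destroy the event channel. */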
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
        if (priv->uar) {
                mlx5_glue->devx_free_uar(priv->uar);
                priv->uar = NULL;
        }
#ifdef HAVE_IBV_DEVX_EVENT
        if (priv->eventc) {
                union {
                        struct mlx5dv_devx_async_event_hdr event_resp;
                        uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
                                                                         + 128];
                } out;

                /* Clean all pending events. */
                while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                       sizeof(out.buf)) >=
                       (ssize_t)sizeof(out.event_resp.cookie))
                        ;
                mlx5_glue->devx_destroy_event_channel(priv->eventc);
                priv->eventc = NULL;
        }
#endif
        priv->eqn = 0;
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
        int flags, ret;

        if (priv->eventc)
                return 0;
        if (mlx5_glue->devx_query_eqn(priv->ctx, 0, &priv->eqn)) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
                return -1;
        }
        priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
                           MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!priv->eventc) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create event channel %d.",
                        rte_errno);
                goto error;
        }
        flags = fcntl(priv->eventc->fd, F_GETFL);
        ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
                DRV_LOG(ERR, "Failed to change event channel FD.");
                goto error;
        }
        priv->uar = mlx5_glue->devx_alloc_uar(priv->ctx, 0);
        if (!priv->uar) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to allocate UAR.");
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_event_qp_global_release(priv);
        return -1;
}

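/* Release the CQ object, its umem registration and its buffer. */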
static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
        if (cq->cq)
                claim_zero(mlx5_devx_cmd_destroy(cq->cq));
        if (cq->umem_obj)
                claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
        if (cq->umem_buf)
                rte_free((void *)(uintptr_t)cq->umem_buf);
        memset(cq, 0, sizeof(*cq));
}

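/* Update the CQ arm doorbell record and ring the UAR doorbell to get an
 * event on the next completion.
 */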
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
        uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
        uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
        uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
        uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq->id;
        uint64_t db_be = rte_cpu_to_be_64(doorbell);
        uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

        rte_io_wmb();
        cq->db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
        rte_wmb();
#ifdef RTE_ARCH_64
        *(uint64_t *)addr = db_be;
#else
        *(uint32_t *)addr = db_be;
        rte_io_wmb();
        *((uint32_t *)addr + 1) = db_be >> 32;
#endif
        cq->arm_sn++;
        cq->armed = 1;
}

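/* Create a CQ on the driver event channel: allocate and register the CQ
 * buffer, create the CQ by DevX, subscribe its events and arm it.
 */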
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
                    int callfd, struct mlx5_vdpa_cq *cq)
{
        struct mlx5_devx_cq_attr attr = {0};
        size_t pgsize = sysconf(_SC_PAGESIZE);
        uint32_t umem_size;
        uint16_t event_nums[1] = {0};
        uint16_t cq_size = 1 << log_desc_n;
        int ret;

        cq->log_desc_n = log_desc_n;
        umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
        cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
        if (!cq->umem_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for CQ.");
                rte_errno = ENOMEM;
                return -ENOMEM;
        }
        cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
                                                (void *)(uintptr_t)cq->umem_buf,
                                                umem_size,
                                                IBV_ACCESS_LOCAL_WRITE);
        if (!cq->umem_obj) {
                DRV_LOG(ERR, "Failed to register umem for CQ.");
                goto error;
        }
        attr.q_umem_valid = 1;
        attr.db_umem_valid = 1;
        attr.use_first_only = 1;
        attr.overrun_ignore = 0;
        attr.uar_page_id = priv->uar->page_id;
        attr.q_umem_id = cq->umem_obj->umem_id;
        attr.q_umem_offset = 0;
        attr.db_umem_id = cq->umem_obj->umem_id;
        attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
        attr.eqn = priv->eqn;
        attr.log_cq_size = log_desc_n;
        attr.log_page_size = rte_log2_u32(pgsize);
        cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
        if (!cq->cq)
                goto error;
        cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
        cq->cq_ci = 0;
        rte_spinlock_init(&cq->sl);
        /* Subscribe CQ event to the event channel controlled by the driver. */
        ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
                                                   sizeof(event_nums),
                                                   event_nums,
                                                   (uint64_t)(uintptr_t)cq);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe CQE event.");
                rte_errno = errno;
                goto error;
        }
        cq->callfd = callfd;
        /* Init the first CQE to all ones so that HW owns it at the start. */
        cq->cqes[0].op_own = MLX5_CQE_OWNER_MASK;
        cq->cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
        /* First arming. */
        mlx5_vdpa_cq_arm(priv, cq);
        return 0;
error:
        mlx5_vdpa_cq_destroy(cq);
        return -1;
}

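/* Poll the CQ for new completions, update the CQ consumer index and ring
 * the CQ and SW QP doorbell records. Return the number of completions.
 */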
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
        struct mlx5_vdpa_event_qp *eqp =
                                container_of(cq, struct mlx5_vdpa_event_qp, cq);
        const unsigned int cq_size = 1 << cq->log_desc_n;
        union {
                struct {
                        uint16_t wqe_counter;
                        uint8_t rsvd5;
                        uint8_t op_own;
                };
                uint32_t word;
        } last_word;
        uint16_t next_wqe_counter = cq->cq_ci;
        uint16_t cur_wqe_counter;
        uint16_t comp;

        last_word.word = rte_read32(&cq->cqes[0].wqe_counter);
        cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
        comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
        if (comp) {
                cq->cq_ci += comp;
                MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
                            MLX5_CQE_INVALID);
                if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
                               MLX5_CQE_RESP_ERR ||
                               MLX5_CQE_OPCODE(last_word.op_own) ==
                               MLX5_CQE_REQ_ERR)))
                        cq->errors++;
                rte_io_wmb();
                /* Ring CQ doorbell record. */
                cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
                rte_io_wmb();
                /* Ring SW QP doorbell record. */
                eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
        }
        return comp;
}

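/* Arm all the created and currently unarmed virtq CQs. */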
static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
        struct mlx5_vdpa_cq *cq;
        int i;

        for (i = 0; i < priv->nr_virtqs; i++) {
                cq = &priv->virtqs[i].eqp.cq;
                if (cq->cq && !cq->armed)
                        mlx5_vdpa_cq_arm(priv, cq);
        }
}

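/* In dynamic timer mode, adjust the timer delay according to the latest
 * completion burst, then sleep for the delay time.
 */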
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
        if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
                switch (max) {
                case 0:
                        priv->timer_delay_us += priv->event_us;
                        break;
                case 1:
                        break;
                default:
                        priv->timer_delay_us /= max;
                        break;
                }
        }
        usleep(priv->timer_delay_us);
}

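/* Timer thread routine: poll all the virtq CQs, notify the guest on new
 * completions and switch back to interrupt mode when no traffic is seen
 * for the configured time.
 */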
static void *
mlx5_vdpa_poll_handle(void *arg)
{
        struct mlx5_vdpa_priv *priv = arg;
        int i;
        struct mlx5_vdpa_cq *cq;
        uint32_t max;
        uint64_t current_tic;

        pthread_mutex_lock(&priv->timer_lock);
        while (!priv->timer_on)
                pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
        pthread_mutex_unlock(&priv->timer_lock);
        priv->timer_delay_us = priv->event_mode ==
                                            MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
                                              MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
                                                                 priv->event_us;
        while (1) {
                max = 0;
                pthread_mutex_lock(&priv->vq_config_lock);
                for (i = 0; i < priv->nr_virtqs; i++) {
                        cq = &priv->virtqs[i].eqp.cq;
                        if (cq->cq && !cq->armed) {
                                uint32_t comp = mlx5_vdpa_cq_poll(cq);

                                if (comp) {
                                        /* Notify guest of consumed descs. */
                                        if (cq->callfd != -1)
                                                eventfd_write(cq->callfd,
                                                              (eventfd_t)1);
                                        if (comp > max)
                                                max = comp;
                                }
                        }
                }
                current_tic = rte_rdtsc();
                if (!max) {
                        /* No traffic? Stop the timer, move to interrupts. */
                        if (current_tic - priv->last_traffic_tic >=
                            rte_get_timer_hz() * priv->no_traffic_time_s) {
                                DRV_LOG(DEBUG, "Device %s traffic was stopped.",
                                        priv->vdev->device->name);
                                mlx5_vdpa_arm_all_cqs(priv);
                                pthread_mutex_unlock(&priv->vq_config_lock);
                                pthread_mutex_lock(&priv->timer_lock);
                                priv->timer_on = 0;
                                while (!priv->timer_on)
                                        pthread_cond_wait(&priv->timer_cond,
                                                          &priv->timer_lock);
                                pthread_mutex_unlock(&priv->timer_lock);
                                priv->timer_delay_us = priv->event_mode ==
                                            MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
                                              MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
                                                                 priv->event_us;
                                continue;
                        }
                } else {
                        priv->last_traffic_tic = current_tic;
                }
                pthread_mutex_unlock(&priv->vq_config_lock);
                mlx5_vdpa_timer_sleep(priv, max);
        }
        return NULL;
}

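/* CQ event channel handler: poll the reported CQs, notify the guest and
 * wake up the timer thread (or re-arm the CQ in interrupt-only mode).
 */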
static void
mlx5_vdpa_interrupt_handler(void *cb_arg)
{
        struct mlx5_vdpa_priv *priv = cb_arg;
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;

        pthread_mutex_lock(&priv->vq_config_lock);
        while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                                         sizeof(out.buf)) >=
                                       (ssize_t)sizeof(out.event_resp.cookie)) {
                struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
                                               (uintptr_t)out.event_resp.cookie;
                struct mlx5_vdpa_event_qp *eqp = container_of(cq,
                                                 struct mlx5_vdpa_event_qp, cq);
                struct mlx5_vdpa_virtq *virtq = container_of(eqp,
                                                   struct mlx5_vdpa_virtq, eqp);

                if (!virtq->enable)
                        continue;
                mlx5_vdpa_cq_poll(cq);
                /* Notify guest of consumed descs. */
                if (cq->callfd != -1)
                        eventfd_write(cq->callfd, (eventfd_t)1);
                if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
                        mlx5_vdpa_cq_arm(priv, cq);
                        pthread_mutex_unlock(&priv->vq_config_lock);
                        return;
                }
                /* Don't arm again - timer will take control. */
                DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
                        " Timer is %s, cq ci is %u.\n",
                        priv->vdev->device->name,
                        (int)virtq->index, cq->cq->id,
                        priv->timer_on ? "on" : "off", cq->cq_ci);
                cq->armed = 0;
        }
#endif

        /* Traffic detected: make sure timer is on. */
        priv->last_traffic_tic = rte_rdtsc();
        pthread_mutex_lock(&priv->timer_lock);
        if (!priv->timer_on) {
                priv->timer_on = 1;
                pthread_cond_signal(&priv->timer_cond);
        }
        pthread_mutex_unlock(&priv->timer_lock);
        pthread_mutex_unlock(&priv->vq_config_lock);
}

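/* Device error event handler: query the failing virtq, disable it and
 * re-enable it unless its errors repeat too fast.
 */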
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
        struct mlx5_vdpa_priv *priv = cb_arg;
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        uint32_t vq_index, i, version;
        struct mlx5_vdpa_virtq *virtq;
        uint64_t sec;

        pthread_mutex_lock(&priv->vq_config_lock);
        while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
                                         sizeof(out.buf)) >=
                                       (ssize_t)sizeof(out.event_resp.cookie)) {
                vq_index = out.event_resp.cookie & UINT32_MAX;
                version = out.event_resp.cookie >> 32;
                if (vq_index >= priv->nr_virtqs) {
                        DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
                                priv->vdev->device->name, vq_index);
                        continue;
                }
                virtq = &priv->virtqs[vq_index];
                if (!virtq->enable || virtq->version != version)
                        continue;
                if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
                        continue;
                virtq->stopped = true;
                /* Query error info. */
                if (mlx5_vdpa_virtq_query(priv, vq_index))
                        goto log;
                /* Disable vq. */
                if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
                        DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
                        goto log;
                }
                /* Retry if error happens less than N times in 3 seconds. */
                sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
                if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
                        /* Retry. */
                        if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
                                DRV_LOG(ERR, "Failed to enable virtq %d.",
                                        vq_index);
                        else
                                DRV_LOG(WARNING, "Recover virtq %d: %u.",
                                        vq_index, ++virtq->n_retry);
                } else {
                        /* Too many errors within the time window, give up. */
                        DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
                                priv->vdev->device->name, vq_index);
                }
log:
                /* Shift the error time log and append the current time. */
                for (i = 1; i < RTE_DIM(virtq->err_time); i++)
                        virtq->err_time[i - 1] = virtq->err_time[i];
                virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
        }
        pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

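/* Create the device error event channel and register its handler. */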
int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;
        int flags;

        /* Setup device event channel. */
        priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->ctx, 0);
        if (!priv->err_chnl) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create device event channel %d.",
                        rte_errno);
                goto error;
        }
        flags = fcntl(priv->err_chnl->fd, F_GETFL);
        ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
                DRV_LOG(ERR, "Failed to change device event channel FD.");
                goto error;
        }
        priv->err_intr_handle.fd = priv->err_chnl->fd;
        priv->err_intr_handle.type = RTE_INTR_HANDLE_EXT;
        if (rte_intr_callback_register(&priv->err_intr_handle,
                                       mlx5_vdpa_err_interrupt_handler,
                                       priv)) {
                priv->err_intr_handle.fd = 0;
                DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
                        priv->vid);
                goto error;
        } else {
                DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
                        priv->vid);
        }
        return 0;
error:
        mlx5_vdpa_err_event_unset(priv);
        return -1;
}

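/* Unregister the error interrupt handler, drain pending events and
 * destroy the error event channel.
 */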
void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
        int retries = MLX5_VDPA_INTR_RETRIES;
        int ret = -EAGAIN;

        if (!priv->err_intr_handle.fd)
                return;
        while (retries-- && ret == -EAGAIN) {
                ret = rte_intr_callback_unregister(&priv->err_intr_handle,
                                            mlx5_vdpa_err_interrupt_handler,
                                            priv);
                if (ret == -EAGAIN) {
                        DRV_LOG(DEBUG, "Try again to unregister fd %d "
                                "of error interrupt, retries = %d.",
                                priv->err_intr_handle.fd, retries);
                        rte_pause();
                }
        }
        memset(&priv->err_intr_handle, 0, sizeof(priv->err_intr_handle));
        if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
                union {
                        struct mlx5dv_devx_async_event_hdr event_resp;
                        uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
                                    128];
                } out;

                /* Clean all pending events. */
                while (mlx5_glue->devx_get_event(priv->err_chnl,
                       &out.event_resp, sizeof(out.buf)) >=
                       (ssize_t)sizeof(out.event_resp.cookie))
                        ;
#endif
                mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
                priv->err_chnl = NULL;
        }
}

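/* Start the CQ polling timer thread when needed and register the CQE
 * event interrupt handler.
 */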
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;

        if (!priv->eventc)
                /* All virtqs are in poll mode. */
                return 0;
        if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
                pthread_mutex_init(&priv->timer_lock, NULL);
                pthread_cond_init(&priv->timer_cond, NULL);
                priv->timer_on = 0;
                ret = pthread_create(&priv->timer_tid, NULL,
                                     mlx5_vdpa_poll_handle, (void *)priv);
                if (ret) {
                        DRV_LOG(ERR, "Failed to create timer thread.");
                        return -1;
                }
        }
        priv->intr_handle.fd = priv->eventc->fd;
        priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
        if (rte_intr_callback_register(&priv->intr_handle,
                                       mlx5_vdpa_interrupt_handler, priv)) {
                priv->intr_handle.fd = 0;
                DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_cqe_event_unset(priv);
        return -1;
}

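/* Unregister the CQE interrupt handler and terminate the timer thread. */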
void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
        int retries = MLX5_VDPA_INTR_RETRIES;
        int ret = -EAGAIN;
        void *status;

        if (priv->intr_handle.fd) {
                while (retries-- && ret == -EAGAIN) {
                        ret = rte_intr_callback_unregister(&priv->intr_handle,
                                                    mlx5_vdpa_interrupt_handler,
                                                    priv);
                        if (ret == -EAGAIN) {
                                DRV_LOG(DEBUG, "Try again to unregister fd %d "
                                        "of CQ interrupt, retries = %d.",
                                        priv->intr_handle.fd, retries);
                                rte_pause();
                        }
                }
                memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
        }
        if (priv->timer_tid) {
                pthread_cancel(priv->timer_tid);
                pthread_join(priv->timer_tid, &status);
        }
        priv->timer_tid = 0;
}

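/* Destroy the SW and FW QPs, their umem and the event CQ. */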
void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
        if (eqp->sw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
        if (eqp->umem_obj)
                claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
        if (eqp->umem_buf)
                rte_free(eqp->umem_buf);
        if (eqp->fw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
        mlx5_vdpa_cq_destroy(&eqp->cq);
        memset(eqp, 0, sizeof(*eqp));
}

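/* Move the FW and SW QPs, each pointing to the other as its remote QP,
 * through the RST->INIT->RTR->RTS state sequence.
 */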
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        return 0;
}

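/* Create an event QP: the CQ, the FW QP and the SW QP (RQ only), move
 * them to RTS and ring the first doorbell.
 */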
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
                          int callfd, struct mlx5_vdpa_event_qp *eqp)
{
        struct mlx5_devx_qp_attr attr = {0};
        uint16_t log_desc_n = rte_log2_u32(desc_n);
        uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
                                                       sizeof(*eqp->db_rec) * 2;

        if (mlx5_vdpa_event_qp_global_prepare(priv))
                return -1;
        if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
                return -1;
        attr.pd = priv->pdn;
        eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
        if (!eqp->fw_qp) {
                DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
                goto error;
        }
        eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
        if (!eqp->umem_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
                rte_errno = ENOMEM;
                goto error;
        }
        eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
                                               (void *)(uintptr_t)eqp->umem_buf,
                                               umem_size,
                                               IBV_ACCESS_LOCAL_WRITE);
        if (!eqp->umem_obj) {
                DRV_LOG(ERR, "Failed to register umem for SW QP.");
                goto error;
        }
        attr.uar_index = priv->uar->page_id;
        attr.cqn = eqp->cq.cq->id;
        attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
        attr.rq_size = 1 << log_desc_n;
        attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
        attr.sq_size = 0; /* No SQ is needed. */
        attr.dbr_umem_valid = 1;
        attr.wq_umem_id = eqp->umem_obj->umem_id;
        attr.wq_umem_offset = 0;
        attr.dbr_umem_id = eqp->umem_obj->umem_id;
        attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
        eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
        if (!eqp->sw_qp) {
                DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
                goto error;
        }
        eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
        if (mlx5_vdpa_qps2rts(eqp))
                goto error;
        /* First ringing. */
        rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
        return 0;
error:
        mlx5_vdpa_event_qp_destroy(eqp);
        return -1;
}