[dpdk.git] drivers/vdpa/mlx5/mlx5_vdpa_event.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_DEFAULT_TIMER_DELAY_US 500u
#define MLX5_VDPA_NO_TRAFFIC_TIME_S 2LLU

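/*
 * Release the global event resources: drain and destroy the DEVX event
 * channel, free the UAR and reset the EQ number. Safe to call on
 * partially prepared resources.
 */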
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
        if (priv->uar) {
                mlx5_glue->devx_free_uar(priv->uar);
                priv->uar = NULL;
        }
#ifdef HAVE_IBV_DEVX_EVENT
        if (priv->eventc) {
                union {
                        struct mlx5dv_devx_async_event_hdr event_resp;
                        uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
                                    + 128];
                } out;

                /* Clean all pending events. */
                while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                                                 sizeof(out.buf)) >=
                       (ssize_t)sizeof(out.event_resp.cookie))
                        ;
                mlx5_glue->devx_destroy_event_channel(priv->eventc);
                priv->eventc = NULL;
        }
#endif
        priv->eqn = 0;
}

/* Prepare the global resources shared by all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
        uint32_t lcore;

        if (priv->eventc)
                return 0;
        lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
        if (mlx5_glue->devx_query_eqn(priv->ctx, lcore, &priv->eqn)) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
                return -1;
        }
        priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
                           MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!priv->eventc) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create event channel %d.",
                        rte_errno);
                goto error;
        }
        priv->uar = mlx5_glue->devx_alloc_uar(priv->ctx, 0);
        if (!priv->uar) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to allocate UAR.");
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_event_qp_global_release(priv);
        return -1;
}

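/* Destroy a CQ and release its umem and doorbell resources, if any. */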
static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
        if (cq->cq)
                claim_zero(mlx5_devx_cmd_destroy(cq->cq));
        if (cq->umem_obj)
                claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
        if (cq->umem_buf)
                rte_free((void *)(uintptr_t)cq->umem_buf);
        memset(cq, 0, sizeof(*cq));
}

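/*
 * Arm the CQ for the next event: update the arm doorbell record, then
 * write the CQ doorbell register in the UAR page. On 32-bit archs the
 * 64-bit doorbell value is written as two ordered 32-bit stores.
 */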
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
        uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
        uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
        uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
        uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq->id;
        uint64_t db_be = rte_cpu_to_be_64(doorbell);
        uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

        rte_io_wmb();
        cq->db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
        rte_wmb();
#ifdef RTE_ARCH_64
        *(uint64_t *)addr = db_be;
#else
        *(uint32_t *)addr = db_be;
        rte_io_wmb();
        *((uint32_t *)addr + 1) = db_be >> 32;
#endif
        cq->arm_sn++;
        cq->armed = 1;
}

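/*
 * Create a collapsed CQ (use_first_only - HW overwrites CQE[0] in place)
 * subscribed to the driver event channel, optionally forward its events
 * to the guest callfd, and arm it for the first event.
 */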
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
                    int callfd, struct mlx5_vdpa_cq *cq)
{
        struct mlx5_devx_cq_attr attr = {0};
        size_t pgsize = sysconf(_SC_PAGESIZE);
        uint32_t umem_size;
        uint16_t event_nums[1] = {0};
        uint16_t cq_size = 1 << log_desc_n;
        int ret;

        cq->log_desc_n = log_desc_n;
        umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
        cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
        if (!cq->umem_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for CQ.");
                rte_errno = ENOMEM;
                return -ENOMEM;
        }
        cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
                                                (void *)(uintptr_t)cq->umem_buf,
                                                umem_size,
                                                IBV_ACCESS_LOCAL_WRITE);
        if (!cq->umem_obj) {
                DRV_LOG(ERR, "Failed to register umem for CQ.");
                goto error;
        }
        attr.q_umem_valid = 1;
        attr.db_umem_valid = 1;
        attr.use_first_only = 1;
        attr.overrun_ignore = 0;
        attr.uar_page_id = priv->uar->page_id;
        attr.q_umem_id = cq->umem_obj->umem_id;
        attr.q_umem_offset = 0;
        attr.db_umem_id = cq->umem_obj->umem_id;
        attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
        attr.eqn = priv->eqn;
        attr.log_cq_size = log_desc_n;
        attr.log_page_size = rte_log2_u32(pgsize);
        cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
        if (!cq->cq)
                goto error;
        cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
        cq->cq_ci = 0;
        rte_spinlock_init(&cq->sl);
        /* Subscribe CQ event to the event channel controlled by the driver. */
        ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
                                                   sizeof(event_nums),
                                                   event_nums,
                                                   (uint64_t)(uintptr_t)cq);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe CQE event.");
                rte_errno = errno;
                goto error;
        }
        if (callfd != -1) {
                ret = mlx5_glue->devx_subscribe_devx_event_fd(priv->eventc,
                                                              callfd,
                                                              cq->cq->obj, 0);
                if (ret) {
                        DRV_LOG(ERR, "Failed to subscribe CQE event fd.");
                        rte_errno = errno;
                        goto error;
                }
        }
        cq->callfd = callfd;
        /* Initialize the first CQE so that HW owns it at start. */
        cq->cqes[0].op_own = MLX5_CQE_OWNER_MASK;
        cq->cqes[0].wqe_counter = rte_cpu_to_be_16(cq_size - 1);
        /* First arming. */
        mlx5_vdpa_cq_arm(priv, cq);
        return 0;
error:
        mlx5_vdpa_cq_destroy(cq);
        return -1;
}

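/*
 * Poll the collapsed CQ: since HW overwrites the single CQE in place, the
 * number of new completions is derived from the CQE wqe_counter relative
 * to the last seen consumer index. Ring the CQ doorbell record to release
 * the CQE and the SW QP doorbell record to repost the consumed RQ WQEs.
 */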
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
        struct mlx5_vdpa_event_qp *eqp =
                                container_of(cq, struct mlx5_vdpa_event_qp, cq);
        const unsigned int cq_size = 1 << cq->log_desc_n;
        const unsigned int cq_mask = cq_size - 1;
        union {
                struct {
                        uint16_t wqe_counter;
                        uint8_t rsvd5;
                        uint8_t op_own;
                };
                uint32_t word;
        } last_word;
        uint16_t next_wqe_counter = cq->cq_ci & cq_mask;
        uint16_t cur_wqe_counter;
        uint16_t comp;

        last_word.word = rte_read32(&cq->cqes[0].wqe_counter);
        cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
        comp = (cur_wqe_counter + 1u - next_wqe_counter) & cq_mask;
        if (comp) {
                cq->cq_ci += comp;
                MLX5_ASSERT(!!(cq->cq_ci & cq_size) ==
                            MLX5_CQE_OWNER(last_word.op_own));
                MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
                            MLX5_CQE_INVALID);
                if (unlikely(MLX5_CQE_OPCODE(last_word.op_own) ==
                             MLX5_CQE_RESP_ERR ||
                             MLX5_CQE_OPCODE(last_word.op_own) ==
                             MLX5_CQE_REQ_ERR))
                        cq->errors++;
                rte_io_wmb();
                /* Ring CQ doorbell record. */
                cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
                rte_io_wmb();
                /* Ring SW QP doorbell record. */
                eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
        }
        return comp;
}

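/* Arm all un-armed CQs so further completions raise channel events. */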
static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
        struct mlx5_vdpa_cq *cq;
        int i;

        for (i = 0; i < priv->nr_virtqs; i++) {
                cq = &priv->virtqs[i].eqp.cq;
                if (cq->cq && !cq->armed)
                        mlx5_vdpa_cq_arm(priv, cq);
        }
}

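/*
 * Timer thread: while traffic is running, poll all un-armed CQs every
 * timer_delay_us and notify the guest through callfd. After
 * MLX5_VDPA_NO_TRAFFIC_TIME_S seconds without completions, arm the CQs,
 * switch to interrupt mode and sleep on the condition variable until the
 * interrupt handler turns the timer on again.
 */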
static void *
mlx5_vdpa_poll_handle(void *arg)
{
        struct mlx5_vdpa_priv *priv = arg;
        int i;
        struct mlx5_vdpa_cq *cq;
        uint32_t total;
        uint64_t current_tic;

        pthread_mutex_lock(&priv->timer_lock);
        while (!priv->timer_on)
                pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
        pthread_mutex_unlock(&priv->timer_lock);
        while (1) {
                total = 0;
                for (i = 0; i < priv->nr_virtqs; i++) {
                        cq = &priv->virtqs[i].eqp.cq;
                        if (cq->cq && !cq->armed) {
                                uint32_t comp = mlx5_vdpa_cq_poll(cq);

                                if (comp) {
                                        /* Notify guest of consumed descs. */
                                        if (cq->callfd != -1)
                                                eventfd_write(cq->callfd,
                                                              (eventfd_t)1);
                                        total += comp;
                                }
                        }
                }
                current_tic = rte_rdtsc();
                if (!total) {
                        /* No traffic: stop the timer, move to interrupts. */
                        if (current_tic - priv->last_traffic_tic >=
                            rte_get_timer_hz() * MLX5_VDPA_NO_TRAFFIC_TIME_S) {
                                DRV_LOG(DEBUG, "Device %s traffic was stopped.",
                                        priv->vdev->device->name);
                                mlx5_vdpa_arm_all_cqs(priv);
                                pthread_mutex_lock(&priv->timer_lock);
                                priv->timer_on = 0;
                                while (!priv->timer_on)
                                        pthread_cond_wait(&priv->timer_cond,
                                                          &priv->timer_lock);
                                pthread_mutex_unlock(&priv->timer_lock);
                                continue;
                        }
                } else {
                        priv->last_traffic_tic = current_tic;
                }
                usleep(priv->timer_delay_us);
        }
        return NULL;
}

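/*
 * Event channel interrupt handler: drain all pending CQ events, poll each
 * reported CQ and leave it un-armed so that the timer thread takes over
 * polling, then make sure the timer thread is running.
 */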
static void
mlx5_vdpa_interrupt_handler(void *cb_arg)
{
        struct mlx5_vdpa_priv *priv = cb_arg;
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;

        while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                                         sizeof(out.buf)) >=
                                       (ssize_t)sizeof(out.event_resp.cookie)) {
                struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
                                               (uintptr_t)out.event_resp.cookie;
                struct mlx5_vdpa_event_qp *eqp = container_of(cq,
                                                 struct mlx5_vdpa_event_qp, cq);
                struct mlx5_vdpa_virtq *virtq = container_of(eqp,
                                                   struct mlx5_vdpa_virtq, eqp);

                mlx5_vdpa_cq_poll(cq);
                /* Don't arm again - timer will take control. */
                DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
                        " Timer is %s, cq ci is %u.",
                        priv->vdev->device->name,
                        (int)virtq->index, cq->cq->id,
                        priv->timer_on ? "on" : "off", cq->cq_ci);
                cq->armed = 0;
        }
#endif

        /* Traffic detected: make sure timer is on. */
        priv->last_traffic_tic = rte_rdtsc();
        pthread_mutex_lock(&priv->timer_lock);
        if (!priv->timer_on) {
                priv->timer_on = 1;
                pthread_cond_signal(&priv->timer_cond);
        }
        pthread_mutex_unlock(&priv->timer_lock);
}

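/*
 * Set up the CQE event machinery: the timer thread for polling mode and a
 * non-blocking interrupt handler on the event channel FD for armed CQs.
 */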
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
        int flags;
        int ret;

        if (!priv->eventc)
                /* All virtqs are in poll mode. */
                return 0;
        pthread_mutex_init(&priv->timer_lock, NULL);
        pthread_cond_init(&priv->timer_cond, NULL);
        priv->timer_on = 0;
        priv->timer_delay_us = MLX5_VDPA_DEFAULT_TIMER_DELAY_US;
        ret = pthread_create(&priv->timer_tid, NULL, mlx5_vdpa_poll_handle,
                             (void *)priv);
        if (ret) {
                DRV_LOG(ERR, "Failed to create timer thread.");
                return -1;
        }
        flags = fcntl(priv->eventc->fd, F_GETFL);
        ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
                DRV_LOG(ERR, "Failed to change event channel FD.");
                goto error;
        }
        priv->intr_handle.fd = priv->eventc->fd;
        priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
        if (rte_intr_callback_register(&priv->intr_handle,
                                       mlx5_vdpa_interrupt_handler, priv)) {
                priv->intr_handle.fd = 0;
                DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_cqe_event_unset(priv);
        return -1;
}

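/* Tear down the machinery set up by mlx5_vdpa_cqe_event_setup(). */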
void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
        int retries = MLX5_VDPA_INTR_RETRIES;
        int ret = -EAGAIN;
        void *status;

        if (priv->intr_handle.fd) {
                while (retries-- && ret == -EAGAIN) {
                        ret = rte_intr_callback_unregister(&priv->intr_handle,
                                                    mlx5_vdpa_interrupt_handler,
                                                    priv);
                        if (ret == -EAGAIN) {
                                DRV_LOG(DEBUG, "Try again to unregister fd %d "
                                        "of CQ interrupt, retries = %d.",
                                        priv->intr_handle.fd, retries);
                                rte_pause();
                        }
                }
                memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
        }
        if (priv->timer_tid) {
                pthread_cancel(priv->timer_tid);
                pthread_join(priv->timer_tid, &status);
        }
        priv->timer_tid = 0;
}

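/* Destroy an event QP: the SW QP, its umem, the FW QP and the CQ. */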
void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
        if (eqp->sw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
        if (eqp->umem_obj)
                claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
        if (eqp->umem_buf)
                rte_free(eqp->umem_buf);
        if (eqp->fw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
        mlx5_vdpa_cq_destroy(&eqp->cq);
        memset(eqp, 0, sizeof(*eqp));
}

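/*
 * Move the FW/SW QP pair through the RST -> INIT -> RTR -> RTS state
 * machine, each QP pointing at the other as its remote peer.
 */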
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->sw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        return 0;
}

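/*
 * Create an event QP: a CQ attached to the event channel, a FW-owned QP
 * and a SW-owned QP connected back-to-back, with the SW RQ sized to
 * desc_n (rounded up to a power of two) single-WSEG receive WQEs.
 */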
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
                          int callfd, struct mlx5_vdpa_event_qp *eqp)
{
        struct mlx5_devx_qp_attr attr = {0};
        uint16_t log_desc_n = rte_log2_u32(desc_n);
        uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
                                                       sizeof(*eqp->db_rec) * 2;

        if (mlx5_vdpa_event_qp_global_prepare(priv))
                return -1;
        if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
                return -1;
        attr.pd = priv->pdn;
        eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
        if (!eqp->fw_qp) {
                DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
                goto error;
        }
        eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
        if (!eqp->umem_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
                rte_errno = ENOMEM;
                goto error;
        }
        eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
                                               (void *)(uintptr_t)eqp->umem_buf,
                                               umem_size,
                                               IBV_ACCESS_LOCAL_WRITE);
        if (!eqp->umem_obj) {
                DRV_LOG(ERR, "Failed to register umem for SW QP.");
                goto error;
        }
        attr.uar_index = priv->uar->page_id;
        attr.cqn = eqp->cq.cq->id;
        attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
        attr.rq_size = 1 << log_desc_n;
        attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
        attr.sq_size = 0; /* No SQ is needed. */
        attr.dbr_umem_valid = 1;
        attr.wq_umem_id = eqp->umem_obj->umem_id;
        attr.wq_umem_offset = 0;
        attr.dbr_umem_id = eqp->umem_obj->umem_id;
        attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
        eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
        if (!eqp->sw_qp) {
                DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
                goto error;
        }
        eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
        if (mlx5_vdpa_qps2rts(eqp))
                goto error;
        /* First ringing: post all the RQ WQEs to HW at once. */
        rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
        return 0;
error:
        mlx5_vdpa_event_qp_destroy(eqp);
        return -1;
}