/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
	if (priv->uar) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
#ifdef HAVE_IBV_DEVX_EVENT
	if (priv->eventc) {
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
								       + 128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
						 sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
		mlx5_glue->devx_destroy_event_channel(priv->eventc);
		priv->eventc = NULL;
	}
#endif
	priv->eqn = 0;
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
	int flags, ret;

	if (priv->eventc)
		return 0;
	if (mlx5_glue->devx_query_eqn(priv->ctx, 0, &priv->eqn)) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
		return -1;
	}
	priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!priv->eventc) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.",
			rte_errno);
		goto error;
	}
	flags = fcntl(priv->eventc->fd, F_GETFL);
	ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change event channel FD.");
		goto error;
	}
	priv->uar = mlx5_glue->devx_alloc_uar(priv->ctx, 0);
	if (!priv->uar) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate UAR.");
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_event_qp_global_release(priv);
	return -1;
}
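
/*
 * Note on the O_NONBLOCK switch above: both the interrupt handler and the
 * release path drain devx_get_event() in a loop, so the channel FD must not
 * block once the event queue is empty; a short read then ends the loop.
 */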

static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		rte_free((void *)(uintptr_t)cq->umem_buf);
	memset(cq, 0, sizeof(*cq));
}

static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq->id;
	uint64_t db_be = rte_cpu_to_be_64(doorbell);
	uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

	rte_io_wmb();
	cq->db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	cq->arm_sn++;
	cq->armed = 1;
}
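
/*
 * Illustrative arm-doorbell encoding (example values, not from the code):
 * with arm_sn == 1, cq_ci == 5 and CQ id 0x33, doorbell_hi packs
 * (1 << MLX5_CQ_SQN_OFFSET) | MLX5_CQ_DBR_CMD_ALL | 5, and the 64-bit UAR
 * word is ((uint64_t)doorbell_hi << 32) | 0x33. The same high word is
 * mirrored into db_rec[MLX5_CQ_ARM_DB] first, which is why the barriers
 * order the doorbell record update ahead of the UAR write.
 */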

static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
		    int callfd, struct mlx5_vdpa_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {0};
	size_t pgsize = sysconf(_SC_PAGESIZE);
	uint32_t umem_size;
	uint16_t event_nums[1] = {0};
	uint16_t cq_size = 1 << log_desc_n;
	int ret;

	cq->log_desc_n = log_desc_n;
	umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
	cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!cq->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
						(void *)(uintptr_t)cq->umem_buf,
						umem_size,
						IBV_ACCESS_LOCAL_WRITE);
	if (!cq->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	attr.q_umem_valid = 1;
	attr.db_umem_valid = 1;
	attr.use_first_only = 1;
	attr.overrun_ignore = 0;
	attr.uar_page_id = priv->uar->page_id;
	attr.q_umem_id = cq->umem_obj->umem_id;
	attr.q_umem_offset = 0;
	attr.db_umem_id = cq->umem_obj->umem_id;
	attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
	attr.eqn = priv->eqn;
	attr.log_cq_size = log_desc_n;
	attr.log_page_size = rte_log2_u32(pgsize);
	cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
	if (!cq->cq)
		goto error;
	cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
	cq->cq_ci = 0;
	rte_spinlock_init(&cq->sl);
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
						   sizeof(event_nums),
						   event_nums,
						   (uint64_t)(uintptr_t)cq);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		goto error;
	}
	cq->callfd = callfd;
	/* Init CQ to ones to be in HW owner in the start. */
	cq->cqes[0].op_own = MLX5_CQE_OWNER_MASK;
	cq->cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
	/* First arming. */
	mlx5_vdpa_cq_arm(priv, cq);
	return 0;
error:
	mlx5_vdpa_cq_destroy(cq);
	return -1;
}
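
/*
 * Layout of the single umem registered for a CQ, as set up above:
 *
 *   offset 0:               cq_size * sizeof(struct mlx5_cqe)   (CQEs)
 *   offset db_umem_offset:  2 * sizeof(*cq->db_rec)   (CQ + arm doorbells)
 */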

static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
	struct mlx5_vdpa_event_qp *eqp =
				container_of(cq, struct mlx5_vdpa_event_qp, cq);
	const unsigned int cq_size = 1 << cq->log_desc_n;
	union {
		struct {
			uint16_t wqe_counter;
			uint8_t rsvd5;
			uint8_t op_own;
		};
		uint32_t word;
	} last_word;
	uint16_t next_wqe_counter = cq->cq_ci;
	uint16_t cur_wqe_counter;
	uint16_t comp;

	last_word.word = rte_read32(&cq->cqes[0].wqe_counter);
	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
	comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
	if (comp) {
		cq->cq_ci += comp;
		MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
			    MLX5_CQE_INVALID);
		if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_RESP_ERR ||
			       MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_REQ_ERR)))
			cq->errors++;
		rte_io_wmb();
		/* Ring CQ doorbell record. */
		cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
		rte_io_wmb();
		/* Ring SW QP doorbell record. */
		eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
	}
	return comp;
}
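
/*
 * Counting sketch for the poll above: with use_first_only set, HW rewrites
 * cqes[0] on every completion, so one 32-bit read yields the latest
 * wqe_counter and op_own together. E.g. cq_ci == 10 and wqe_counter == 12
 * give comp == 12 + 1 - 10 == 3 completions reported at once, all in
 * uint16_t wrap-around arithmetic.
 */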

static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_vdpa_cq *cq;
	int i;

	for (i = 0; i < priv->nr_virtqs; i++) {
		cq = &priv->virtqs[i].eqp.cq;
		if (cq->cq && !cq->armed)
			mlx5_vdpa_cq_arm(priv, cq);
	}
}

static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
	if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
		switch (max) {
		case 0:
			/* No traffic: widen the polling interval. */
			priv->timer_delay_us += priv->event_us;
			break;
		case 1:
			break;
		default:
			/* A burst of completions: shorten the interval. */
			priv->timer_delay_us /= max;
			break;
		}
	}
	usleep(priv->timer_delay_us);
}
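
/*
 * Dynamic-timer example (illustrative values): with event_us == 100, an
 * idle iteration (max == 0) stretches timer_delay_us by 100, while a burst
 * of max == 4 completions divides it by 4, so the polling period tracks
 * the traffic rate between the two extremes.
 */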

static void *
mlx5_vdpa_poll_handle(void *arg)
{
	struct mlx5_vdpa_priv *priv = arg;
	int i;
	struct mlx5_vdpa_cq *cq;
	uint32_t max;
	uint64_t current_tic;

	pthread_mutex_lock(&priv->timer_lock);
	while (!priv->timer_on)
		pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
	pthread_mutex_unlock(&priv->timer_lock);
	priv->timer_delay_us = priv->event_mode ==
					    MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
					      MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
								 priv->event_us;
	while (1) {
		max = 0;
		pthread_mutex_lock(&priv->vq_config_lock);
		for (i = 0; i < priv->nr_virtqs; i++) {
			cq = &priv->virtqs[i].eqp.cq;
			if (cq->cq && !cq->armed) {
				uint32_t comp = mlx5_vdpa_cq_poll(cq);

				if (comp) {
					/* Notify guest for descs consuming. */
					if (cq->callfd != -1)
						eventfd_write(cq->callfd,
							      (eventfd_t)1);
					if (comp > max)
						max = comp;
				}
			}
		}
		current_tic = rte_rdtsc();
		if (!max) {
			/* No traffic? Stop the timer, move to interrupts. */
			if (current_tic - priv->last_traffic_tic >=
			    rte_get_timer_hz() * priv->no_traffic_time_s) {
				DRV_LOG(DEBUG, "Device %s traffic was stopped.",
					priv->vdev->device->name);
				mlx5_vdpa_arm_all_cqs(priv);
				pthread_mutex_unlock(&priv->vq_config_lock);
				pthread_mutex_lock(&priv->timer_lock);
				priv->timer_on = 0;
				while (!priv->timer_on)
					pthread_cond_wait(&priv->timer_cond,
							  &priv->timer_lock);
				pthread_mutex_unlock(&priv->timer_lock);
				priv->timer_delay_us = priv->event_mode ==
					    MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
					      MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
								 priv->event_us;
				continue;
			}
		} else {
			priv->last_traffic_tic = current_tic;
		}
		pthread_mutex_unlock(&priv->vq_config_lock);
		mlx5_vdpa_timer_sleep(priv, max);
	}
	return NULL;
}
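
/*
 * The poll thread and the interrupt handler hand ownership back and forth:
 * after no_traffic_time_s seconds without completions the thread arms all
 * CQs and blocks on timer_cond; the next CQE interrupt signals it awake.
 */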

static void
mlx5_vdpa_interrupt_handler(void *cb_arg)
{
	struct mlx5_vdpa_priv *priv = cb_arg;
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;

	pthread_mutex_lock(&priv->vq_config_lock);
	while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
					       (uintptr_t)out.event_resp.cookie;
		struct mlx5_vdpa_event_qp *eqp = container_of(cq,
						 struct mlx5_vdpa_event_qp, cq);
		struct mlx5_vdpa_virtq *virtq = container_of(eqp,
						   struct mlx5_vdpa_virtq, eqp);

		mlx5_vdpa_cq_poll(cq);
		/* Notify guest for descs consuming. */
		if (cq->callfd != -1)
			eventfd_write(cq->callfd, (eventfd_t)1);
		if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
			mlx5_vdpa_cq_arm(priv, cq);
			pthread_mutex_unlock(&priv->vq_config_lock);
			return;
		}
		/* Don't arm again - timer will take control. */
		DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
			" Timer is %s, cq ci is %u.\n",
			priv->vdev->device->name,
			(int)virtq->index, cq->cq->id,
			priv->timer_on ? "on" : "off", cq->cq_ci);
		cq->armed = 0;
	}
	/* Traffic detected: make sure timer is on. */
	priv->last_traffic_tic = rte_rdtsc();
	pthread_mutex_lock(&priv->timer_lock);
	if (!priv->timer_on) {
		priv->timer_on = 1;
		pthread_cond_signal(&priv->timer_cond);
	}
	pthread_mutex_unlock(&priv->timer_lock);
	pthread_mutex_unlock(&priv->vq_config_lock);
#else
	(void)priv;
#endif
}
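
/*
 * The event cookie read above is the CQ pointer that was registered via
 * devx_subscribe_devx_event() at CQ creation; that is what makes the
 * container_of() walk back to the owning event QP and virtq valid.
 */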

int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;

	if (!priv->eventc)
		/* All virtqs are in poll mode. */
		return 0;
	if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
		pthread_mutex_init(&priv->timer_lock, NULL);
		pthread_cond_init(&priv->timer_cond, NULL);
		priv->timer_on = 0;
		ret = pthread_create(&priv->timer_tid, NULL,
				     mlx5_vdpa_poll_handle, (void *)priv);
		if (ret) {
			DRV_LOG(ERR, "Failed to create timer thread.");
			return -1;
		}
	}
	priv->intr_handle.fd = priv->eventc->fd;
	priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->intr_handle,
				       mlx5_vdpa_interrupt_handler, priv)) {
		priv->intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_cqe_event_unset(priv);
	return -1;
}

void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;
	void *status;

	if (priv->intr_handle.fd) {
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(&priv->intr_handle,
						    mlx5_vdpa_interrupt_handler,
						    priv);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
					"of CQ interrupt, retries = %d.",
					priv->intr_handle.fd, retries);
				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
			}
		}
		memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
	}
	if (priv->timer_tid) {
		pthread_cancel(priv->timer_tid);
		pthread_join(priv->timer_tid, &status);
	}
	priv->timer_tid = 0;
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
	if (eqp->sw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
	if (eqp->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
	if (eqp->umem_buf)
		rte_free(eqp->umem_buf);
	if (eqp->fw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
	mlx5_vdpa_cq_destroy(&eqp->cq);
	memset(eqp, 0, sizeof(*eqp));
}

static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}
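
/*
 * The FW and SW QPs above are moved RST->INIT->RTR->RTS in lockstep, each
 * given the peer's QP id as remote, forming a loopback pair (presumably so
 * FW completions land on the SW QP whose CQ is polled and armed above).
 */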

int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			  int callfd, struct mlx5_vdpa_event_qp *eqp)
{
	struct mlx5_devx_qp_attr attr = {0};
	uint16_t log_desc_n = rte_log2_u32(desc_n);
	uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
						       sizeof(*eqp->db_rec) * 2;

	if (mlx5_vdpa_event_qp_global_prepare(priv))
		return -1;
	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
		return -1;
	attr.pd = priv->pdn;
	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->fw_qp) {
		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
		goto error;
	}
	eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!eqp->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
		rte_errno = ENOMEM;
		goto error;
	}
	eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
					       (void *)(uintptr_t)eqp->umem_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!eqp->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SW QP.");
		goto error;
	}
	attr.uar_index = priv->uar->page_id;
	attr.cqn = eqp->cq.cq->id;
	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
	attr.rq_size = 1 << log_desc_n;
	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
	attr.sq_size = 0; /* No SQ is needed. */
	attr.dbr_umem_valid = 1;
	attr.wq_umem_id = eqp->umem_obj->umem_id;
	attr.wq_umem_offset = 0;
	attr.dbr_umem_id = eqp->umem_obj->umem_id;
	attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
	eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->sw_qp) {
		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
		goto error;
	}
	eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
	if (mlx5_vdpa_qps2rts(eqp))
		goto error;
	/* First ringing. */
	rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
	return 0;
error:
	mlx5_vdpa_event_qp_destroy(eqp);
	return -1;
}
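
/*
 * SW QP umem layout implied by the attributes above: (1 << log_desc_n)
 * receive WQEs of MLX5_WSEG_SIZE bytes at offset 0, then the two doorbell
 * records at dbr_address. The final rte_write32() presumably pre-posts the
 * whole RQ so every FW completion finds a ready receive slot.
 */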