/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

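/* Recovery rate-limit window, in seconds: the error interrupt handler
 * below retries a virtq only while its oldest logged error is older than
 * this window.
 */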
#define MLX5_VDPA_ERROR_TIME_SEC 3u

void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
	if (priv->uar) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
#ifdef HAVE_IBV_DEVX_EVENT
	if (priv->eventc) {
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
				    + 128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
						 sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
		mlx5_glue->devx_destroy_event_channel(priv->eventc);
		priv->eventc = NULL;
	}
#endif
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
	int flags, ret;

	if (priv->eventc)
		return 0;
	if (mlx5_glue->devx_query_eqn(priv->ctx, 0, &priv->eqn)) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
		return -1;
	}
	priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!priv->eventc) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.",
			rte_errno);
		goto error;
	}
	/* The event channel FD must be non-blocking: it is drained in loops. */
	flags = fcntl(priv->eventc->fd, F_GETFL);
	ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change event channel FD.");
		goto error;
	}
	priv->uar = mlx5_glue->devx_alloc_uar(priv->ctx, 0);
	if (!priv->uar) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate UAR.");
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_event_qp_global_release(priv);
	return -1;
}

static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		rte_free((void *)(uintptr_t)cq->umem_buf);
	memset(cq, 0, sizeof(*cq));
}

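/*
 * Arm the CQ for the next completion event: publish the current consumer
 * index and command sequence number in the arm doorbell record, then ring
 * the UAR doorbell so the device raises a single event on the driver
 * event channel when the next CQE is written.
 */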
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq->id;
	uint64_t db_be = rte_cpu_to_be_64(doorbell);
	uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

	rte_io_wmb();
	cq->db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	/* 32-bit arch: the 64-bit doorbell needs two ordered writes. */
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	cq->arm_sn++;
	cq->armed = 1;
}

static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
		    int callfd, struct mlx5_vdpa_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {0};
	size_t pgsize = sysconf(_SC_PAGESIZE);
	uint32_t umem_size;
	uint16_t event_nums[1] = {0};
	uint16_t cq_size = 1 << log_desc_n;
	int ret;

	cq->log_desc_n = log_desc_n;
	/* The umem holds the CQE array followed by the doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
	cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!cq->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
						(void *)(uintptr_t)cq->umem_buf,
						umem_size,
						IBV_ACCESS_LOCAL_WRITE);
	if (!cq->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	attr.q_umem_valid = 1;
	attr.db_umem_valid = 1;
	attr.use_first_only = 1;
	attr.overrun_ignore = 0;
	attr.uar_page_id = priv->uar->page_id;
	attr.q_umem_id = cq->umem_obj->umem_id;
	attr.q_umem_offset = 0;
	attr.db_umem_id = cq->umem_obj->umem_id;
	attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
	attr.eqn = priv->eqn;
	attr.log_cq_size = log_desc_n;
	attr.log_page_size = rte_log2_u32(pgsize);
	cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
	if (!cq->cq)
		goto error;
	cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
	cq->cq_ci = 0;
	rte_spinlock_init(&cq->sl);
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
						   sizeof(event_nums),
						   event_nums,
						   (uint64_t)(uintptr_t)cq);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		goto error;
	}
	cq->callfd = callfd;
	/* Init CQE ownership to HW so the first poll sees no completion. */
	cq->cqes[0].op_own = MLX5_CQE_OWNER_MASK;
	cq->cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
	/* First arming. */
	mlx5_vdpa_cq_arm(priv, cq);
	return 0;
error:
	mlx5_vdpa_cq_destroy(cq);
	return -1;
}

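/*
 * The CQ is created with "use_first_only" set, so the device collapses
 * all completions into CQE[0] and overwrites it in place. Polling thus
 * reads that single CQE and derives the number of newly completed
 * descriptors from the gap between its wqe_counter and the local
 * consumer index.
 */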
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
	struct mlx5_vdpa_event_qp *eqp =
				container_of(cq, struct mlx5_vdpa_event_qp, cq);
	const unsigned int cq_size = 1 << cq->log_desc_n;
	union {
		struct {
			uint16_t wqe_counter;
			uint8_t rsvd5;
			uint8_t op_own;
		};
		uint32_t word;
	} last_word;
	uint16_t next_wqe_counter = cq->cq_ci;
	uint16_t cur_wqe_counter;
	uint16_t comp;

	last_word.word = rte_read32(&cq->cqes[0].wqe_counter);
	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
	comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
	if (comp) {
		cq->cq_ci += comp;
		MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
			    MLX5_CQE_INVALID);
		if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_RESP_ERR ||
			       MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_REQ_ERR)))
			cq->errors++;
		rte_io_wmb();
		/* Ring CQ doorbell record. */
		cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
		rte_io_wmb();
		/* Ring SW QP doorbell record. */
		eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
	}
	return comp;
}

static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_vdpa_cq *cq;
	int i;

	for (i = 0; i < priv->nr_virtqs; i++) {
		cq = &priv->virtqs[i].eqp.cq;
		if (cq->cq && !cq->armed)
			mlx5_vdpa_cq_arm(priv, cq);
	}
}

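/*
 * Adapt the polling interval in dynamic-timer mode: a round with no
 * completions stretches the delay by the configured step, while a round
 * with several completions shrinks it proportionally.
 */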
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
	if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
		switch (max) {
		case 0:
			priv->timer_delay_us += priv->event_us;
			break;
		case 1:
			break;
		default:
			priv->timer_delay_us /= max;
			break;
		}
	}
	usleep(priv->timer_delay_us);
}

static void *
mlx5_vdpa_poll_handle(void *arg)
{
	struct mlx5_vdpa_priv *priv = arg;
	int i;
	struct mlx5_vdpa_cq *cq;
	uint32_t max;
	uint64_t current_tic;

	pthread_mutex_lock(&priv->timer_lock);
	while (!priv->timer_on)
		pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
	pthread_mutex_unlock(&priv->timer_lock);
	priv->timer_delay_us = priv->event_mode ==
					    MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
					      MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
								 priv->event_us;
	while (1) {
		max = 0;
		pthread_mutex_lock(&priv->vq_config_lock);
		for (i = 0; i < priv->nr_virtqs; i++) {
			cq = &priv->virtqs[i].eqp.cq;
			if (cq->cq && !cq->armed) {
				uint32_t comp = mlx5_vdpa_cq_poll(cq);

				if (comp) {
					/* Notify guest for descs consuming. */
					if (cq->callfd != -1)
						eventfd_write(cq->callfd,
							      (eventfd_t)1);
					if (comp > max)
						max = comp;
				}
			}
		}
		current_tic = rte_rdtsc();
		if (!max) {
			/* No traffic? Stop the timer and move to interrupts. */
			if (current_tic - priv->last_traffic_tic >=
			    rte_get_timer_hz() * priv->no_traffic_time_s) {
				DRV_LOG(DEBUG, "Device %s traffic was stopped.",
					priv->vdev->device->name);
				mlx5_vdpa_arm_all_cqs(priv);
				pthread_mutex_unlock(&priv->vq_config_lock);
				pthread_mutex_lock(&priv->timer_lock);
				priv->timer_on = 0;
				while (!priv->timer_on)
					pthread_cond_wait(&priv->timer_cond,
							  &priv->timer_lock);
				pthread_mutex_unlock(&priv->timer_lock);
				priv->timer_delay_us = priv->event_mode ==
					    MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
					      MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
								 priv->event_us;
				continue;
			}
		} else {
			priv->last_traffic_tic = current_tic;
		}
		pthread_mutex_unlock(&priv->vq_config_lock);
		mlx5_vdpa_timer_sleep(priv, max);
	}
	return NULL;
}

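/*
 * CQE event channel handler: drain all pending events, poll each
 * signalled CQ and notify the guest through the virtq callfd. In pure
 * interrupt mode the CQ is re-armed immediately; otherwise arming is
 * left to the polling thread, which is woken up below if it stopped.
 */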
static void
mlx5_vdpa_interrupt_handler(void *cb_arg)
{
	struct mlx5_vdpa_priv *priv = cb_arg;
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;

	pthread_mutex_lock(&priv->vq_config_lock);
	while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
					       (uintptr_t)out.event_resp.cookie;
		struct mlx5_vdpa_event_qp *eqp = container_of(cq,
						 struct mlx5_vdpa_event_qp, cq);
		struct mlx5_vdpa_virtq *virtq = container_of(eqp,
						   struct mlx5_vdpa_virtq, eqp);

		mlx5_vdpa_cq_poll(cq);
		/* Notify guest for descs consuming. */
		if (cq->callfd != -1)
			eventfd_write(cq->callfd, (eventfd_t)1);
		if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
			mlx5_vdpa_cq_arm(priv, cq);
			pthread_mutex_unlock(&priv->vq_config_lock);
			return;
		}
		/* Don't arm again - timer will take control. */
		DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
			" Timer is %s, cq ci is %u.",
			priv->vdev->device->name,
			(int)virtq->index, cq->cq->id,
			priv->timer_on ? "on" : "off", cq->cq_ci);
		cq->armed = 0;
	}
	/* Traffic detected: make sure timer is on. */
	priv->last_traffic_tic = rte_rdtsc();
	pthread_mutex_lock(&priv->timer_lock);
	if (!priv->timer_on) {
		priv->timer_on = 1;
		pthread_cond_signal(&priv->timer_cond);
	}
	pthread_mutex_unlock(&priv->timer_lock);
	pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

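/*
 * Device error event handler: each event cookie encodes the virtq index
 * and its configuration version. The per-virtq error-time log decides
 * whether recovery is retried or abandoned when errors repeat quickly.
 */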
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
	struct mlx5_vdpa_priv *priv = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	uint32_t vq_index, i, version;
	struct mlx5_vdpa_virtq *virtq;
	uint64_t sec;

	pthread_mutex_lock(&priv->vq_config_lock);
	while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		vq_index = out.event_resp.cookie & UINT32_MAX;
		version = out.event_resp.cookie >> 32;
		if (vq_index >= priv->nr_virtqs) {
			DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
				priv->vdev->device->name, vq_index);
			continue;
		}
		virtq = &priv->virtqs[vq_index];
		if (!virtq->enable || virtq->version != version)
			continue;
		if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
			continue;
		virtq->stopped = true;
		/* Query error info. */
		if (mlx5_vdpa_virtq_query(priv, vq_index))
			goto log;
		/* Disable the virtq. */
		if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
			DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
			goto log;
		}
		/* Retry if error happens less than N times in 3 seconds. */
		sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
		if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
			/* Retry. */
			if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
				DRV_LOG(ERR, "Failed to enable virtq %d.",
					vq_index);
			else
				DRV_LOG(WARNING, "Recover virtq %d: %u.",
					vq_index, ++virtq->n_retry);
		} else {
			/* Retry timeout, give up. */
			DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
				priv->vdev->device->name, vq_index);
		}
log:
		/* Shift the error-time log and record the current time. */
		for (i = 1; i < RTE_DIM(virtq->err_time); i++)
			virtq->err_time[i - 1] = virtq->err_time[i];
		virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
	}
	pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

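/*
 * Register the device error event channel FD with the host interrupt
 * thread so virtq error events are handled asynchronously.
 */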
int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;
	int flags;

	/* Setup device event channel. */
	priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->ctx, 0);
	if (!priv->err_chnl) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create device event channel %d.",
			rte_errno);
		goto error;
	}
	flags = fcntl(priv->err_chnl->fd, F_GETFL);
	ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change device event channel FD.");
		goto error;
	}
	priv->err_intr_handle.fd = priv->err_chnl->fd;
	priv->err_intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->err_intr_handle,
				       mlx5_vdpa_err_interrupt_handler,
				       priv)) {
		priv->err_intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
			priv->vid);
		goto error;
	}
	DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
		priv->vid);
	return 0;
error:
	mlx5_vdpa_err_event_unset(priv);
	return -1;
}

void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (!priv->err_intr_handle.fd)
		return;
	while (retries-- && ret == -EAGAIN) {
		ret = rte_intr_callback_unregister(&priv->err_intr_handle,
					    mlx5_vdpa_err_interrupt_handler,
					    priv);
		if (ret == -EAGAIN) {
			DRV_LOG(DEBUG, "Try again to unregister fd %d "
				"of error interrupt, retries = %d.",
				priv->err_intr_handle.fd, retries);
			rte_pause();
		}
	}
	memset(&priv->err_intr_handle, 0, sizeof(priv->err_intr_handle));
	if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
				    128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->err_chnl,
		       &out.event_resp, sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
#endif
		mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
		priv->err_chnl = NULL;
	}
}

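/*
 * Set up completion event handling according to the event mode: unless
 * the mode is interrupt-only, a timer thread polls the CQs while traffic
 * is running and the event channel interrupt takes over when it stops.
 */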
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;

	if (!priv->eventc)
		/* All virtqs are in poll mode. */
		return 0;
	if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
		pthread_mutex_init(&priv->timer_lock, NULL);
		pthread_cond_init(&priv->timer_cond, NULL);
		priv->timer_on = 0;
		ret = pthread_create(&priv->timer_tid, NULL,
				     mlx5_vdpa_poll_handle, (void *)priv);
		if (ret) {
			DRV_LOG(ERR, "Failed to create timer thread.");
			return -1;
		}
	}
	priv->intr_handle.fd = priv->eventc->fd;
	priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->intr_handle,
				       mlx5_vdpa_interrupt_handler, priv)) {
		priv->intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_cqe_event_unset(priv);
	return -1;
}

void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;
	void *status;

	if (priv->intr_handle.fd) {
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(&priv->intr_handle,
						    mlx5_vdpa_interrupt_handler,
						    priv);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
					"of CQ interrupt, retries = %d.",
					priv->intr_handle.fd, retries);
				rte_pause();
			}
		}
		memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
	}
	if (priv->timer_tid) {
		pthread_cancel(priv->timer_tid);
		pthread_join(priv->timer_tid, &status);
	}
	priv->timer_tid = 0;
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
	if (eqp->sw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
	if (eqp->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
	if (eqp->umem_buf)
		rte_free(eqp->umem_buf);
	if (eqp->fw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
	mlx5_vdpa_cq_destroy(&eqp->cq);
	memset(eqp, 0, sizeof(*eqp));
}

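/*
 * Walk both loopback QPs through the RST -> INIT -> RTR -> RTS state
 * chain, each one connected to the other as its remote peer.
 */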
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}

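/*
 * An event QP pairs a SW-owned RQ with a FW-owned QP in loopback: the
 * device posts to the SW RQ for every completed virtq descriptor, so
 * each completion lands as a CQE on the CQ created above and can be
 * forwarded to the guest as an interrupt.
 */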
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			  int callfd, struct mlx5_vdpa_event_qp *eqp)
{
	struct mlx5_devx_qp_attr attr = {0};
	uint16_t log_desc_n = rte_log2_u32(desc_n);
	uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
						       sizeof(*eqp->db_rec) * 2;

	if (mlx5_vdpa_event_qp_global_prepare(priv))
		return -1;
	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
		return -1;
	attr.pd = priv->pdn;
	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->fw_qp) {
		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
		goto error;
	}
	eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!eqp->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
		rte_errno = ENOMEM;
		goto error;
	}
	eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
					       (void *)(uintptr_t)eqp->umem_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!eqp->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SW QP.");
		goto error;
	}
	attr.uar_index = priv->uar->page_id;
	attr.cqn = eqp->cq.cq->id;
	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
	attr.rq_size = 1 << log_desc_n;
	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
	attr.sq_size = 0; /* No SQ is needed. */
	attr.dbr_umem_valid = 1;
	attr.wq_umem_id = eqp->umem_obj->umem_id;
	attr.wq_umem_offset = 0;
	attr.dbr_umem_id = eqp->umem_obj->umem_id;
	attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
	eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->sw_qp) {
		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
		goto error;
	}
	eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
	if (mlx5_vdpa_qps2rts(eqp))
		goto error;
	/* First ringing: post the whole RQ so it is ready for completions. */
	rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
	return 0;
error:
	mlx5_vdpa_event_qp_destroy(eqp);
	return -1;
}