/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <sched.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_ERROR_TIME_SEC 3u
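
/*
 * Event management for mlx5 vDPA: per-virtq event CQs and QPs, a timer
 * thread that polls the CQs, and DevX event channels that deliver
 * completion and error interrupts. Depending on priv->event_mode,
 * completions are detected by a dynamic or fixed polling timer, or by
 * device interrupts only.
 */
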
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
	if (priv->uar) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
#ifdef HAVE_IBV_DEVX_EVENT
	if (priv->eventc) {
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
				    + 128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
						 sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
		mlx5_os_devx_destroy_event_channel(priv->eventc);
		priv->eventc = NULL;
	}
#endif
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
	int flags, ret;

	if (priv->eventc)
		return 0;
	priv->eventc = mlx5_os_devx_create_event_channel(priv->ctx,
			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!priv->eventc) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.",
			rte_errno);
		goto error;
	}
	/* Make the event channel FD non-blocking so draining cannot stall. */
	flags = fcntl(priv->eventc->fd, F_GETFL);
	ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change event channel FD.");
		goto error;
	}
	/*
	 * This PMD always issues the write memory barrier on UAR
	 * register writes, so it is safe to allocate the UAR with any
	 * memory mapping type.
	 */
	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
	if (!priv->uar) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate UAR.");
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_event_qp_global_release(priv);
	return -1;
}

static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
	mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
}

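/*
 * Arm the CQ for the next device interrupt. The arm request is a 64-bit
 * doorbell: the high word carries the arm sequence number, the arm command
 * and the current consumer index, the low word carries the CQ number. The
 * same high word is mirrored into the CQ arm doorbell record so that the
 * device can validate the request.
 */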
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
	uint64_t db_be = rte_cpu_to_be_64(doorbell);
	uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

	rte_io_wmb();
	cq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	cq->arm_sn++;
	cq->armed = 1;
}

static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
		    int callfd, struct mlx5_vdpa_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.use_first_only = 1,
		.uar_page_id = priv->uar->page_id,
	};
	uint16_t event_nums[1] = {0};
	int ret;

	ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, log_desc_n, &attr,
				  SOCKET_ID_ANY);
	if (ret)
		goto error;
	cq->cq_ci = 0;
	cq->log_desc_n = log_desc_n;
	rte_spinlock_init(&cq->sl);
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
						cq->cq_obj.cq->obj,
						sizeof(event_nums), event_nums,
						(uint64_t)(uintptr_t)cq);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		goto error;
	}
	cq->callfd = callfd;
	/* Initialize the first CQE so that HW owns it at the start. */
	cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
	cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
	/* First arming. */
	mlx5_vdpa_cq_arm(priv, cq);
	return 0;
error:
	mlx5_vdpa_cq_destroy(cq);
	return -1;
}

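/*
 * Poll a "collapsed" CQ: with use_first_only the device overwrites the
 * first CQE in place instead of advancing through the ring, so the number
 * of new completions is derived from the 16-bit distance between the WQE
 * counter in that CQE and the driver's consumer index.
 */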
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
	struct mlx5_vdpa_event_qp *eqp =
				container_of(cq, struct mlx5_vdpa_event_qp, cq);
	const unsigned int cq_size = 1 << cq->log_desc_n;
	union {
		struct {
			uint16_t wqe_counter;
			uint8_t rsvd5;
			uint8_t op_own;
		};
		uint32_t word;
	} last_word;
	uint16_t next_wqe_counter = cq->cq_ci;
	uint16_t cur_wqe_counter;
	uint16_t comp;

	last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
	comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
	if (comp) {
		cq->cq_ci += comp;
		MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
			    MLX5_CQE_INVALID);
		if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_RESP_ERR ||
			       MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_REQ_ERR)))
			cq->errors++;
		rte_io_wmb();
		/* Ring CQ doorbell record. */
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
		rte_io_wmb();
		/* Ring SW QP doorbell record. */
		eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
	}
	return comp;
}

static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_vdpa_cq *cq;
	int i;

	for (i = 0; i < priv->nr_virtqs; i++) {
		cq = &priv->virtqs[i].eqp.cq;
		if (cq->cq_obj.cq && !cq->armed)
			mlx5_vdpa_cq_arm(priv, cq);
	}
}

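/*
 * Adjust the polling interval in dynamic timer mode: back off by event_us
 * when an iteration found no completions, shrink the delay proportionally
 * to the batch size otherwise, then sleep (or just yield the CPU when the
 * delay reaches zero).
 */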
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
	if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
		switch (max) {
		case 0:
			priv->timer_delay_us += priv->event_us;
			break;
		case 1:
			break;
		default:
			priv->timer_delay_us /= max;
			break;
		}
	}
	if (priv->timer_delay_us)
		usleep(priv->timer_delay_us);
	else
		/* Give-up CPU to improve polling threads scheduling. */
		sched_yield();
}

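/*
 * Polling thread routine: wait until the timer is switched on, then poll
 * all enabled virtq CQs in a loop. When no virtq shows traffic for
 * no_traffic_time_s seconds, arm all CQs, hand control over to device
 * interrupts and suspend the thread until the interrupt handler turns the
 * timer on again.
 */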
static void *
mlx5_vdpa_poll_handle(void *arg)
{
	struct mlx5_vdpa_priv *priv = arg;
	int i;
	struct mlx5_vdpa_cq *cq;
	uint32_t max;
	uint64_t current_tic;

	pthread_mutex_lock(&priv->timer_lock);
	while (!priv->timer_on)
		pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
	pthread_mutex_unlock(&priv->timer_lock);
	priv->timer_delay_us = priv->event_mode ==
					    MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
					      MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
								 priv->event_us;
	while (1) {
		max = 0;
		pthread_mutex_lock(&priv->vq_config_lock);
		for (i = 0; i < priv->nr_virtqs; i++) {
			cq = &priv->virtqs[i].eqp.cq;
			if (cq->cq_obj.cq && !cq->armed) {
				uint32_t comp = mlx5_vdpa_cq_poll(cq);

				if (comp) {
					/* Notify guest for descs consuming. */
					if (cq->callfd != -1)
						eventfd_write(cq->callfd,
							      (eventfd_t)1);
					if (comp > max)
						max = comp;
				}
			}
		}
		current_tic = rte_rdtsc();
		if (!max) {
			/* No traffic: stop the timer and arm interrupts. */
			if (current_tic - priv->last_traffic_tic >=
			    rte_get_timer_hz() * priv->no_traffic_time_s) {
				DRV_LOG(DEBUG, "Device %s traffic was stopped.",
					priv->vdev->device->name);
				mlx5_vdpa_arm_all_cqs(priv);
				pthread_mutex_unlock(&priv->vq_config_lock);
				pthread_mutex_lock(&priv->timer_lock);
				priv->timer_on = 0;
				while (!priv->timer_on)
					pthread_cond_wait(&priv->timer_cond,
							  &priv->timer_lock);
				pthread_mutex_unlock(&priv->timer_lock);
				priv->timer_delay_us = priv->event_mode ==
					    MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
					      MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
								 priv->event_us;
				continue;
			}
		} else {
			priv->last_traffic_tic = current_tic;
		}
		pthread_mutex_unlock(&priv->vq_config_lock);
		mlx5_vdpa_timer_sleep(priv, max);
	}
	return NULL;
}

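/*
 * CQ completion-event handler: drain the DevX event channel, poll every CQ
 * that fired and notify the guest through its callfd. In interrupt-only
 * mode the CQ is re-armed immediately; otherwise the timer thread is woken
 * up to take over polling while traffic keeps coming.
 */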
static void
mlx5_vdpa_interrupt_handler(void *cb_arg)
{
	struct mlx5_vdpa_priv *priv = cb_arg;
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;

	pthread_mutex_lock(&priv->vq_config_lock);
	while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
					       (uintptr_t)out.event_resp.cookie;
		struct mlx5_vdpa_event_qp *eqp = container_of(cq,
						 struct mlx5_vdpa_event_qp, cq);
		struct mlx5_vdpa_virtq *virtq = container_of(eqp,
						   struct mlx5_vdpa_virtq, eqp);

		if (!virtq->enable)
			continue;
		mlx5_vdpa_cq_poll(cq);
		/* Notify guest for descs consuming. */
		if (cq->callfd != -1)
			eventfd_write(cq->callfd, (eventfd_t)1);
		if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
			mlx5_vdpa_cq_arm(priv, cq);
			pthread_mutex_unlock(&priv->vq_config_lock);
			return;
		}
		/* Don't arm again - timer will take control. */
		DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
			" Timer is %s, cq ci is %u.\n",
			priv->vdev->device->name,
			(int)virtq->index, cq->cq_obj.cq->id,
			priv->timer_on ? "on" : "off", cq->cq_ci);
		cq->armed = 0;
	}
#endif

	/* Traffic detected: make sure timer is on. */
	priv->last_traffic_tic = rte_rdtsc();
	pthread_mutex_lock(&priv->timer_lock);
	if (!priv->timer_on) {
		priv->timer_on = 1;
		pthread_cond_signal(&priv->timer_cond);
	}
	pthread_mutex_unlock(&priv->timer_lock);
	pthread_mutex_unlock(&priv->vq_config_lock);
}

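/*
 * Error-event handler: each error event carries the virtq index and a
 * version in its 64-bit cookie, so stale events of a re-created virtq are
 * discarded. A failing virtq is stopped, queried for error details and
 * restarted, unless it already failed repeatedly within the last
 * MLX5_VDPA_ERROR_TIME_SEC window, in which case it is left disabled.
 */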
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
	struct mlx5_vdpa_priv *priv = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	uint32_t vq_index, i, version;
	struct mlx5_vdpa_virtq *virtq;
	uint64_t sec;

	pthread_mutex_lock(&priv->vq_config_lock);
	while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		vq_index = out.event_resp.cookie & UINT32_MAX;
		version = out.event_resp.cookie >> 32;
		if (vq_index >= priv->nr_virtqs) {
			DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
				priv->vdev->device->name, vq_index);
			continue;
		}
		virtq = &priv->virtqs[vq_index];
		if (!virtq->enable || virtq->version != version)
			continue;
		if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
			continue;
		virtq->stopped = true;
		/* Query error info. */
		if (mlx5_vdpa_virtq_query(priv, vq_index))
			goto log;
		/* Disable the virtq. */
		if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
			DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
			goto log;
		}
		/* Retry if error happens less than N times in 3 seconds. */
		sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
		if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
			/* Retry. */
			if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
				DRV_LOG(ERR, "Failed to enable virtq %d.",
					vq_index);
			else
				DRV_LOG(WARNING, "Recover virtq %d: %u.",
					vq_index, ++virtq->n_retry);
		} else {
			/* Retry timeout, give up. */
			DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
				priv->vdev->device->name, vq_index);
		}
log:
		/* Shift in current time to error time log end. */
		for (i = 1; i < RTE_DIM(virtq->err_time); i++)
			virtq->err_time[i - 1] = virtq->err_time[i];
		virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
	}
	pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;
	int flags;

	/* Setup device event channel. */
	priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->ctx, 0);
	if (!priv->err_chnl) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create device event channel %d.",
			rte_errno);
		goto error;
	}
	flags = fcntl(priv->err_chnl->fd, F_GETFL);
	ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change device event channel FD.");
		goto error;
	}
	priv->err_intr_handle.fd = priv->err_chnl->fd;
	priv->err_intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->err_intr_handle,
				       mlx5_vdpa_err_interrupt_handler,
				       priv)) {
		priv->err_intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
			priv->vid);
		goto error;
	}
	DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
		priv->vid);
	return 0;
error:
	mlx5_vdpa_err_event_unset(priv);
	return -1;
}

void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (!priv->err_intr_handle.fd)
		return;
	while (retries-- && ret == -EAGAIN) {
		ret = rte_intr_callback_unregister(&priv->err_intr_handle,
					    mlx5_vdpa_err_interrupt_handler,
					    priv);
		if (ret == -EAGAIN) {
			DRV_LOG(DEBUG, "Try again to unregister fd %d "
				"of error interrupt, retries = %d.",
				priv->err_intr_handle.fd, retries);
			rte_pause();
		}
	}
	memset(&priv->err_intr_handle, 0, sizeof(priv->err_intr_handle));
	if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
				    128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->err_chnl,
		       &out.event_resp, sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
#endif
		mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
		priv->err_chnl = NULL;
	}
}

int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;
	rte_cpuset_t cpuset;
	pthread_attr_t attr;
	char name[16];
	const struct sched_param sp = {
		.sched_priority = sched_get_priority_max(SCHED_RR),
	};

	if (!priv->eventc)
		/* All virtqs are in poll mode. */
		return 0;
	if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
		pthread_mutex_init(&priv->timer_lock, NULL);
		pthread_cond_init(&priv->timer_cond, NULL);
		priv->timer_on = 0;
		pthread_attr_init(&attr);
		CPU_ZERO(&cpuset);
		if (priv->event_core != -1)
			CPU_SET(priv->event_core, &cpuset);
		else
			cpuset = rte_lcore_cpuset(rte_get_main_lcore());
		ret = pthread_attr_setaffinity_np(&attr, sizeof(cpuset),
						  &cpuset);
		if (ret) {
			DRV_LOG(ERR, "Failed to set thread affinity.");
			return -1;
		}
		ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
		if (ret) {
			DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
			return -1;
		}
		ret = pthread_attr_setschedparam(&attr, &sp);
		if (ret) {
			DRV_LOG(ERR, "Failed to set thread priority.");
			return -1;
		}
		ret = pthread_create(&priv->timer_tid, &attr,
				     mlx5_vdpa_poll_handle, (void *)priv);
		if (ret) {
			DRV_LOG(ERR, "Failed to create timer thread.");
			return -1;
		}
		snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
		ret = pthread_setname_np(priv->timer_tid, name);
		if (ret)
			DRV_LOG(ERR, "Failed to set timer thread name.");
	}
	priv->intr_handle.fd = priv->eventc->fd;
	priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->intr_handle,
				       mlx5_vdpa_interrupt_handler, priv)) {
		priv->intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_cqe_event_unset(priv);
	return -1;
}

void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;
	void *status;

	if (priv->intr_handle.fd) {
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(&priv->intr_handle,
						    mlx5_vdpa_interrupt_handler,
						    priv);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
					"of CQ interrupt, retries = %d.",
					priv->intr_handle.fd, retries);
				rte_pause();
			}
		}
		memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
	}
	if (priv->timer_tid) {
		pthread_cancel(priv->timer_tid);
		pthread_join(priv->timer_tid, &status);
	}
	priv->timer_tid = 0;
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
	if (eqp->sw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
	if (eqp->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
	if (eqp->umem_buf)
		rte_free(eqp->umem_buf);
	if (eqp->fw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
	mlx5_vdpa_cq_destroy(&eqp->cq);
	memset(eqp, 0, sizeof(*eqp));
}

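/*
 * Move the loopback QP pair to ready state. The FW QP and the SW QP are
 * connected back to back (each one is given the other's QP number as the
 * remote), and both are walked through the standard
 * RESET -> INIT -> RTR -> RTS sequence.
 */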
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}

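/*
 * Create one event QP set per virtq: a CQ, a firmware-side QP and a
 * software-side QP connected in loopback. The device reports virtq
 * completions through this QP pair and the driver detects them by polling
 * the CQ.
 */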
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			  int callfd, struct mlx5_vdpa_event_qp *eqp)
{
	struct mlx5_devx_qp_attr attr = {0};
	uint16_t log_desc_n = rte_log2_u32(desc_n);
	uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
			     sizeof(*eqp->db_rec) * 2;
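	/*
	 * Single umem for the SW QP: the receive WQEs first (one WSEG-sized
	 * stride per descriptor), followed by the two doorbell records at
	 * dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE.
	 */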

	if (mlx5_vdpa_event_qp_global_prepare(priv))
		return -1;
	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
		return -1;
	attr.pd = priv->pdn;
	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->fw_qp) {
		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
		goto error;
	}
	eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!eqp->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
		rte_errno = ENOMEM;
		goto error;
	}
	eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
					       (void *)(uintptr_t)eqp->umem_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!eqp->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SW QP.");
		goto error;
	}
	attr.uar_index = priv->uar->page_id;
	attr.cqn = eqp->cq.cq_obj.cq->id;
	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
	attr.rq_size = 1 << log_desc_n;
	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
	attr.sq_size = 0; /* No SQ is needed. */
	attr.dbr_umem_valid = 1;
	attr.wq_umem_id = eqp->umem_obj->umem_id;
	attr.wq_umem_offset = 0;
	attr.dbr_umem_id = eqp->umem_obj->umem_id;
	attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
	eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->sw_qp) {
		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
		goto error;
	}
	eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
	if (mlx5_vdpa_qps2rts(eqp))
		goto error;
	/* First ringing. */
	rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
	return 0;
error:
	mlx5_vdpa_event_qp_destroy(eqp);
	return -1;
}