/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

#define MLX5_VDPA_ERROR_TIME_SEC 3u

mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
	mlx5_devx_uar_release(&priv->uar);
#ifdef HAVE_IBV_DEVX_EVENT
	mlx5_os_devx_destroy_event_channel(priv->eventc);
/* Prepare all the global resources for all the event objects. */
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
	priv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
		DRV_LOG(ERR, "Failed to create event channel %d.",
	if (mlx5_devx_uar_prepare(priv->cdev, &priv->uar) != 0) {
		DRV_LOG(ERR, "Failed to allocate UAR.");
	mlx5_vdpa_event_qp_global_release(priv);
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
	mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
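
/*
 * Arm the CQ for the next completion event.
 * The 64-bit doorbell value is composed from the pieces visible below: the
 * high word carries the arm sequence number, the arm command and the masked
 * consumer index, while the low word carries the CQ number; the same high
 * word is also written to the CQ arm doorbell record. A rough sketch of the
 * composition (editor's illustration of what the code below computes, not a
 * normative definition of the hardware format):
 *
 *	doorbell_hi = (arm_sn << MLX5_CQ_SQN_OFFSET) | MLX5_CQ_DBR_CMD_ALL |
 *		      (cq_ci & MLX5_CI_MASK);
 *	doorbell    = ((uint64_t)doorbell_hi << 32) | cq_number;
 */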
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
	uint64_t db_be = rte_cpu_to_be_64(doorbell);

	mlx5_doorbell_ring(&priv->uar.cq_db, db_be, doorbell_hi,
			   &cq->cq_obj.db_rec[MLX5_CQ_ARM_DB], 0);
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
		    int callfd, struct mlx5_vdpa_virtq *virtq)
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
	struct mlx5_vdpa_cq *cq = &virtq->eqp.cq;
	uint16_t event_nums[1] = {0};

	ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, log_desc_n,
				  &attr, SOCKET_ID_ANY);
	cq->log_desc_n = log_desc_n;
	rte_spinlock_init(&cq->sl);
	/* Subscribe the CQ completion event to the event channel controlled by the driver. */
	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc,
						   (uint64_t)(uintptr_t)virtq);
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
	/* Initialize the first CQE to all ones so that HW owns it at the start. */
	cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
	cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
	mlx5_vdpa_cq_arm(priv, cq);
	mlx5_vdpa_cq_destroy(cq);
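
/*
 * Poll the CQ and return the number of new completions.
 * Only cqes[0] is inspected, so the number of completions is derived from the
 * WQE counter reported there relative to the SW producer index (eqp->qp_pi),
 * using 16-bit wrap-around arithmetic. For example (editor's illustration):
 * with qp_pi == 0xFFFE and a reported wqe_counter of 0x0001,
 *
 *	comp = (uint16_t)(0x0001 + 1 - 0xFFFE) == 4
 *
 * new completions. The CQ and SW QP doorbell records are then updated so the
 * entries can be reused.
 */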
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
	struct mlx5_vdpa_event_qp *eqp =
				container_of(cq, struct mlx5_vdpa_event_qp, cq);
	const unsigned int cq_size = 1 << cq->log_desc_n;
	uint16_t wqe_counter;
	uint16_t next_wqe_counter = eqp->qp_pi;
	uint16_t cur_wqe_counter;

	last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
	comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
	MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
	if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
		       MLX5_CQE_OPCODE(last_word.op_own) ==
	/* Ring CQ doorbell record. */
	cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	/* Ring SW QP doorbell record. */
	eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(eqp->qp_pi + cq_size);
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
	struct mlx5_vdpa_virtq *virtq;
	struct mlx5_vdpa_cq *cq;

	for (i = 0; i < priv->nr_virtqs; i++) {
		virtq = &priv->virtqs[i];
		pthread_mutex_lock(&virtq->virtq_lock);
		cq = &priv->virtqs[i].eqp.cq;
		if (cq->cq_obj.cq && !cq->armed)
			mlx5_vdpa_cq_arm(priv, cq);
		pthread_mutex_unlock(&virtq->virtq_lock);
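
/*
 * Sleep between polling iterations.
 * In dynamic-timer mode the delay grows by event_us when no traffic was seen
 * and is divided by the number of completions otherwise, so the polling rate
 * adapts to the load; in fixed-timer mode the configured delay is used as-is.
 * A zero delay skips usleep() and only gives up the CPU.
 */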
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
	if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
			priv->timer_delay_us += priv->event_us;
			priv->timer_delay_us /= max;
	if (priv->timer_delay_us)
		usleep(priv->timer_delay_us);
	/* Give up the CPU to improve scheduling of the polling threads. */
/* Notify the virtio device about new traffic on a specific virtq. */
mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
		comp = mlx5_vdpa_cq_poll(cq);
		if (cq->callfd != -1)
			eventfd_write(cq->callfd, (eventfd_t)1);
/* Notify the virtio device about new traffic on any virtq. */
mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
	struct mlx5_vdpa_virtq *virtq;
	struct mlx5_vdpa_cq *cq;

	for (i = 0; i < priv->nr_virtqs; i++) {
		virtq = &priv->virtqs[i];
		pthread_mutex_lock(&virtq->virtq_lock);
		comp = mlx5_vdpa_queue_complete(cq);
		pthread_mutex_unlock(&virtq->virtq_lock);
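
/*
 * Drain and reset all completion queues.
 * Outstanding completions are consumed, the first CQE and the SW producer
 * index are reset to their initial values, and the CQ is re-armed so the
 * device restarts from a clean state.
 */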
mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)
	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
		struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;

		mlx5_vdpa_queue_complete(cq);
			cq->cq_obj.cqes[0].wqe_counter =
						rte_cpu_to_be_16(UINT16_MAX);
			priv->virtqs[i].eqp.qp_pi = 0;
				mlx5_vdpa_cq_arm(priv, cq);
/* Wait on the common CQ event channel for a completion event on any CQ. */
static struct mlx5_vdpa_virtq *
mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
#ifdef HAVE_IBV_DEVX_EVENT
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	int ret = mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
		return (struct mlx5_vdpa_virtq *)
				(uintptr_t)out.event_resp.cookie;
	DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
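
/*
 * Body of the per-device event thread created in mlx5_vdpa_cqe_event_setup().
 * In the timer modes (dynamic/fixed) the thread polls all virtq CQs in a
 * loop and, once no traffic has been seen for no_traffic_max iterations,
 * arms the CQs and blocks on the event channel until traffic resumes.
 * In interrupt-only mode it always blocks on the event channel and re-arms
 * the CQ of the virtq that generated each completion event.
 */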
mlx5_vdpa_event_handle(void *arg)
	struct mlx5_vdpa_priv *priv = arg;
	struct mlx5_vdpa_virtq *virtq;

	switch (priv->event_mode) {
	case MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER:
	case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
		priv->timer_delay_us = priv->event_us;
			max = mlx5_vdpa_queues_complete(priv);
			if (max == 0 && priv->no_traffic_counter++ >=
			    priv->no_traffic_max) {
				DRV_LOG(DEBUG, "Device %s traffic was stopped.",
					priv->vdev->device->name);
				mlx5_vdpa_arm_all_cqs(priv);
					virtq = mlx5_vdpa_event_wait(priv);
						if (mlx5_vdpa_queue_complete(
						    &virtq->eqp.cq) > 0) {
							pthread_mutex_unlock(
						pthread_mutex_unlock(
				priv->timer_delay_us = priv->event_us;
				priv->no_traffic_counter = 0;
			} else if (max != 0) {
				priv->no_traffic_counter = 0;
			mlx5_vdpa_timer_sleep(priv, max);
	case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
			virtq = mlx5_vdpa_event_wait(priv);
				pthread_mutex_lock(&virtq->virtq_lock);
				if (mlx5_vdpa_queue_complete(
					mlx5_vdpa_cq_arm(priv, &virtq->eqp.cq);
				pthread_mutex_unlock(&virtq->virtq_lock);
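
/*
 * Handler for the device error event channel.
 * The 64-bit event cookie encodes the virtq index in its low 32 bits and the
 * virtq version in its high 32 bits, as decoded below:
 *
 *	vq_index = cookie & UINT32_MAX;
 *	version  = cookie >> 32;
 *
 * Events carrying a stale version (the virtq was re-created meanwhile) are
 * ignored. Otherwise the virtq is stopped, queried for error details and
 * disabled; it is re-enabled only if the oldest logged error is older than
 * MLX5_VDPA_ERROR_TIME_SEC seconds, otherwise recovery is abandoned.
 */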
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
#ifdef HAVE_IBV_DEVX_EVENT
	struct mlx5_vdpa_priv *priv = cb_arg;
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	uint32_t vq_index, i, version;
	struct mlx5_vdpa_virtq *virtq;

	while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
	       (ssize_t)sizeof(out.event_resp.cookie)) {
		vq_index = out.event_resp.cookie & UINT32_MAX;
		version = out.event_resp.cookie >> 32;
		if (vq_index >= priv->nr_virtqs) {
			DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
				priv->vdev->device->name, vq_index);
		virtq = &priv->virtqs[vq_index];
		pthread_mutex_lock(&virtq->virtq_lock);
		if (!virtq->enable || virtq->version != version)
		if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
		virtq->stopped = true;
		/* Query error info. */
		if (mlx5_vdpa_virtq_query(priv, vq_index))
		if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
			DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
		/* Retry if the error happened fewer than N times within 3 seconds. */
		sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
		if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
			if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
				DRV_LOG(ERR, "Failed to enable virtq %d.",
			DRV_LOG(WARNING, "Recover virtq %d: %u.",
				vq_index, ++virtq->n_retry);
			/* Retry timeout, give up. */
			DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
				priv->vdev->device->name, vq_index);
		/* Shift the error time log and record the current time at its end. */
		for (i = 1; i < RTE_DIM(virtq->err_time); i++)
			virtq->err_time[i - 1] = virtq->err_time[i];
		virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
		pthread_mutex_unlock(&virtq->virtq_lock);
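
/*
 * Register the device error event channel.
 * A DevX event channel is created for the device, its file descriptor is
 * switched to non-blocking mode, and mlx5_vdpa_err_interrupt_handler() is
 * registered on it through the EAL interrupt framework so that error events
 * are handled asynchronously.
 */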
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
	/* Set up the device event channel. */
	priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->cdev->ctx,
	if (!priv->err_chnl) {
		DRV_LOG(ERR, "Failed to create device event channel %d.",
	flags = fcntl(priv->err_chnl->fd, F_GETFL);
	ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
		DRV_LOG(ERR, "Failed to change device event channel FD.");
	priv->err_intr_handle =
		rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
	if (priv->err_intr_handle == NULL) {
		DRV_LOG(ERR, "Failed to allocate intr_handle.");
	if (rte_intr_fd_set(priv->err_intr_handle, priv->err_chnl->fd))
	if (rte_intr_type_set(priv->err_intr_handle, RTE_INTR_HANDLE_EXT))
	ret = rte_intr_callback_register(priv->err_intr_handle,
					 mlx5_vdpa_err_interrupt_handler,
		rte_intr_fd_set(priv->err_intr_handle, 0);
		DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
	DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
	mlx5_vdpa_err_event_unset(priv);
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
	int retries = MLX5_VDPA_INTR_RETRIES;

	if (!rte_intr_fd_get(priv->err_intr_handle))
	while (retries-- && ret == -EAGAIN) {
		ret = rte_intr_callback_unregister(priv->err_intr_handle,
						   mlx5_vdpa_err_interrupt_handler,
		if (ret == -EAGAIN) {
			DRV_LOG(DEBUG, "Try again to unregister fd %d "
				"of error interrupt, retries = %d.",
				rte_intr_fd_get(priv->err_intr_handle),
	if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
		/* Drain all pending events. */
		while (mlx5_glue->devx_get_event(priv->err_chnl,
						 &out.event_resp, sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
		mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
		priv->err_chnl = NULL;
	rte_intr_instance_free(priv->err_intr_handle);
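
/*
 * Start the completion event thread.
 * The thread runs mlx5_vdpa_event_handle() with SCHED_RR scheduling at the
 * maximum priority, and is pinned either to the core configured in
 * priv->event_core (when not -1) or, by default, to the CPU set of the main
 * lcore.
 */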
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
	const struct sched_param sp = {
		.sched_priority = sched_get_priority_max(SCHED_RR),
		/* All virtqs are in poll mode. */
	pthread_attr_init(&attr);
	ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
		DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
	ret = pthread_attr_setschedparam(&attr, &sp);
		DRV_LOG(ERR, "Failed to set thread priority.");
	ret = pthread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
		DRV_LOG(ERR, "Failed to create timer thread.");
	if (priv->event_core != -1)
		CPU_SET(priv->event_core, &cpuset);
	else
		cpuset = rte_lcore_cpuset(rte_get_main_lcore());
	ret = pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset);
		DRV_LOG(ERR, "Failed to set thread affinity.");
	snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
	ret = rte_thread_setname(priv->timer_tid, name);
		DRV_LOG(DEBUG, "Cannot set timer thread name.");
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
	struct mlx5_vdpa_virtq *virtq;

	if (priv->timer_tid) {
		pthread_cancel(priv->timer_tid);
		pthread_join(priv->timer_tid, &status);
		/* The mutex may stay locked after the event thread is cancelled; reinitialize it. */
		for (i = 0; i < priv->nr_virtqs; i++) {
			virtq = &priv->virtqs[i];
			pthread_mutex_init(&virtq->virtq_lock, NULL);
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
	mlx5_devx_qp_destroy(&eqp->sw_qp);
		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
	mlx5_vdpa_cq_destroy(&eqp->cq);
	memset(eqp, 0, sizeof(*eqp));
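
/*
 * Move the event QP pair to the RTS state.
 * The FW QP and the SW QP are connected to each other (each modify command
 * below passes the other QP's id as the remote QP number) and both are
 * walked through the standard RST -> INIT -> RTR -> RTS transition.
 */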
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to INIT state (%u).",
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
					  MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to INIT state (%u).",
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTR state (%u).",
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
					  MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTR state (%u).",
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTS state (%u).",
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
		DRV_LOG(ERR, "Failed to modify SW QP to RTS state (%u).",
mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_QP_2RST,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RST state (%u).",
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
					  MLX5_CMD_OP_QP_2RST, eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RST state (%u).",
	return mlx5_vdpa_qps2rts(eqp);
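
/*
 * Create, or reuse, the event QP pair of a virtq.
 * The requested descriptor count is rounded to a power of two. When a CQ of
 * the requested size already exists, only the callfd is updated and the QPs
 * are cycled through RST back to RTS; otherwise the old objects are
 * destroyed and a new CQ, FW QP and SW QP (receive queue only, no SQ) are
 * created and moved to RTS. A typical call from the virtq setup path would
 * look roughly like this (editor's sketch; the caller's local names are
 * illustrative only):
 *
 *	if (mlx5_vdpa_event_qp_prepare(priv, vq_size, callfd, virtq))
 *		return -1;
 */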
mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			   int callfd, struct mlx5_vdpa_virtq *virtq)
	struct mlx5_vdpa_event_qp *eqp = &virtq->eqp;
	struct mlx5_devx_qp_attr attr = {0};
	uint16_t log_desc_n = rte_log2_u32(desc_n);

	if (eqp->cq.cq_obj.cq != NULL && log_desc_n == eqp->cq.log_desc_n) {
		/* Reuse existing resources. */
		eqp->cq.callfd = callfd;
		/* FW will set the event QP to error state on queue destroy. */
		if (!mlx5_vdpa_qps2rst2rts(eqp)) {
			rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
				    &eqp->sw_qp.db_rec[0]);
	mlx5_vdpa_event_qp_destroy(eqp);
	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, virtq) ||
	attr.pd = priv->cdev->pdn;
		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
		DRV_LOG(ERR, "Failed to create FW QP (%u).", rte_errno);
	attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj);
	attr.cqn = eqp->cq.cq_obj.cq->id;
	attr.num_of_receive_wqes = RTE_BIT32(log_desc_n);
	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
	attr.num_of_send_wqbbs = 0; /* No SQ is needed. */
		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
	ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp),
				  attr.num_of_receive_wqes * MLX5_WSEG_SIZE,
				  &attr, SOCKET_ID_ANY);
		DRV_LOG(ERR, "Failed to create SW QP (%u).", rte_errno);
	if (mlx5_vdpa_qps2rts(eqp))
	if (eqp->sw_qp.db_rec)
		rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
			    &eqp->sw_qp.db_rec[0]);
	mlx5_vdpa_event_qp_destroy(eqp);