/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <sched.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_ERROR_TIME_SEC 3u

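/*
 * Release the DevX event channel and the UAR shared by all the event
 * QPs of the device.
 */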
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
	if (priv->uar)
		mlx5_glue->devx_free_uar(priv->uar);
	priv->uar = NULL;
#ifdef HAVE_IBV_DEVX_EVENT
	if (priv->eventc)
		mlx5_os_devx_destroy_event_channel(priv->eventc);
	priv->eventc = NULL;
#endif
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
	if (priv->eventc)
		return 0;
	priv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,
			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!priv->eventc) {
		DRV_LOG(ERR, "Failed to create event channel %d.", errno);
		return -1;
	}
	/*
	 * This PMD always issues the write memory barrier before UAR
	 * register writes, so it is safe to allocate the UAR with any
	 * memory mapping type.
	 */
	priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
	if (!priv->uar) {
		DRV_LOG(ERR, "Failed to allocate UAR.");
		mlx5_vdpa_event_qp_global_release(priv);
		return -1;
	}
	return 0;
}

static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
	mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
}

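/*
 * Arm the CQ for the next completion event: write the arm sequence
 * number and the consumer index to the CQ arm doorbell record, then
 * ring the CQ doorbell register in the UAR page.
 */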
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
	uint64_t db_be = rte_cpu_to_be_64(doorbell);
	uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

	rte_io_wmb();
	cq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	cq->arm_sn++;
	cq->armed = 1;
}

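/*
 * Create a CQ of 1 << log_desc_n entries for an event QP, remember the
 * guest callfd and subscribe the CQ completion event (event number 0)
 * on the driver event channel with the CQ pointer as the cookie.
 */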
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
		    int callfd, struct mlx5_vdpa_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = priv->uar->page_id,
	};
	uint16_t event_nums[1] = {0};
	int ret;

	ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, log_desc_n,
				  &attr, SOCKET_ID_ANY);
	if (ret)
		goto error;
	cq->cq_ci = 0;
	cq->log_desc_n = log_desc_n;
	rte_spinlock_init(&cq->sl);
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
						cq->cq_obj.cq->obj,
						sizeof(event_nums), event_nums,
						(uint64_t)(uintptr_t)cq);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		goto error;
	}
	cq->callfd = callfd;
	/* Initialize the CQ to all ones so it starts under HW ownership. */
	cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
	cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
	/* First arming. */
	mlx5_vdpa_cq_arm(priv, cq);
	return 0;
error:
	mlx5_vdpa_cq_destroy(cq);
	return -1;
}

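/*
 * Poll the CQ by reading the wqe_counter of the single re-used CQE and
 * comparing it with the driver CQ consumer index; ring the CQ and SW QP
 * doorbell records and return the number of new completions.
 */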
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
	struct mlx5_vdpa_event_qp *eqp =
				container_of(cq, struct mlx5_vdpa_event_qp, cq);
	const unsigned int cq_size = 1 << cq->log_desc_n;
	union {
		struct { uint16_t wqe_counter; uint8_t rsvd5; uint8_t op_own; };
		uint32_t word;
	} last_word;
	uint16_t next_wqe_counter = cq->cq_ci;
	uint16_t cur_wqe_counter;
	uint16_t comp;

	last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
	comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
	if (comp) {
		cq->cq_ci += comp;
		MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
			    MLX5_CQE_INVALID);
		if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_RESP_ERR ||
			       MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_REQ_ERR)))
			cq->errors++;
		rte_io_wmb();
		/* Ring CQ doorbell record. */
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
		rte_io_wmb();
		/* Ring SW QP doorbell record. */
		eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
	}
	return comp;
}

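/* Arm the CQs of all virtqs that are created and not currently armed. */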
static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_vdpa_cq *cq;
	int i;

	for (i = 0; i < priv->nr_virtqs; i++) {
		cq = &priv->virtqs[i].eqp.cq;
		if (cq->cq_obj.cq && !cq->armed)
			mlx5_vdpa_cq_arm(priv, cq);
	}
}

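/*
 * Sleep between polling iterations. In dynamic-timer mode the delay
 * grows by event_us when nothing was completed and shrinks in
 * proportion to the completion count (max) otherwise.
 */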
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
	if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
		if (max == 0)
			priv->timer_delay_us += priv->event_us;
		else if (max > 1)
			priv->timer_delay_us /= max;
	}
	if (priv->timer_delay_us)
		usleep(priv->timer_delay_us);
	else
		/* Give up the CPU to improve polling thread scheduling. */
		sched_yield();
}

/* Notify the virtio device about new traffic on a specific virtq. */
static uint32_t
mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
{
	uint32_t comp = mlx5_vdpa_cq_poll(cq);

	if (comp && cq->callfd != -1)
		eventfd_write(cq->callfd, (eventfd_t)1);
	return comp;
}

/* Notify the virtio device about new traffic on any virtq. */
static uint32_t
mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
{
	uint32_t i, max = 0;

	for (i = 0; i < priv->nr_virtqs; i++) {
		struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
		uint32_t comp = mlx5_vdpa_queue_complete(cq);

		max = RTE_MAX(max, comp);
	}
	return max;
}

/* Wait on all CQs channel for completion event. */
static struct mlx5_vdpa_cq *
mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
					    sizeof(out.buf));

	if (ret >= 0)
		return (struct mlx5_vdpa_cq *)(uintptr_t)out.event_resp.cookie;
	DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
		ret, errno);
#endif
	return NULL;
}

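/*
 * Main loop of the CQ polling thread. In the timer modes, all virtq CQs
 * are polled periodically and armed only after a no-traffic period; in
 * interrupt mode, the thread just waits for CQ events and re-arms the
 * reported CQ.
 */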
static void *
mlx5_vdpa_event_handle(void *arg)
{
	struct mlx5_vdpa_priv *priv = arg;
	struct mlx5_vdpa_cq *cq;
	uint32_t max;

	switch (priv->event_mode) {
	case MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER:
	case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
		priv->timer_delay_us = priv->event_us;
		while (1) {
			pthread_mutex_lock(&priv->vq_config_lock);
			max = mlx5_vdpa_queues_complete(priv);
			if (max == 0 && priv->no_traffic_counter++ >=
			    priv->no_traffic_max) {
				DRV_LOG(DEBUG, "Device %s traffic was stopped.",
					priv->vdev->device->name);
				mlx5_vdpa_arm_all_cqs(priv);
				do {
					pthread_mutex_unlock
							(&priv->vq_config_lock);
					cq = mlx5_vdpa_event_wait(priv);
					pthread_mutex_lock
							(&priv->vq_config_lock);
					if (cq == NULL ||
					    mlx5_vdpa_queue_complete(cq) > 0)
						break;
				} while (1);
				priv->timer_delay_us = priv->event_us;
				priv->no_traffic_counter = 0;
			} else if (max != 0) {
				priv->no_traffic_counter = 0;
			}
			pthread_mutex_unlock(&priv->vq_config_lock);
			mlx5_vdpa_timer_sleep(priv, max);
		}
		return NULL;
	case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
		do {
			cq = mlx5_vdpa_event_wait(priv);
			if (cq != NULL) {
				pthread_mutex_lock(&priv->vq_config_lock);
				if (mlx5_vdpa_queue_complete(cq) > 0)
					mlx5_vdpa_cq_arm(priv, cq);
				pthread_mutex_unlock(&priv->vq_config_lock);
			}
		} while (1);
	default:
		return NULL;
	}
}

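/*
 * Error interrupt handler: drain error events from the device error
 * channel and try to recover the affected virtq, giving up when errors
 * repeat too often within MLX5_VDPA_ERROR_TIME_SEC.
 */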
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
	struct mlx5_vdpa_priv *priv = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	uint32_t vq_index, i, version;
	struct mlx5_vdpa_virtq *virtq;
	uint64_t sec;

	pthread_mutex_lock(&priv->vq_config_lock);
	while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		vq_index = out.event_resp.cookie & UINT32_MAX;
		version = out.event_resp.cookie >> 32;
		if (vq_index >= priv->nr_virtqs) {
			DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
				priv->vdev->device->name, vq_index);
			continue;
		}
		virtq = &priv->virtqs[vq_index];
		if (!virtq->enable || virtq->version != version)
			continue;
		if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
			continue;
		virtq->stopped = true;
		/* Query error info. */
		if (mlx5_vdpa_virtq_query(priv, vq_index))
			goto log;
		/* Disable the virtq. */
		if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
			DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
			goto log;
		}
		/* Retry if the error happened less than N times in 3 seconds. */
		sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
		if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
			/* Retry. */
			if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
				DRV_LOG(ERR, "Failed to enable virtq %d.",
					vq_index);
			else
				DRV_LOG(WARNING, "Recover virtq %d: %u.",
					vq_index, ++virtq->n_retry);
		} else {
			/* Retry timeout, give up. */
			DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
				priv->vdev->device->name, vq_index);
		}
log:
		/* Shift the current time into the end of the error time log. */
		for (i = 1; i < RTE_DIM(virtq->err_time); i++)
			virtq->err_time[i - 1] = virtq->err_time[i];
		virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
	}
	pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

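/*
 * Create the device error event channel, switch its FD to non-blocking
 * mode and register mlx5_vdpa_err_interrupt_handler() on it through the
 * EAL interrupt API.
 */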
int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret, flags;

	/* Setup device event channel. */
	priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->cdev->ctx,
							       0);
	if (!priv->err_chnl) {
		DRV_LOG(ERR, "Failed to create device event channel %d.",
			errno);
		goto error;
	}
	flags = fcntl(priv->err_chnl->fd, F_GETFL);
	ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change device event channel FD.");
		goto error;
	}
	if (rte_intr_fd_set(priv->err_intr_handle, priv->err_chnl->fd))
		goto error;
	if (rte_intr_type_set(priv->err_intr_handle, RTE_INTR_HANDLE_EXT))
		goto error;
	if (rte_intr_callback_register(priv->err_intr_handle,
				       mlx5_vdpa_err_interrupt_handler,
				       priv)) {
		rte_intr_fd_set(priv->err_intr_handle, 0);
		DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
			priv->vid);
		goto error;
	}
	DRV_LOG(DEBUG, "Registered error interrupt for device %d.", priv->vid);
	return 0;
error:
	mlx5_vdpa_err_event_unset(priv);
	return -1;
}

void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (!rte_intr_fd_get(priv->err_intr_handle))
		return;
	while (retries-- && ret == -EAGAIN) {
		ret = rte_intr_callback_unregister(priv->err_intr_handle,
						mlx5_vdpa_err_interrupt_handler,
						   priv);
		if (ret == -EAGAIN) {
			DRV_LOG(DEBUG, "Try again to unregister fd %d "
				"of error interrupt, retries = %d.",
				rte_intr_fd_get(priv->err_intr_handle),
				retries);
		}
	}
	if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
				    128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->err_chnl,
		       &out.event_resp, sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
#endif
		mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
		priv->err_chnl = NULL;
	}
}

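/*
 * Start the CQE event thread (mlx5_vdpa_event_handle) with SCHED_RR
 * priority and pin it to the configured event core, or to the main
 * lcore CPU set by default.
 */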
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
	rte_cpuset_t cpuset;
	pthread_attr_t attr;
	char name[16];
	const struct sched_param sp = {
		.sched_priority = sched_get_priority_max(SCHED_RR),
	};
	int ret;

	if (!priv->eventc)
		/* All virtqs are in poll mode. */
		return 0;
	pthread_attr_init(&attr);
	if (pthread_attr_setschedpolicy(&attr, SCHED_RR)) {
		DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
		return -1;
	}
	if (pthread_attr_setschedparam(&attr, &sp)) {
		DRV_LOG(ERR, "Failed to set thread priority.");
		return -1;
	}
	if (pthread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
			   (void *)priv)) {
		DRV_LOG(ERR, "Failed to create timer thread.");
		return -1;
	}
	CPU_ZERO(&cpuset);
	if (priv->event_core != -1)
		CPU_SET(priv->event_core, &cpuset);
	else
		cpuset = rte_lcore_cpuset(rte_get_main_lcore());
	if (pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset)) {
		DRV_LOG(ERR, "Failed to set thread affinity.");
		return -1;
	}
	snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
	ret = rte_thread_setname(priv->timer_tid, name);
	if (ret)
		DRV_LOG(DEBUG, "Cannot set timer thread name.");
	return 0;
}

void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
	void *status;

	if (priv->timer_tid) {
		pthread_cancel(priv->timer_tid);
		pthread_join(priv->timer_tid, &status);
	}
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
	mlx5_devx_qp_destroy(&eqp->sw_qp);
	if (eqp->fw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
	mlx5_vdpa_cq_destroy(&eqp->cq);
	memset(eqp, 0, sizeof(*eqp));
}

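/*
 * Move both the FW QP and the SW QP through RST->INIT->RTR->RTS, each
 * one targeting the other QP number, so they form a loopback pair.
 */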
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).", rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
					  MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).", rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).", rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
					  MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).", rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).", rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).", rte_errno);
		return -1;
	}
	return 0;
}

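/*
 * Create an event QP: a CQ bound to the guest callfd, a FW QP and a SW
 * (RQ-only) QP, connect the pair, and ring the initial SW QP doorbell
 * to post the whole RQ.
 */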
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			  int callfd, struct mlx5_vdpa_event_qp *eqp)
{
	struct mlx5_devx_qp_attr attr = {0};
	uint16_t log_desc_n = rte_log2_u32(desc_n);
	int ret;

	if (mlx5_vdpa_event_qp_global_prepare(priv))
		return -1;
	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
		return -1;
	attr.pd = priv->cdev->pdn;
	attr.ts_format =
		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
	if (!eqp->fw_qp) {
		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
		goto error;
	}
	attr.uar_index = priv->uar->page_id;
	attr.cqn = eqp->cq.cq_obj.cq->id;
	attr.rq_size = RTE_BIT32(log_desc_n);
	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
	attr.sq_size = 0; /* No SQ is needed. */
	attr.ts_format =
		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
	ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp), log_desc_n,
				  &attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
		goto error;
	}
	if (mlx5_vdpa_qps2rts(eqp))
		goto error;
	/* First ringing. */
	rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
			&eqp->sw_qp.db_rec[0]);
	return 0;
error:
	mlx5_vdpa_event_qp_destroy(eqp);
	return -1;
}