/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
	if (priv->uar) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
	if (priv->eventc) {
		mlx5_glue->devx_destroy_event_channel(priv->eventc);
		priv->eventc = NULL;
	}
	priv->eqn = 0;
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
	uint32_t lcore;

	if (priv->eventc)
		return 0;
	lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	if (mlx5_glue->devx_query_eqn(priv->ctx, lcore, &priv->eqn)) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
		return -1;
	}
	priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!priv->eventc) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.",
			rte_errno);
		goto error;
	}
	priv->uar = mlx5_glue->devx_alloc_uar(priv->ctx, 0);
	if (!priv->uar) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate UAR.");
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_event_qp_global_release(priv);
	return -1;
}
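
/* Release the CQ resources; safe to call on a partially created CQ. */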
static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		rte_free((void *)(uintptr_t)cq->umem_buf);
	memset(cq, 0, sizeof(*cq));
}
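
/*
 * Request an event on the next completion: write the arm doorbell record
 * and then ring the CQ doorbell in the UAR page. The barriers order the
 * record update before the UAR write; the split 32-bit UAR path is for
 * targets without atomic 64-bit MMIO stores.
 */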
static inline void
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq->id;
	uint64_t db_be = rte_cpu_to_be_64(doorbell);
	uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

	rte_io_wmb();
	cq->db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	cq->arm_sn++;
}
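
/*
 * Create a CQ for one event QP. The CQE ring and the doorbell records
 * share a single registered umem; the doorbell area starts right after
 * the CQE array (db_umem_offset). The CQ is then subscribed to the
 * global event channel with the cq pointer as the event cookie.
 */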
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
		    int callfd, struct mlx5_vdpa_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {0};
	size_t pgsize = sysconf(_SC_PAGESIZE);
	uint32_t umem_size;
	int ret;
	uint16_t event_nums[1] = {0};

	cq->log_desc_n = log_desc_n;
	umem_size = sizeof(struct mlx5_cqe) * (1 << log_desc_n) +
							sizeof(*cq->db_rec) * 2;
	cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!cq->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
						(void *)(uintptr_t)cq->umem_buf,
						umem_size,
						IBV_ACCESS_LOCAL_WRITE);
	if (!cq->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	attr.q_umem_valid = 1;
	attr.db_umem_valid = 1;
	attr.use_first_only = 0;
	attr.overrun_ignore = 0;
	attr.uar_page_id = priv->uar->page_id;
	attr.q_umem_id = cq->umem_obj->umem_id;
	attr.q_umem_offset = 0;
	attr.db_umem_id = cq->umem_obj->umem_id;
	attr.db_umem_offset = sizeof(struct mlx5_cqe) * (1 << log_desc_n);
	attr.eqn = priv->eqn;
	attr.log_cq_size = log_desc_n;
	attr.log_page_size = rte_log2_u32(pgsize);
	cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
	if (!cq->cq)
		goto error;
	cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
	cq->cq_ci = 0;
	rte_spinlock_init(&cq->sl);
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
						   sizeof(event_nums),
						   event_nums,
						   (uint64_t)(uintptr_t)cq);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		goto error;
	}
	cq->callfd = callfd;
	/* Initialize CQ to all ones so every CQE starts in HW ownership. */
	memset((void *)(uintptr_t)cq->umem_buf, 0xFF, attr.db_umem_offset);
	/* First arming. */
	mlx5_vdpa_cq_arm(priv, cq);
	return 0;
error:
	mlx5_vdpa_cq_destroy(cq);
	return -1;
}
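
/*
 * Consume all SW-owned CQEs and advance the consumer index. Ownership
 * comes from check_cqe(); error CQEs are counted and skipped. Once HW
 * ownership is reached, both the CQ and SW QP doorbell records are
 * updated to return the completed entries to HW.
 */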
static inline void __rte_unused
mlx5_vdpa_cq_poll(struct mlx5_vdpa_priv *priv __rte_unused,
		  struct mlx5_vdpa_cq *cq)
{
	struct mlx5_vdpa_event_qp *eqp =
				container_of(cq, struct mlx5_vdpa_event_qp, cq);
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int cq_mask = cq_size - 1;
	int ret;

	do {
		volatile struct mlx5_cqe *cqe = cq->cqes + (cq->cq_ci &
							    cq_mask);

		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		switch (ret) {
		case MLX5_CQE_STATUS_ERR:
			cq->errors++;
			/* fall-through */
		case MLX5_CQE_STATUS_SW_OWN:
			cq->cq_ci++;
			break;
		case MLX5_CQE_STATUS_HW_OWN:
		default:
			break;
		}
	} while (ret != MLX5_CQE_STATUS_HW_OWN);
	rte_io_wmb();
	/* Ring CQ doorbell record. */
	cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	rte_io_wmb();
	/* Ring SW QP doorbell record. */
	eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
}
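
/*
 * Drain the event channel: for each pending CQ event, poll the CQ,
 * re-arm it, and notify the guest through the virtq callfd eventfd.
 */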
static void
mlx5_vdpa_interrupt_handler(void *cb_arg)
{
#ifndef HAVE_IBV_DEVX_EVENT
	(void)cb_arg;
	return;
#else
	struct mlx5_vdpa_priv *priv = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;

	while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
					       (uintptr_t)out.event_resp.cookie;

		rte_spinlock_lock(&cq->sl);
		mlx5_vdpa_cq_poll(priv, cq);
		mlx5_vdpa_cq_arm(priv, cq);
		if (cq->callfd != -1)
			/* Notify guest that descriptors were consumed. */
			eventfd_write(cq->callfd, (eventfd_t)1);
		rte_spinlock_unlock(&cq->sl);
		DRV_LOG(DEBUG, "CQ %d event: new cq_ci = %u.", cq->cq->id,
			cq->cq_ci);
	}
#endif /* HAVE_IBV_DEVX_EVENT */
}
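
/*
 * Set the event channel FD to non-blocking mode and register it with the
 * EAL interrupt handling, so mlx5_vdpa_interrupt_handler() runs on every
 * CQ event.
 */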
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
	int flags;
	int ret;

	if (!priv->eventc)
		/* All virtqs are in poll mode. */
		return 0;
	flags = fcntl(priv->eventc->fd, F_GETFL);
	ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change event channel FD.");
		rte_errno = errno;
		return -rte_errno;
	}
	priv->intr_handle.fd = priv->eventc->fd;
	priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->intr_handle,
				       mlx5_vdpa_interrupt_handler, priv)) {
		priv->intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register CQE interrupt %d.",
			rte_errno);
		return -rte_errno;
	}
	return 0;
}
void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (priv->intr_handle.fd) {
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(&priv->intr_handle,
						    mlx5_vdpa_interrupt_handler,
						    priv);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
					"of CQ interrupt, retries = %d.",
					priv->intr_handle.fd, retries);
				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
			}
		}
		memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
	}
}
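
/* Release the event QP resources; safe on a partially created object. */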
void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
	if (eqp->sw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
	if (eqp->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
	if (eqp->umem_buf)
		rte_free(eqp->umem_buf);
	if (eqp->fw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
	mlx5_vdpa_cq_destroy(&eqp->cq);
	memset(eqp, 0, sizeof(*eqp));
}
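
/*
 * Drive both QPs through the standard RST->INIT->RTR->RTS state machine;
 * each modify command takes the peer QP number, pairing the FW and SW
 * QPs with each other.
 */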
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			  int callfd, struct mlx5_vdpa_event_qp *eqp)
{
	struct mlx5_devx_qp_attr attr = {0};
	uint16_t log_desc_n = rte_log2_u32(desc_n);
	uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
						       sizeof(*eqp->db_rec) * 2;

	if (mlx5_vdpa_event_qp_global_prepare(priv))
		return -1;
	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
		return -1;
	attr.pd = priv->pdn;
	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->fw_qp) {
		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
		goto error;
	}
	eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!eqp->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
		rte_errno = ENOMEM;
		goto error;
	}
	eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
					       (void *)(uintptr_t)eqp->umem_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!eqp->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SW QP.");
		goto error;
	}
	attr.uar_index = priv->uar->page_id;
	attr.cqn = eqp->cq.cq->id;
	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
	attr.rq_size = 1 << log_desc_n;
	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
	attr.sq_size = 0; /* No SQ is needed. */
	attr.dbr_umem_valid = 1;
	attr.wq_umem_id = eqp->umem_obj->umem_id;
	attr.wq_umem_offset = 0;
	attr.dbr_umem_id = eqp->umem_obj->umem_id;
	attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
	eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->sw_qp) {
		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
		goto error;
	}
	eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
	if (mlx5_vdpa_qps2rts(eqp))
		goto error;
	/* First ringing. */
	rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
	return 0;
error:
	mlx5_vdpa_event_qp_destroy(eqp);
	return -1;
}