/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <errno.h>
#include <fcntl.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"
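
/*
 * Completion event handling. Each virtq is served by an event QP: a FW QP/SW
 * QP pair connected in loopback, with a CQ on the SW QP side. All CQs share
 * one DevX event channel serviced from the EAL interrupt thread, and are
 * armed through the UAR doorbell page.
 */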
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
	if (priv->uar) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
	if (priv->eventc) {
		mlx5_glue->devx_destroy_event_channel(priv->eventc);
		priv->eventc = NULL;
	}
	priv->eqn = 0;
}
/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
	uint32_t lcore;

	if (priv->eventc)
		return 0;
	lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	if (mlx5_glue->devx_query_eqn(priv->ctx, lcore, &priv->eqn)) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
		return -1;
	}
	priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!priv->eventc) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.",
			rte_errno);
		goto error;
	}
	priv->uar = mlx5_glue->devx_alloc_uar(priv->ctx, 0);
	if (!priv->uar) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate UAR.");
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_event_qp_global_release(priv);
	return -1;
}
static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		rte_free((void *)(uintptr_t)cq->umem_buf);
	memset(cq, 0, sizeof(*cq));
}
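
/*
 * Request a completion event on the CQ, following the standard mlx5 arming
 * protocol: write the arm command word (sequence number, command and current
 * consumer index) to the doorbell record first, then post the full 64-bit
 * doorbell (command word in the high dword, CQ number in the low dword) to
 * the UAR page. On 32-bit architectures the UAR write is split into two
 * ordered 32-bit stores.
 */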
static inline void
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq->id;
	uint64_t db_be = rte_cpu_to_be_64(doorbell);
	uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

	rte_io_wmb();
	cq->db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	cq->arm_sn++;
}
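
/*
 * The CQ uses a single registered umem: the CQE array at offset zero and the
 * two doorbell record words right after it, so db_umem_offset points past
 * the last CQE. The CQ is subscribed to the driver event channel and, when
 * the guest provides a callfd, also directly to that eventfd so the guest
 * can be kicked without software mediation.
 */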
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
		    int callfd, struct mlx5_vdpa_cq *cq)
{
	struct mlx5_devx_cq_attr attr;
	size_t pgsize = sysconf(_SC_PAGESIZE);
	uint32_t umem_size;
	int ret;
	uint16_t event_nums[1] = {0};

	cq->log_desc_n = log_desc_n;
	umem_size = sizeof(struct mlx5_cqe) * (1 << log_desc_n) +
						       sizeof(*cq->db_rec) * 2;
	cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!cq->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
						(void *)(uintptr_t)cq->umem_buf,
						umem_size,
						IBV_ACCESS_LOCAL_WRITE);
	if (!cq->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	attr.q_umem_valid = 1;
	attr.db_umem_valid = 1;
	attr.use_first_only = 0;
	attr.overrun_ignore = 0;
	attr.uar_page_id = priv->uar->page_id;
	attr.q_umem_id = cq->umem_obj->umem_id;
	attr.q_umem_offset = 0;
	attr.db_umem_id = cq->umem_obj->umem_id;
	attr.db_umem_offset = sizeof(struct mlx5_cqe) * (1 << log_desc_n);
	attr.eqn = priv->eqn;
	attr.log_cq_size = log_desc_n;
	attr.log_page_size = rte_log2_u32(pgsize);
	cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
	if (!cq->cq)
		goto error;
	cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
	cq->cq_ci = 0;
	rte_spinlock_init(&cq->sl);
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
						   sizeof(event_nums),
						   event_nums,
						   (uint64_t)(uintptr_t)cq);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		goto error;
	}
	/* Subscribe CQ event to the guest FD only if it is not in poll mode. */
	if (callfd != -1) {
		ret = mlx5_glue->devx_subscribe_devx_event_fd(priv->eventc,
							      callfd,
							      cq->cq->obj, 0);
		if (ret) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to subscribe CQE event fd.");
			goto error;
		}
	}
	/* First arming. */
	mlx5_vdpa_cq_arm(priv, cq);
	return 0;
error:
	mlx5_vdpa_cq_destroy(cq);
	return -1;
}
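
/*
 * Consume all CQEs currently owned by software. check_cqe() reports, per
 * entry, whether software owns it (consume), hardware still owns it (stop),
 * or it is an error CQE (count and consume). The new consumer index is then
 * published to the CQ doorbell record and, offset by the ring size, to the
 * SW QP doorbell record so the receive queue is replenished.
 */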
static inline void __rte_unused
mlx5_vdpa_cq_poll(struct mlx5_vdpa_priv *priv __rte_unused,
		  struct mlx5_vdpa_cq *cq)
{
	struct mlx5_vdpa_event_qp *eqp =
				container_of(cq, struct mlx5_vdpa_event_qp, cq);
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int cq_mask = cq_size - 1;
	int ret;

	do {
		volatile struct mlx5_cqe *cqe = cq->cqes + (cq->cq_ci &
							    cq_mask);

		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		switch (ret) {
		case MLX5_CQE_STATUS_ERR:
			cq->errors++;
			/* Fall-through. */
		case MLX5_CQE_STATUS_SW_OWN:
			cq->cq_ci++;
			break;
		case MLX5_CQE_STATUS_HW_OWN:
		default:
			break;
		}
	} while (ret != MLX5_CQE_STATUS_HW_OWN);
	rte_io_wmb();
	/* Ring CQ doorbell record. */
	cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	rte_io_wmb();
	/* Ring SW QP doorbell record. */
	eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
}
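
/*
 * Event channel handler. Each event carries the 64-bit cookie passed at
 * subscription time, which is the CQ pointer, so the handler polls and
 * re-arms exactly the CQ that fired. The channel FD is non-blocking, so the
 * loop drains all pending events and then returns.
 */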
static void
mlx5_vdpa_interrupt_handler(void *cb_arg)
{
#ifndef HAVE_IBV_DEVX_EVENT
	(void)cb_arg;
	return;
#else
	struct mlx5_vdpa_priv *priv = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;

	while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
					 sizeof(out.buf)) >=
				      (ssize_t)sizeof(out.event_resp.cookie)) {
		struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
					       (uintptr_t)out.event_resp.cookie;

		rte_spinlock_lock(&cq->sl);
		mlx5_vdpa_cq_poll(priv, cq);
		mlx5_vdpa_cq_arm(priv, cq);
		rte_spinlock_unlock(&cq->sl);
		DRV_LOG(DEBUG, "CQ %d event: new cq_ci = %u.", cq->cq->id,
			cq->cq_ci);
	}
#endif /* HAVE_IBV_DEVX_EVENT */
}
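
/*
 * Switch the event channel FD to non-blocking mode and register it as an
 * external interrupt source with the EAL interrupt thread.
 */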
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
	int flags = fcntl(priv->eventc->fd, F_GETFL);
	int ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);

	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to change event channel FD.");
		return -rte_errno;
	}
	priv->intr_handle.fd = priv->eventc->fd;
	priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->intr_handle,
				       mlx5_vdpa_interrupt_handler, priv)) {
		priv->intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
		return -rte_errno;
	}
	return 0;
}
void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (priv->intr_handle.fd) {
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(&priv->intr_handle,
						    mlx5_vdpa_interrupt_handler,
						    priv);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
					"of CQ interrupt, retries = %d.",
					priv->intr_handle.fd, retries);
				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
			}
		}
		memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
	}
}
void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
	if (eqp->sw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
	if (eqp->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
	if (eqp->umem_buf)
		rte_free(eqp->umem_buf);
	if (eqp->fw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
	mlx5_vdpa_cq_destroy(&eqp->cq);
	memset(eqp, 0, sizeof(*eqp));
}
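
/*
 * Move both QPs of the pair through the RST -> INIT -> RTR -> RTS state
 * ladder. Each transition passes the peer QP number as the remote QP, which
 * connects the FW and SW QPs back to back (loopback).
 */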
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}
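
/*
 * The SW QP needs only a receive queue: desc_n entries of MLX5_WSEG_SIZE
 * stride followed by the doorbell record, all carved from one registered
 * umem, mirroring the CQ layout above.
 */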
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			  int callfd, struct mlx5_vdpa_event_qp *eqp)
{
	struct mlx5_devx_qp_attr attr = {0};
	uint16_t log_desc_n = rte_log2_u32(desc_n);
	uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
						       sizeof(*eqp->db_rec) * 2;

	if (mlx5_vdpa_event_qp_global_prepare(priv))
		return -1;
	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
		return -1;
	attr.pd = priv->pdn;
	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->fw_qp) {
		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
		goto error;
	}
	eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!eqp->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
		rte_errno = ENOMEM;
		goto error;
	}
	eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
					       (void *)(uintptr_t)eqp->umem_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!eqp->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SW QP.");
		goto error;
	}
	attr.uar_index = priv->uar->page_id;
	attr.cqn = eqp->cq.cq->id;
	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
	attr.rq_size = 1 << log_desc_n;
	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
	attr.sq_size = 0; /* No SQ is needed. */
	attr.dbr_umem_valid = 1;
	attr.wq_umem_id = eqp->umem_obj->umem_id;
	attr.wq_umem_offset = 0;
	attr.dbr_umem_id = eqp->umem_obj->umem_id;
	attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
	eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->sw_qp) {
		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
		goto error;
	}
	eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
	if (mlx5_vdpa_qps2rts(eqp))
		goto error;
	/* First ringing. */
	rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
	return 0;
error:
	mlx5_vdpa_event_qp_destroy(eqp);
	return -1;
}
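
/*
 * Typical usage from the virtq setup path (a sketch; desc_n and callfd are
 * assumed to come from the vhost library for the given virtq, and eqp to be
 * embedded in the driver's virtq object):
 *
 *	if (mlx5_vdpa_event_qp_create(priv, desc_n, callfd, eqp))
 *		return -1;
 *	...
 *	mlx5_vdpa_event_qp_destroy(eqp);
 */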