goto error;
}
if (sh->devx) {
+ uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
+
+ /* Query the EQN for this core. */
+ err = mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->eqn);
+ if (err) {
+ rte_errno = errno;
+ DRV_LOG(ERR, "Failed to query event queue number %d.",
+ rte_errno);
+ goto error;
+ }
err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
if (err) {
DRV_LOG(ERR, "Fail to extract pdn from PD");
uint32_t tick; /* Completion tick duration in nanoseconds. */
uint32_t test; /* Packet pacing test mode. */
int32_t skew; /* Scheduling skew. */
- uint32_t eqn; /* Event Queue number. */
struct rte_intr_handle intr_handle; /* Periodic interrupt. */
void *echan; /* Event Channel. */
struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
LIST_ENTRY(mlx5_dev_ctx_shared) next;
uint32_t refcnt;
uint32_t devx:1; /* Opened with DV. */
+ uint32_t eqn; /* Event Queue number. */
uint32_t max_port; /* Maximal IB device port index. */
void *ctx; /* Verbs/DV/DevX context. */
void *pd; /* Protection Domain. */
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
size_t page_size = rte_mem_page_size();
- uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
struct mlx5_devx_dbr_page *dbr_page;
int64_t dbr_offset;
- uint32_t eqn = 0;
void *buf = NULL;
uint16_t event_nums[1] = {0};
uint32_t log_cqe_n;
cq_attr.cqe_size = MLX5_CQE_SIZE_128B;
log_cqe_n = log2above(cqe_n);
cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
- /* Query the EQN for this core. */
- if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
- DRV_LOG(ERR, "Failed to query EQN for CQ.");
- goto error;
- }
- cq_attr.eqn = eqn;
buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
rxq_ctrl->socket);
if (!buf) {
rxq_data->cq_uar =
mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
/* Create CQ using DevX API. */
+ cq_attr.eqn = priv->sh->eqn;
cq_attr.uar_page_id =
mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
/* Destroy Event Queue Notification Channel. */
static void
-mlx5_txpp_destroy_eqn(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
{
+/*
+ * Idempotent teardown: destroy the DevX event channel only if one
+ * exists and clear the pointer so repeated calls (e.g. from error
+ * unwind paths) are safe. The txpp-local EQN reset is removed
+ * because the EQN is now stored once in the shared device context
+ * (sh->eqn) instead of per packet-pacing instance.
+ */
if (sh->txpp.echan) {
mlx5_glue->devx_destroy_event_channel(sh->txpp.echan);
sh->txpp.echan = NULL;
}
-sh->txpp.eqn = 0;
}
/* Create Event Queue Notification Channel. */
static int
-mlx5_txpp_create_eqn(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
{
-uint32_t lcore;
-
+/*
+ * The EQN query (devx_query_eqn) is no longer done here; it is
+ * performed once during shared-context initialization and cached
+ * in sh->eqn. This function now only opens the DevX event channel
+ * used for completion notifications.
+ * Returns 0 on success, -rte_errno on failure.
+ */
MLX5_ASSERT(!sh->txpp.echan);
-lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
-if (mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->txpp.eqn)) {
-rte_errno = errno;
-DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
-sh->txpp.eqn = 0;
-return -rte_errno;
-}
sh->txpp.echan = mlx5_glue->devx_create_event_channel(sh->ctx,
MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
if (!sh->txpp.echan) {
-sh->txpp.eqn = 0;
+/* Save errno before any other call can clobber it. */
rte_errno = errno;
-DRV_LOG(ERR, "Failed to create event channel %d.",
-rte_errno);
+DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
return -rte_errno;
}
return 0;
cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
- cq_attr.eqn = sh->txpp.eqn;
+ cq_attr.eqn = sh->eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = 0;
cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
cq_attr.use_first_only = 1;
cq_attr.overrun_ignore = 1;
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
- cq_attr.eqn = sh->txpp.eqn;
+ cq_attr.eqn = sh->eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = 0;
cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
sh->txpp.test = !!(tx_pp < 0);
sh->txpp.skew = priv->config.tx_skew;
sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
- ret = mlx5_txpp_create_eqn(sh);
+ ret = mlx5_txpp_create_event_channel(sh);
if (ret)
goto exit;
ret = mlx5_txpp_alloc_pp_index(sh);
mlx5_txpp_destroy_rearm_queue(sh);
mlx5_txpp_destroy_clock_queue(sh);
mlx5_txpp_free_pp_index(sh);
- mlx5_txpp_destroy_eqn(sh);
+ mlx5_txpp_destroy_event_channel(sh);
sh->txpp.tick = 0;
sh->txpp.test = 0;
sh->txpp.skew = 0;
mlx5_txpp_destroy_rearm_queue(sh);
mlx5_txpp_destroy_clock_queue(sh);
mlx5_txpp_free_pp_index(sh);
- mlx5_txpp_destroy_eqn(sh);
+ mlx5_txpp_destroy_event_channel(sh);
sh->txpp.tick = 0;
sh->txpp.test = 0;
sh->txpp.skew = 0;
cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
- cq_attr.eqn = priv->sh->txpp.eqn;
+ cq_attr.eqn = priv->sh->eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);