net/mlx5: reposition event queue number field
author: Michael Baum <michaelba@nvidia.com>
Thu, 1 Oct 2020 14:09:17 +0000 (14:09 +0000)
committer: Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 9 Oct 2020 11:17:42 +0000 (13:17 +0200)
The eqn field has been moved to be a field of the shared device context
(sh) directly, since the event queue number is relevant for both Tx and Rx.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_devx.c
drivers/net/mlx5/mlx5_txpp.c
drivers/net/mlx5/mlx5_txq.c

index 01ead6e..e5ca392 100644 (file)
@@ -925,6 +925,16 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                goto error;
        }
        if (sh->devx) {
+               uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
+
+               /* Query the EQN for this core. */
+               err = mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->eqn);
+               if (err) {
+                       rte_errno = errno;
+                       DRV_LOG(ERR, "Failed to query event queue number %d.",
+                               rte_errno);
+                       goto error;
+               }
                err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
                if (err) {
                        DRV_LOG(ERR, "Fail to extract pdn from PD");
index bd91e16..050d3a9 100644 (file)
@@ -561,7 +561,6 @@ struct mlx5_dev_txpp {
        uint32_t tick; /* Completion tick duration in nanoseconds. */
        uint32_t test; /* Packet pacing test mode. */
        int32_t skew; /* Scheduling skew. */
-       uint32_t eqn; /* Event Queue number. */
        struct rte_intr_handle intr_handle; /* Periodic interrupt. */
        void *echan; /* Event Channel. */
        struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
@@ -603,6 +602,7 @@ struct mlx5_dev_ctx_shared {
        LIST_ENTRY(mlx5_dev_ctx_shared) next;
        uint32_t refcnt;
        uint32_t devx:1; /* Opened with DV. */
+       uint32_t eqn; /* Event Queue number. */
        uint32_t max_port; /* Maximal IB device port index. */
        void *ctx; /* Verbs/DV/DevX context. */
        void *pd; /* Protection Domain. */
index cb4a522..cddfe43 100644 (file)
@@ -350,11 +350,9 @@ rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        size_t page_size = rte_mem_page_size();
-       uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
        unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
        struct mlx5_devx_dbr_page *dbr_page;
        int64_t dbr_offset;
-       uint32_t eqn = 0;
        void *buf = NULL;
        uint16_t event_nums[1] = {0};
        uint32_t log_cqe_n;
@@ -392,12 +390,6 @@ rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
                cq_attr.cqe_size = MLX5_CQE_SIZE_128B;
        log_cqe_n = log2above(cqe_n);
        cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
-       /* Query the EQN for this core. */
-       if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
-               DRV_LOG(ERR, "Failed to query EQN for CQ.");
-               goto error;
-       }
-       cq_attr.eqn = eqn;
        buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
                                rxq_ctrl->socket);
        if (!buf) {
@@ -425,6 +417,7 @@ rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
        rxq_data->cq_uar =
                        mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
        /* Create CQ using DevX API. */
+       cq_attr.eqn = priv->sh->eqn;
        cq_attr.uar_page_id =
                        mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
        cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
index 011e479..37355fa 100644 (file)
@@ -31,36 +31,24 @@ static const char * const mlx5_txpp_stat_names[] = {
 
 /* Destroy Event Queue Notification Channel. */
 static void
-mlx5_txpp_destroy_eqn(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
 {
        if (sh->txpp.echan) {
                mlx5_glue->devx_destroy_event_channel(sh->txpp.echan);
                sh->txpp.echan = NULL;
        }
-       sh->txpp.eqn = 0;
 }
 
 /* Create Event Queue Notification Channel. */
 static int
-mlx5_txpp_create_eqn(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
 {
-       uint32_t lcore;
-
        MLX5_ASSERT(!sh->txpp.echan);
-       lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
-       if (mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->txpp.eqn)) {
-               rte_errno = errno;
-               DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
-               sh->txpp.eqn = 0;
-               return -rte_errno;
-       }
        sh->txpp.echan = mlx5_glue->devx_create_event_channel(sh->ctx,
                        MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!sh->txpp.echan) {
-               sh->txpp.eqn = 0;
                rte_errno = errno;
-               DRV_LOG(ERR, "Failed to create event channel %d.",
-                       rte_errno);
+               DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
                return -rte_errno;
        }
        return 0;
@@ -285,7 +273,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
        cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
                            MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
        cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
-       cq_attr.eqn = sh->txpp.eqn;
+       cq_attr.eqn = sh->eqn;
        cq_attr.q_umem_valid = 1;
        cq_attr.q_umem_offset = 0;
        cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
@@ -525,7 +513,7 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
        cq_attr.use_first_only = 1;
        cq_attr.overrun_ignore = 1;
        cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
-       cq_attr.eqn = sh->txpp.eqn;
+       cq_attr.eqn = sh->eqn;
        cq_attr.q_umem_valid = 1;
        cq_attr.q_umem_offset = 0;
        cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
@@ -951,7 +939,7 @@ mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
        sh->txpp.test = !!(tx_pp < 0);
        sh->txpp.skew = priv->config.tx_skew;
        sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
-       ret = mlx5_txpp_create_eqn(sh);
+       ret = mlx5_txpp_create_event_channel(sh);
        if (ret)
                goto exit;
        ret = mlx5_txpp_alloc_pp_index(sh);
@@ -972,7 +960,7 @@ exit:
                mlx5_txpp_destroy_rearm_queue(sh);
                mlx5_txpp_destroy_clock_queue(sh);
                mlx5_txpp_free_pp_index(sh);
-               mlx5_txpp_destroy_eqn(sh);
+               mlx5_txpp_destroy_event_channel(sh);
                sh->txpp.tick = 0;
                sh->txpp.test = 0;
                sh->txpp.skew = 0;
@@ -994,7 +982,7 @@ mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
        mlx5_txpp_destroy_rearm_queue(sh);
        mlx5_txpp_destroy_clock_queue(sh);
        mlx5_txpp_free_pp_index(sh);
-       mlx5_txpp_destroy_eqn(sh);
+       mlx5_txpp_destroy_event_channel(sh);
        sh->txpp.tick = 0;
        sh->txpp.test = 0;
        sh->txpp.skew = 0;
index f2ecfc4..c678971 100644 (file)
@@ -985,7 +985,7 @@ mlx5_devx_cq_new(struct rte_eth_dev *dev, uint32_t cqe_n, uint16_t idx,
        cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
                            MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
        cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
-       cq_attr.eqn = priv->sh->txpp.eqn;
+       cq_attr.eqn = priv->sh->eqn;
        cq_attr.q_umem_valid = 1;
        cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
        cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);