git.droids-corp.org - dpdk.git/commitdiff
common/mlx5: fix redundant field in MR control structure
authorMichael Baum <michaelba@nvidia.com>
Tue, 16 Nov 2021 14:36:35 +0000 (16:36 +0200)
committerThomas Monjalon <thomas@monjalon.net>
Wed, 17 Nov 2021 09:42:20 +0000 (10:42 +0100)
Inside the MR control structure there is a pointer to the common device.
This pointer enables access to the global cache as well as hardware
objects that may be required in case a new MR needs to be created.

The purpose of adding this pointer to the MR control structure was to
avoid passing it as a parameter to all the functions that search for an
MR in the caches.
However, adding it to this structure increased the size of the Rx and Tx
data-path structures; all the fields that followed it were shifted,
which caused a reduction in performance.

This patch removes the pointer from the structure. It can be accessed
through the "dev_gen_ptr" existing field using the "container_of"
operator.

Fixes: 334ed198ab4d ("common/mlx5: remove redundant parameter in MR search")
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/common/mlx5/mlx5_common_mr.c
drivers/common/mlx5/mlx5_common_mr.h
drivers/compress/mlx5/mlx5_compress.c
drivers/crypto/mlx5/mlx5_crypto.c
drivers/net/mlx5/mlx5_rx.h
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_txq.c
drivers/regex/mlx5/mlx5_regex_control.c

index 49feea4474c8b4496b043b1a18ca1fb5171ea624..1d2a107597c1ae197f5f232156bbaa4fdffb0dc7 100644 (file)
@@ -292,8 +292,8 @@ mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
  *
  * @param mr_ctrl
  *   Pointer to MR control structure.
- * @param cdev
- *   Pointer to the mlx5 device structure.
+ * @param dev_gen_ptr
+ *   Pointer to generation number of global cache.
  * @param socket
  *   NUMA socket on which memory must be allocated.
  *
@@ -301,16 +301,15 @@ mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, struct mlx5_common_device *cdev,
+mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
                  int socket)
 {
        if (mr_ctrl == NULL) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
-       mr_ctrl->cdev = cdev;
        /* Save pointer of global generation number to check memory event. */
-       mr_ctrl->dev_gen_ptr = &cdev->mr_scache.dev_gen;
+       mr_ctrl->dev_gen_ptr = dev_gen_ptr;
        /* Initialize B-tree and allocate memory for bottom-half cache table. */
        return mlx5_mr_btree_init(&mr_ctrl->cache_bh, MLX5_MR_BTREE_CACHE_N,
                                  socket);
@@ -930,7 +929,11 @@ static uint32_t
 mr_lookup_caches(struct mlx5_mr_ctrl *mr_ctrl,
                 struct mr_cache_entry *entry, uintptr_t addr)
 {
-       struct mlx5_mr_share_cache *share_cache = &mr_ctrl->cdev->mr_scache;
+       struct mlx5_mr_share_cache *share_cache =
+               container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+                            dev_gen);
+       struct mlx5_common_device *cdev =
+               container_of(share_cache, struct mlx5_common_device, mr_scache);
        struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
        uint32_t lkey;
        uint16_t idx;
@@ -955,7 +958,7 @@ mr_lookup_caches(struct mlx5_mr_ctrl *mr_ctrl,
        }
        rte_rwlock_read_unlock(&share_cache->rwlock);
        /* First time to see the address? Create a new MR. */
-       lkey = mlx5_mr_create(mr_ctrl->cdev, share_cache, entry, addr);
+       lkey = mlx5_mr_create(cdev, share_cache, entry, addr);
        /*
         * Update the local cache if successfully created a new global MR. Even
         * if failed to create one, there's no action to take in this datapath
@@ -1819,8 +1822,6 @@ mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,
 /**
  * Update bottom-half cache from the list of mempool registrations.
  *
- * @param share_cache
- *   Pointer to a global shared MR cache.
  * @param mr_ctrl
  *   Per-queue MR control handle.
  * @param entry
@@ -1834,11 +1835,13 @@ mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,
  *   MR lkey on success, UINT32_MAX on failure.
  */
 static uint32_t
-mlx5_lookup_mempool_regs(struct mlx5_mr_share_cache *share_cache,
-                        struct mlx5_mr_ctrl *mr_ctrl,
+mlx5_lookup_mempool_regs(struct mlx5_mr_ctrl *mr_ctrl,
                         struct mr_cache_entry *entry,
                         struct rte_mempool *mp, uintptr_t addr)
 {
+       struct mlx5_mr_share_cache *share_cache =
+               container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+                            dev_gen);
        struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
        struct mlx5_mempool_reg *mpr;
        uint32_t lkey = UINT32_MAX;
@@ -1865,8 +1868,6 @@ mlx5_lookup_mempool_regs(struct mlx5_mr_share_cache *share_cache,
 /**
  * Bottom-half lookup for the address from the mempool.
  *
- * @param share_cache
- *   Pointer to a global shared MR cache.
  * @param mr_ctrl
  *   Per-queue MR control handle.
  * @param mp
@@ -1877,8 +1878,7 @@ mlx5_lookup_mempool_regs(struct mlx5_mr_share_cache *share_cache,
  *   MR lkey on success, UINT32_MAX on failure.
  */
 uint32_t
-mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
-                     struct mlx5_mr_ctrl *mr_ctrl,
+mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
                      struct rte_mempool *mp, uintptr_t addr)
 {
        struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
@@ -1891,8 +1891,7 @@ mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
        if (likely(lkey != UINT32_MAX)) {
                *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
        } else {
-               lkey = mlx5_lookup_mempool_regs(share_cache, mr_ctrl, repl,
-                                               mp, addr);
+               lkey = mlx5_lookup_mempool_regs(mr_ctrl, repl, mp, addr);
                /* Can only fail if the address is not from the mempool. */
                if (unlikely(lkey == UINT32_MAX))
                        return UINT32_MAX;
@@ -1909,7 +1908,11 @@ mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
 {
        uint32_t lkey;
        uintptr_t addr = (uintptr_t)mb->buf_addr;
-       struct mlx5_common_device *cdev = mr_ctrl->cdev;
+       struct mlx5_mr_share_cache *share_cache =
+               container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+                            dev_gen);
+       struct mlx5_common_device *cdev =
+               container_of(share_cache, struct mlx5_common_device, mr_scache);
 
        if (cdev->config.mr_mempool_reg_en) {
                struct rte_mempool *mp = NULL;
@@ -1923,8 +1926,7 @@ mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
                        mp = buf->mp;
                }
                if (mp != NULL) {
-                       lkey = mlx5_mr_mempool2mr_bh(&cdev->mr_scache,
-                                                    mr_ctrl, mp, addr);
+                       lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
                        /*
                         * Lookup can only fail on invalid input, e.g. "addr"
                         * is not from "mp" or "mp" has MEMPOOL_F_NON_IO set.
index dc7ddc351364214903def2673449d0d456fd3c19..cc885c45217957273b4907b4faa2c275196882ed 100644 (file)
@@ -66,7 +66,6 @@ struct mlx5_common_device;
 
 /* Per-queue MR control descriptor. */
 struct mlx5_mr_ctrl {
-       struct mlx5_common_device *cdev; /* Pointer to the mlx5 common device.*/
        uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
        uint32_t cur_gen; /* Generation number saved to flush caches. */
        uint16_t mru; /* Index of last hit entry in top-half cache. */
@@ -211,14 +210,13 @@ mlx5_mr_mb2mr(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf)
 /* mlx5_common_mr.c */
 
 __rte_internal
-int mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl,
-                     struct mlx5_common_device *cdev, int socket);
+int mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
+                     int socket);
 __rte_internal
 void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
 void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
 __rte_internal
-uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
-                              struct mlx5_mr_ctrl *mr_ctrl,
+uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
                               struct rte_mempool *mp, uintptr_t addr);
 void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
 int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket);
index 9d5893e790c0ad1a6cb3f81a6d87c100e01eff5b..bb0dc3a5d2735b1e860c5a806fc8990d66455fbb 100644 (file)
@@ -206,7 +206,7 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
                return -rte_errno;
        }
        dev->data->queue_pairs[qp_id] = qp;
-       if (mlx5_mr_ctrl_init(&qp->mr_ctrl, priv->cdev,
+       if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
                              priv->dev_config.socket_id)) {
                DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
                        (uint32_t)qp_id);
index 47da3d115b62f2af0c74fc2731d607b13630fcdc..90e0df7fb22c9e2faefc720e1a6c756c1aa50986 100644 (file)
@@ -636,7 +636,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                DRV_LOG(ERR, "Failed to create QP.");
                goto error;
        }
-       if (mlx5_mr_ctrl_init(&qp->mr_ctrl, priv->cdev,
+       if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
                              priv->dev_config.socket_id) != 0) {
                DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
                        (uint32_t)qp_id);
index 298d1b1f3265d72950fd9b7a77832c2068aa883a..9cc1a2703bb57bed7e88fff28408b091a774e143 100644 (file)
@@ -316,8 +316,7 @@ mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
        if (likely(lkey != UINT32_MAX))
                return lkey;
        mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
-       return mlx5_mr_mempool2mr_bh(&mr_ctrl->cdev->mr_scache, mr_ctrl,
-                                    mp, addr);
+       return mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
 }
 
 /**
@@ -338,7 +337,6 @@ mlx5_rx_mb2mr(struct mlx5_rxq_data *rxq, struct rte_mbuf *mb)
 {
        struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
        uintptr_t addr = (uintptr_t)mb->buf_addr;
-       struct mlx5_rxq_ctrl *rxq_ctrl;
        uint32_t lkey;
 
        /* Linear search on MR cache array. */
@@ -346,13 +344,8 @@ mlx5_rx_mb2mr(struct mlx5_rxq_data *rxq, struct rte_mbuf *mb)
                                   MLX5_MR_CACHE_N, addr);
        if (likely(lkey != UINT32_MAX))
                return lkey;
-       /*
-        * Slower search in the mempool database on miss.
-        * During queue creation rxq->sh is not yet set, so we use rxq_ctrl.
-        */
-       rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
-       return mlx5_mr_mempool2mr_bh(&rxq_ctrl->sh->cdev->mr_scache,
-                                    mr_ctrl, mb->pool, addr);
+       /* Slower search in the mempool database on miss. */
+       return mlx5_mr_mempool2mr_bh(mr_ctrl, mb->pool, addr);
 }
 
 /**
index 52b95d7070a8849bbf3803a1bfa98556c78ff84a..61ef4edff0aa6a8581da4a6bb433ea013726de3f 100644 (file)
@@ -1675,7 +1675,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
                goto error;
        }
        tmpl->type = MLX5_RXQ_TYPE_STANDARD;
-       if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl, priv->sh->cdev, socket)) {
+       if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
+                             &priv->sh->cdev->mr_scache.dev_gen, socket)) {
                /* rte_errno is already set. */
                goto error;
        }
index 2140a328a2c8420ad9125d5044a9d79dd5b14a68..4e0bf7af9ca267316158fc4b0d291b8cd4a2c839 100644 (file)
@@ -1079,7 +1079,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                rte_errno = ENOMEM;
                return NULL;
        }
-       if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl, priv->sh->cdev, socket)) {
+       if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl,
+                             &priv->sh->cdev->mr_scache.dev_gen, socket)) {
                /* rte_errno is already set. */
                goto error;
        }
index 9d5b4bd174c8f0e73640137b51e0313907b6a81d..6ab62a12fc3d329ed03683d25251e030be424955 100644 (file)
@@ -247,7 +247,8 @@ mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
                nb_sq_config++;
        }
 
-       ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, priv->cdev, rte_socket_id());
+       ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
+                               rte_socket_id());
        if (ret) {
                DRV_LOG(ERR, "Error setting up mr btree");
                goto err_btree;