net/mlx5: fix memory region cache init
authorXueming Li <xuemingl@mellanox.com>
Sat, 26 May 2018 13:27:35 +0000 (21:27 +0800)
committerShahaf Shuler <shahafs@mellanox.com>
Mon, 28 May 2018 14:28:43 +0000 (16:28 +0200)
MR cache init takes place during device configuration.
When the device is re-configured multiple times, for example when
changing the number of queues on the fly, a deadlock can happen.

This patch moves MR cache init from the device configuration function
to the probe function to make sure init happens only once.

Fixes: 974f1e7ef146 ("net/mlx5: add new memory region support")

Signed-off-by: Xueming Li <xuemingl@mellanox.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5_ethdev.c
drivers/net/mlx5/mlx5_mr.c

index dae8474..3ef02e2 100644 (file)
@@ -1193,6 +1193,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                        goto port_error;
                }
                priv->config.max_verbs_prio = verb_priorities;
+               /*
+                * Once the device is added to the list of memory event
+                * callback, its global MR cache table cannot be expanded
+                * on the fly because of deadlock. If it overflows, lookup
+                * should be done by searching MR list linearly, which is slow.
+                */
+               err = mlx5_mr_btree_init(&priv->mr.cache,
+                                        MLX5_MR_BTREE_CACHE_N * 2,
+                                        eth_dev->device->numa_node);
+               if (err) {
+                       err = rte_errno;
+                       goto port_error;
+               }
                /* Add device to memory callback list. */
                rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
                LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
index f6cebae..90488af 100644 (file)
@@ -392,17 +392,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
                if (++j == rxqs_n)
                        j = 0;
        }
-       /*
-        * Once the device is added to the list of memory event callback, its
-        * global MR cache table cannot be expanded on the fly because of
-        * deadlock. If it overflows, lookup should be done by searching MR list
-        * linearly, which is slow.
-        */
-       if (mlx5_mr_btree_init(&priv->mr.cache, MLX5_MR_BTREE_CACHE_N * 2,
-                              dev->device->numa_node)) {
-               /* rte_errno is already set. */
-               return -rte_errno;
-       }
        return 0;
 }
 
index abb1f51..08105a4 100644 (file)
@@ -191,6 +191,7 @@ mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
                rte_errno = EINVAL;
                return -rte_errno;
        }
+       assert(!bt->table && !bt->size);
        memset(bt, 0, sizeof(*bt));
        bt->table = rte_calloc_socket("B-tree table",
                                      n, sizeof(struct mlx5_mr_cache),