From: Xueming Li
Date: Sat, 26 May 2018 13:27:35 +0000 (+0800)
Subject: net/mlx5: fix memory region cache init
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=0ace586dee70ce1842c858e27a3590417f8fb40c;p=dpdk.git

net/mlx5: fix memory region cache init

MR cache init takes place on device configuration. When the device is
re-configured multiple times, for example when changing the number of
queues on the fly, a deadlock can happen. This patch moves MR cache init
from the device configuration function to the probe function to make
sure it is done only once.

Fixes: 974f1e7ef146 ("net/mlx5: add new memory region support")

Signed-off-by: Xueming Li
Acked-by: Yongseok Koh
---

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index dae8474937..3ef02e2d20 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1193,6 +1193,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 			goto port_error;
 		}
 		priv->config.max_verbs_prio = verb_priorities;
+		/*
+		 * Once the device is added to the list of memory event
+		 * callback, its global MR cache table cannot be expanded
+		 * on the fly because of deadlock. If it overflows, lookup
+		 * should be done by searching MR list linearly, which is slow.
+		 */
+		err = mlx5_mr_btree_init(&priv->mr.cache,
+					 MLX5_MR_BTREE_CACHE_N * 2,
+					 eth_dev->device->numa_node);
+		if (err) {
+			err = rte_errno;
+			goto port_error;
+		}
 		/* Add device to memory callback list. */
 		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
 		LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index f6cebae41d..90488af33b 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -392,17 +392,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 		if (++j == rxqs_n)
 			j = 0;
 	}
-	/*
-	 * Once the device is added to the list of memory event callback, its
-	 * global MR cache table cannot be expanded on the fly because of
-	 * deadlock. If it overflows, lookup should be done by searching MR list
-	 * linearly, which is slow.
-	 */
-	if (mlx5_mr_btree_init(&priv->mr.cache, MLX5_MR_BTREE_CACHE_N * 2,
-			       dev->device->numa_node)) {
-		/* rte_errno is already set. */
-		return -rte_errno;
-	}
 	return 0;
 }
 
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index abb1f51799..08105a4430 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -191,6 +191,7 @@ mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
+	assert(!bt->table && !bt->size);
 	memset(bt, 0, sizeof(*bt));
 	bt->table = rte_calloc_socket("B-tree table",
 				      n, sizeof(struct mlx5_mr_cache),
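
Below is a minimal standalone sketch of the double initialization that the new
assert() in mlx5_mr_btree_init() guards against. This is plain C, not mlx5
code; the struct, function names and sizes are made up for illustration.
Before this patch, the configure path re-ran the B-tree init on every call,
so re-configuring a port (for example to change the queue count) initialized
an already initialized cache.

/*
 * Standalone sketch (illustrative names only): why the MR cache B-tree
 * must be initialized exactly once, at probe time rather than configure
 * time.
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

struct mr_btree {
	size_t size;  /* number of allocated cache entries */
	void *table;  /* cache table, expected to be allocated once */
};

/* Mirrors the contract of mlx5_mr_btree_init(): the caller must pass a
 * B-tree that has not been initialized yet. */
static int
mr_btree_init(struct mr_btree *bt, size_t n)
{
	assert(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = calloc(n, 64);
	if (bt->table == NULL)
		return -1;
	bt->size = n;
	return 0;
}

/* Old behavior: the configure function re-ran the init on every call, so
 * a second configure of the same port hit an already initialized B-tree
 * (leaking the first table, and now tripping the assert above). */
static int
dev_configure(struct mr_btree *bt)
{
	return mr_btree_init(bt, 512);
}

int
main(void)
{
	struct mr_btree bt = { 0, NULL };

	dev_configure(&bt);	/* first configure: fine */
	dev_configure(&bt);	/* re-configure: caught by the assert */
	return 0;
}

With the patch, the init call sits in the probe function, which runs once per
device, so repeated configure calls can no longer touch the cache table.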