net/mlx5: fix memory region cache init
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 898b2e5..90488af 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -392,21 +392,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
                if (++j == rxqs_n)
                        j = 0;
        }
-       /*
-        * Once the device is added to the list of memory event callback, its
-        * global MR cache table cannot be expanded on the fly because of
-        * deadlock. If it overflows, lookup should be done by searching MR list
-        * linearly, which is slow.
-        */
-       if (mlx5_mr_btree_init(&priv->mr.cache, MLX5_MR_BTREE_CACHE_N * 2,
-                              dev->device->numa_node)) {
-               /* rte_errno is already set. */
-               return -rte_errno;
-       }
-       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-       LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
-                        priv, mem_event_cb);
-       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        return 0;
 }
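
The comment removed above records the design constraint behind the MR cache: once a device is on the memory event callback list, its global MR cache B-tree (sized by mlx5_mr_btree_init() in the removed lines) cannot be grown on the fly, so when the cache overflows a lookup has to fall back to a linear scan of the full MR list. The standalone sketch below only illustrates that cache-then-linear-fallback lookup pattern; the types and names (mr_entry, mr_cache, mr_lookup) are illustrative and are not the mlx5 PMD's actual data structures.

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* One registered memory region: [start, end) mapped to a lookup key. */
struct mr_entry {
	uintptr_t start;
	uintptr_t end;
	uint32_t lkey;
	LIST_ENTRY(mr_entry) next;
};

LIST_HEAD(mr_list, mr_entry);

/* Fixed-size, sorted lookup cache; it cannot grow once callbacks are armed. */
#define MR_CACHE_N 256
struct mr_cache {
	struct mr_entry *tbl[MR_CACHE_N]; /* entries sorted by start address */
	unsigned int len;                 /* number of cached entries */
	int overflowed;                   /* set when an entry did not fit */
	struct mr_list all;               /* complete MR list, always correct */
};

/* Binary search in the fixed cache; returns NULL on a miss. */
static struct mr_entry *
mr_cache_lookup(struct mr_cache *c, uintptr_t addr)
{
	unsigned int lo = 0, hi = c->len;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;
		struct mr_entry *e = c->tbl[mid];

		if (addr < e->start)
			hi = mid;
		else if (addr >= e->end)
			lo = mid + 1;
		else
			return e;
	}
	return NULL;
}

/* Fast path: cached lookup; slow path: linear scan of the full MR list. */
static struct mr_entry *
mr_lookup(struct mr_cache *c, uintptr_t addr)
{
	struct mr_entry *e = mr_cache_lookup(c, addr);

	if (e != NULL || !c->overflowed)
		return e;
	LIST_FOREACH(e, &c->all, next)
		if (addr >= e->start && addr < e->end)
			return e;
	return NULL;
}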
 
@@ -525,6 +510,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
        };
 
        if (dev->rx_pkt_burst == mlx5_rx_burst ||
+           dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
            dev->rx_pkt_burst == mlx5_rx_burst_vec)
                return ptypes;
        return NULL;
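
With mlx5_rx_burst_mprq added to the check above, ports served by the Multi-Packet RQ burst function report supported packet types instead of returning NULL. Below is a minimal sketch of how an application reads that list on a configured port through the standard ethdev API; the port id and array size are arbitrary choices for illustration.

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf_ptype.h>

/* Query and print the packet types a configured port can classify. */
static void
print_supported_ptypes(uint16_t port_id)
{
	uint32_t ptypes[32];
	int i, n;

	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
					      ptypes, 32);
	if (n <= 0) {
		printf("port %u reports no packet type information\n", port_id);
		return;
	}
	/* n may exceed the array size; only the first 32 entries are filled. */
	for (i = 0; i < n && i < 32; i++)
		printf("port %u supports ptype 0x%08x\n", port_id, ptypes[i]);
}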
@@ -1182,6 +1168,8 @@ mlx5_select_rx_function(struct rte_eth_dev *dev)
                rx_pkt_burst = mlx5_rx_burst_vec;
                DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
                        dev->data->port_id);
+       } else if (mlx5_mprq_enabled(dev)) {
+               rx_pkt_burst = mlx5_rx_burst_mprq;
        }
        return rx_pkt_burst;
 }
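
The hunk above keeps the vectorized burst function as the first choice and falls back to mlx5_rx_burst_mprq only when the vectorized path is not selected; the plain scalar path presumably remains the initial default, though that assignment is outside this hunk. The self-contained sketch below shows that kind of priority-ordered selection in isolation; the capability flags and function names are illustrative, not the PMD's.

#include <stdint.h>
#include <stdio.h>

struct pkt;
typedef uint16_t (*rx_burst_t)(struct pkt **pkts, uint16_t n);

/* Stand-in burst implementations; a real driver would receive packets here. */
static uint16_t rx_burst_scalar(struct pkt **p, uint16_t n) { (void)p; return n; }
static uint16_t rx_burst_vec(struct pkt **p, uint16_t n) { (void)p; return n; }
static uint16_t rx_burst_mprq(struct pkt **p, uint16_t n) { (void)p; return n; }

/*
 * Pick the Rx path by priority: vectorized when supported, otherwise the
 * multi-packet RQ path when enabled, otherwise the plain scalar path.
 */
static rx_burst_t
select_rx_function(int vec_supported, int mprq_enabled)
{
	rx_burst_t rx = rx_burst_scalar;	/* default */

	if (vec_supported)
		rx = rx_burst_vec;
	else if (mprq_enabled)
		rx = rx_burst_mprq;
	return rx;
}

int
main(void)
{
	rx_burst_t rx = select_rx_function(0, 1);

	printf("selected %s\n", rx == rx_burst_mprq ? "mprq" :
	       rx == rx_burst_vec ? "vec" : "scalar");
	return 0;
}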