net/mlx5: fix configuration of Rx CQE compression
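
Enabling Rx CQE compression requires device support. On targets with
128-byte cache lines the CQE size is set to 128 bytes (MLX5_CQE_SIZE=128
is exported at PMD init so the completion entry matches the cache line),
and compressing 128-byte CQEs is only possible when the device reports
MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP. Probe that capability in
mlx5_pci_probe() and use it as the default instead of unconditionally
enabling compression; if compression was requested on hardware that
cannot do it, warn and disable it.
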
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 46c89c6..c0f7b1b 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
 #endif
 
+#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
+#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
+#endif
+
 struct mlx5_args {
        int cqe_comp;
        int txq_inline;
@@ -198,7 +202,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
              ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
        /* In case mlx5_dev_stop() has not been called. */
        priv_dev_interrupt_handler_uninstall(priv, dev);
-       priv_destroy_hash_rxqs(priv);
        priv_dev_traffic_disable(priv, dev);
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
@@ -258,7 +261,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        memset(priv, 0, sizeof(*priv));
 }
 
-static const struct eth_dev_ops mlx5_dev_ops = {
+const struct eth_dev_ops mlx5_dev_ops = {
        .dev_configure = mlx5_dev_configure,
        .dev_start = mlx5_dev_start,
        .dev_stop = mlx5_dev_stop,
@@ -301,7 +304,6 @@ static const struct eth_dev_ops mlx5_dev_ops = {
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
 };
 
-
 static const struct eth_dev_ops mlx5_dev_sec_ops = {
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
@@ -313,6 +315,42 @@ static const struct eth_dev_ops mlx5_dev_sec_ops = {
        .tx_descriptor_status = mlx5_tx_descriptor_status,
 };
 
+/* Available operations in flow isolated mode. */
+const struct eth_dev_ops mlx5_dev_ops_isolate = {
+       .dev_configure = mlx5_dev_configure,
+       .dev_start = mlx5_dev_start,
+       .dev_stop = mlx5_dev_stop,
+       .dev_set_link_down = mlx5_set_link_down,
+       .dev_set_link_up = mlx5_set_link_up,
+       .dev_close = mlx5_dev_close,
+       .link_update = mlx5_link_update,
+       .stats_get = mlx5_stats_get,
+       .stats_reset = mlx5_stats_reset,
+       .xstats_get = mlx5_xstats_get,
+       .xstats_reset = mlx5_xstats_reset,
+       .xstats_get_names = mlx5_xstats_get_names,
+       .dev_infos_get = mlx5_dev_infos_get,
+       .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
+       .vlan_filter_set = mlx5_vlan_filter_set,
+       .rx_queue_setup = mlx5_rx_queue_setup,
+       .tx_queue_setup = mlx5_tx_queue_setup,
+       .rx_queue_release = mlx5_rx_queue_release,
+       .tx_queue_release = mlx5_tx_queue_release,
+       .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
+       .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
+       .mac_addr_remove = mlx5_mac_addr_remove,
+       .mac_addr_add = mlx5_mac_addr_add,
+       .mac_addr_set = mlx5_mac_addr_set,
+       .mtu_set = mlx5_dev_set_mtu,
+       .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
+       .vlan_offload_set = mlx5_vlan_offload_set,
+       .filter_ctrl = mlx5_dev_filter_ctrl,
+       .rx_descriptor_status = mlx5_rx_descriptor_status,
+       .tx_descriptor_status = mlx5_tx_descriptor_status,
+       .rx_queue_intr_enable = mlx5_rx_intr_enable,
+       .rx_queue_intr_disable = mlx5_rx_intr_disable,
+};
+
 static struct {
        struct rte_pci_addr pci_addr; /* associated PCI address */
        uint32_t ports; /* physical ports bitfield. */
@@ -505,6 +543,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
        struct ibv_device_attr_ex device_attr;
        unsigned int sriov;
        unsigned int mps;
+       unsigned int cqe_comp;
        unsigned int tunnel_en = 0;
        int idx;
        int i;
@@ -608,6 +647,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                INFO("MPW is disabled\n");
                mps = MLX5_MPW_DISABLED;
        }
+       if (RTE_CACHE_LINE_SIZE == 128 &&
+           !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
+               cqe_comp = 0;
+       else
+               cqe_comp = 1;
        if (ibv_query_device_ex(attr_ctx, NULL, &device_attr))
                goto error;
        INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
@@ -724,7 +768,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                priv->pd = pd;
                priv->mtu = ETHER_MTU;
                priv->mps = mps; /* Enable MPW by default if supported. */
-               priv->cqe_comp = 1; /* Enable compression by default. */
+               priv->cqe_comp = cqe_comp;
                priv->tunnel_en = tunnel_en;
                /* Enable vector by default if supported. */
                priv->tx_vec_en = 1;
@@ -813,6 +857,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                                priv->txq_inline = MLX5_WQE_SIZE_MAX -
                                                   MLX5_WQE_SIZE;
                }
+               if (priv->cqe_comp && !cqe_comp) {
+                       WARN("Rx CQE compression isn't supported");
+                       priv->cqe_comp = 0;
+               }
                /* Configure the first MAC address by default. */
                if (priv_get_mac(priv, &mac.addr_bytes)) {
                        ERROR("cannot get MAC address, is mlx5_en loaded?"
@@ -978,6 +1026,9 @@ rte_mlx5_pmd_init(void)
        setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
        /* Don't map UAR to WC if BlueFlame is not used.*/
        setenv("MLX5_SHUT_UP_BF", "1", 1);
+       /* Match the size of Rx completion entry to the size of a cacheline. */
+       if (RTE_CACHE_LINE_SIZE == 128)
+               setenv("MLX5_CQE_SIZE", "128", 0);
        ibv_fork_init();
        rte_pci_register(&mlx5_driver);
 }
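
For reference, here is a standalone sketch of the capability probe this
patch splices into mlx5_pci_probe(). It assumes rdma-core's
mlx5dv_query_device() and DPDK's RTE_CACHE_LINE_SIZE; the helper name
mlx5_cqe_comp_supported() is hypothetical and not part of the patch.

#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#include <rte_common.h>

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

/* Return nonzero when Rx CQE compression can be enabled by default. */
static int
mlx5_cqe_comp_supported(struct ibv_context *ctx)
{
	struct mlx5dv_context attrs_out = { .comp_mask = 0 };

	if (mlx5dv_query_device(ctx, &attrs_out))
		return 0; /* Query failed: be conservative. */
	/*
	 * With a 128B cache line the CQE size is forced to 128B via
	 * MLX5_CQE_SIZE, so compression additionally needs the
	 * 128B compressed-CQE capability bit from the device.
	 */
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		return 0;
	return 1;
}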