net/mlx4: avoid constant recreations in function
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 4e472fa..9f8ecd0 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
 #include "mlx4.h"
 #include "mlx4_glue.h"
 #include "mlx4_flow.h"
+#include "mlx4_mr.h"
 #include "mlx4_rxtx.h"
 #include "mlx4_utils.h"
 
+struct mlx4_dev_list mlx4_mem_event_cb_list =
+       LIST_HEAD_INITIALIZER(mlx4_mem_event_cb_list);
+
+rte_rwlock_t mlx4_mem_event_rwlock = RTE_RWLOCK_INITIALIZER;
+
 /** Configuration structure for device arguments. */
 struct mlx4_conf {
        struct {
@@ -92,6 +98,20 @@ mlx4_dev_configure(struct rte_eth_dev *dev)
        if (ret)
                ERROR("%p: interrupt handler installation failed",
                      (void *)dev);
+       /*
+        * Once the device is added to the memory event callback list, its
+        * global MR cache table cannot be expanded on the fly because of a
+        * deadlock risk. If the table overflows, lookups fall back to a
+        * linear search of the MR list, which is slow.
+        */
+       if (mlx4_mr_btree_init(&priv->mr.cache, MLX4_MR_BTREE_CACHE_N * 2,
+                              dev->device->numa_node)) {
+               /* rte_errno is already set. */
+               return -rte_errno;
+       }
+       rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
+       LIST_INSERT_HEAD(&mlx4_mem_event_cb_list, priv, mem_event_cb);
+       rte_rwlock_write_unlock(&mlx4_mem_event_rwlock);
 exit:
        return ret;
 }
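
The comment in this hunk describes a two-tier lookup policy: the fixed-size
B-tree cache (sized MLX4_MR_BTREE_CACHE_N * 2 here) is the fast path, and a
linear scan of the MR list is the overflow fallback. A minimal sketch of that
policy, assuming hypothetical helpers mr_btree_lookup() and mr_list_lookup()
and UINT32_MAX as the miss sentinel (the real lookup code lives in mlx4_mr.c):

static uint32_t
mr_lookup_sketch(struct priv *priv, uintptr_t addr)
{
	/* Fast path: search the fixed-size B-tree cache. */
	uint32_t lkey = mr_btree_lookup(&priv->mr.cache, addr);

	if (lkey != UINT32_MAX)
		return lkey;
	/*
	 * Slow path: the cache cannot grow once the device is on the
	 * memory event callback list, so an overflowed entry can only
	 * be found by scanning the MR list linearly.
	 */
	return mr_list_lookup(priv, addr);
}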
@@ -125,6 +145,9 @@ mlx4_dev_start(struct rte_eth_dev *dev)
                      (void *)dev, strerror(-ret));
                goto err;
        }
+#ifndef NDEBUG
+       mlx4_mr_dump_dev(dev);
+#endif
        ret = mlx4_rxq_intr_enable(priv);
        if (ret) {
                ERROR("%p: interrupt handler installation failed",
@@ -200,6 +223,7 @@ mlx4_dev_close(struct rte_eth_dev *dev)
                mlx4_rx_queue_release(dev->data->rx_queues[i]);
        for (i = 0; i != dev->data->nb_tx_queues; ++i)
                mlx4_tx_queue_release(dev->data->tx_queues[i]);
+       mlx4_mr_release(dev);
        if (priv->pd != NULL) {
                assert(priv->ctx != NULL);
                claim_zero(mlx4_glue->dealloc_pd(priv->pd));
@@ -387,6 +411,99 @@ free_kvlist:
        return ret;
 }
 
+/**
+ * Interpret RSS capabilities reported by device.
+ *
+ * This function returns the set of usable Verbs RSS hash fields, taking
+ * known kernel quirks into account.
+ *
+ * @param ctx
+ *   Verbs context.
+ * @param pd
+ *   Verbs protection domain.
+ * @param device_attr_ex
+ *   Extended device attributes to interpret.
+ *
+ * @return
+ *   Usable RSS hash fields mask in Verbs format.
+ */
+static uint64_t
+mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd,
+               struct ibv_device_attr_ex *device_attr_ex)
+{
+       uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask;
+       struct ibv_cq *cq = NULL;
+       struct ibv_wq *wq = NULL;
+       struct ibv_rwq_ind_table *ind = NULL;
+       struct ibv_qp *qp = NULL;
+
+       if (!hw_rss_sup) {
+               WARN("no RSS capabilities reported; disabling support for UDP"
+                    " RSS and inner VXLAN RSS");
+               return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
+                       IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
+                       IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
+       }
+       if (!(hw_rss_sup & IBV_RX_HASH_INNER))
+               return hw_rss_sup;
+       /*
+        * Although reported as supported, missing code in some Linux
+        * kernel versions (v4.15, v4.16) prevents the creation of hash
+        * QPs with the inner capability.
+        *
+        * The only way to confirm support is therefore to attempt to
+        * instantiate a temporary RSS context.
+        */
+       cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0);
+       wq = cq ? mlx4_glue->create_wq
+               (ctx,
+                &(struct ibv_wq_init_attr){
+                       .wq_type = IBV_WQT_RQ,
+                       .max_wr = 1,
+                       .max_sge = 1,
+                       .pd = pd,
+                       .cq = cq,
+                }) : NULL;
+       ind = wq ? mlx4_glue->create_rwq_ind_table
+               (ctx,
+                &(struct ibv_rwq_ind_table_init_attr){
+                       .log_ind_tbl_size = 0,
+                       .ind_tbl = &wq,
+                       .comp_mask = 0,
+                }) : NULL;
+       qp = ind ? mlx4_glue->create_qp_ex
+               (ctx,
+                &(struct ibv_qp_init_attr_ex){
+                       .comp_mask =
+                               (IBV_QP_INIT_ATTR_PD |
+                                IBV_QP_INIT_ATTR_RX_HASH |
+                                IBV_QP_INIT_ATTR_IND_TABLE),
+                       .qp_type = IBV_QPT_RAW_PACKET,
+                       .pd = pd,
+                       .rwq_ind_tbl = ind,
+                       .rx_hash_conf = {
+                               .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+                               .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
+                               .rx_hash_key = mlx4_rss_hash_key_default,
+                               .rx_hash_fields_mask = hw_rss_sup,
+                       },
+                }) : NULL;
+       if (!qp) {
+               WARN("disabling unusable inner RSS capability due to kernel"
+                    " quirk");
+               hw_rss_sup &= ~IBV_RX_HASH_INNER;
+       } else {
+               claim_zero(mlx4_glue->destroy_qp(qp));
+       }
+       if (ind)
+               claim_zero(mlx4_glue->destroy_rwq_ind_table(ind));
+       if (wq)
+               claim_zero(mlx4_glue->destroy_wq(wq));
+       if (cq)
+               claim_zero(mlx4_glue->destroy_cq(cq));
+       return hw_rss_sup;
+}
+
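
Note the construction pattern above: each Verbs object (CQ, WQ, indirection
table, QP) is created only if the previous step succeeded, so a failure at any
stage simply leaves qp NULL, and the single teardown path at the end releases
whatever subset was actually instantiated.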
 static struct rte_pci_driver mlx4_driver;
 
 /**
@@ -565,18 +682,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                         PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
                DEBUG("L2 tunnel checksum offloads are %ssupported",
                      priv->hw_csum_l2tun ? "" : "not ");
-               priv->hw_rss_sup = device_attr_ex.rss_caps.rx_hash_fields_mask;
-               if (!priv->hw_rss_sup) {
-                       WARN("no RSS capabilities reported; disabling support"
-                            " for UDP RSS and inner VXLAN RSS");
-                       priv->hw_rss_sup =
-                               IBV_RX_HASH_SRC_IPV4 |
-                               IBV_RX_HASH_DST_IPV4 |
-                               IBV_RX_HASH_SRC_IPV6 |
-                               IBV_RX_HASH_DST_IPV6 |
-                               IBV_RX_HASH_SRC_PORT_TCP |
-                               IBV_RX_HASH_DST_PORT_TCP;
-               }
+               priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd,
+                                                  &device_attr_ex);
                DEBUG("supported RSS hash fields mask: %016" PRIx64,
                      priv->hw_rss_sup);
                priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
@@ -654,6 +761,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                /* Update link status once if waiting for LSC. */
                if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
                        mlx4_link_update(eth_dev, 0);
+               rte_eth_dev_probing_finish(eth_dev);
                continue;
 port_error:
                rte_free(priv);
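
The rte_eth_dev_probing_finish() call added above follows the ethdev
convention introduced in DPDK 18.05: once a port is fully set up, the PMD
signals completion so applications receive the RTE_ETH_EVENT_NEW notification
for it.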
@@ -881,6 +989,8 @@ rte_mlx4_pmd_init(void)
        }
        mlx4_glue->fork_init();
        rte_pci_register(&mlx4_driver);
+       rte_mem_event_callback_register("MLX4_MEM_EVENT_CB",
+                                       mlx4_mr_mem_event_cb, NULL);
 }
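
The callback registered here consumes the mlx4_mem_event_cb_list and rwlock
added at the top of the file. The real handler, mlx4_mr_mem_event_cb(), lives
in mlx4_mr.c; the following is only a hedged sketch of its expected shape,
with mr_free_range() as a hypothetical stand-in for the per-device free
handling, using DPDK's rte_mem_event_callback_t signature:

static void
mem_event_cb_sketch(enum rte_mem_event event_type, const void *addr,
		    size_t len, void *arg __rte_unused)
{
	struct priv *priv;

	/* Only freed ranges require MR invalidation. */
	if (event_type != RTE_MEM_EVENT_FREE)
		return;
	/*
	 * Readers take the lock shared; device registration in
	 * mlx4_dev_configure() takes it exclusive.
	 */
	rte_rwlock_read_lock(&mlx4_mem_event_rwlock);
	LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb)
		mr_free_range(priv, addr, len); /* hypothetical helper */
	rte_rwlock_read_unlock(&mlx4_mem_event_rwlock);
}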
 
 RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);