net/mlx5: fix devargs validation for multi-class probing
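
The mlx5 device in DPDK can be probed by several classes at once (net, vDPA,
compress, crypto, RegEx), and each class parses only the devargs keys it
knows. mlx5_args() used to validate the key/value list against a fixed table
of net-PMD keys and fail the whole probe on any unknown key, which broke
multi-class probing whenever a key belonging to another class was present.
The diff below parses the list without a key filter and demotes unknown keys
to a warning, leaving them for the other classes to consume. It also replaces
the open-coded Tx/Rx UAR allocation retry loops with the common
mlx5_devx_uar_prepare()/mlx5_devx_uar_release() helpers, and adds a
"delay_drop" devarg controlling delay drop for standard and hairpin Rx queues.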
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 8614b8f..aa5f313 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -19,6 +19,7 @@
 #include <rte_rwlock.h>
 #include <rte_spinlock.h>
 #include <rte_string_fns.h>
+#include <rte_eal_paging.h>
 #include <rte_alarm.h>
 #include <rte_cycles.h>
 
 /* Device parameter to configure implicit registration of mempool memory. */
 #define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"
 
+/* Device parameter to configure the delay drop when creating Rxqs. */
+#define MLX5_DELAY_DROP "delay_drop"
+
 /* Shared memory between primary and secondary processes. */
 struct mlx5_shared_data *mlx5_shared_data;
 
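The new "delay_drop" devarg is a bitmask, decoded in mlx5_args_check() further
below. A minimal sketch of that decoding, assuming MLX5_DELAY_DROP_STANDARD
and MLX5_DELAY_DROP_HAIRPIN are bits 0 and 1 (the names come from the hunk
below; the exact bit positions are an assumption here):

	/* Sketch only: bit positions assumed, field names from mlx5_args_check(). */
	unsigned long tmp = strtoul(val, NULL, 0); /* e.g. "delay_drop=3" -> 3 */
	config->std_delay_drop = !!(tmp & 0x1); /* standard Rx queues */
	config->hp_delay_drop = !!(tmp & 0x2);  /* hairpin Rx queues */

So "delay_drop=1" enables delay drop only for standard Rx queues,
"delay_drop=2" only for hairpin queues, and "delay_drop=3" for both.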
@@ -518,7 +522,6 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
 static void
 mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
 {
-       struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
        int i;
 
        memset(&sh->cmng, 0, sizeof(sh->cmng));
@@ -531,10 +534,6 @@ mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
                TAILQ_INIT(&sh->cmng.counters[i]);
                rte_spinlock_init(&sh->cmng.csl[i]);
        }
-       if (sh->devx && !haswell_broadwell_cpu) {
-               sh->cmng.relaxed_ordering_write = attr->relaxed_ordering_write;
-               sh->cmng.relaxed_ordering_read = attr->relaxed_ordering_read;
-       }
 }
 
 /**
@@ -549,8 +548,7 @@ mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
        uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;
 
        LIST_REMOVE(mng, next);
-       claim_zero(mlx5_devx_cmd_destroy(mng->dm));
-       claim_zero(mlx5_os_umem_dereg(mng->umem));
+       mlx5_os_wrapped_mkey_destroy(&mng->wm);
        mlx5_free(mem);
 }
 
@@ -984,143 +982,35 @@ mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
        return tn_offloads;
 }
 
-/*
- * Allocate Rx and Tx UARs in robust fashion.
- * This routine handles the following UAR allocation issues:
- *
- *  - tries to allocate the UAR with the most appropriate memory
- *    mapping type from the ones supported by the host
- *
- *  - tries to allocate the UAR with non-NULL base address
- *    OFED 5.0.x and Upstream rdma_core before v29 returned the NULL as
- *    UAR base address if UAR was not the first object in the UAR page.
- *    It caused the PMD failure and we should try to get another UAR
- *    till we get the first one with non-NULL base address returned.
- */
+/* Fill all fields of the Tx and Rx UAR structures. */
 static int
-mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
-                    const struct mlx5_common_dev_config *config)
+mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh)
 {
-       uint32_t uar_mapping, retry;
-       int err = 0;
-       void *base_addr;
-
-       for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
-               /* Control the mapping type according to the settings. */
-               uar_mapping = (config->dbnc == MLX5_TXDB_NCACHED) ?
-                             MLX5DV_UAR_ALLOC_TYPE_NC :
-                             MLX5DV_UAR_ALLOC_TYPE_BF;
-#else
-               RTE_SET_USED(config);
-               /*
-                * It seems we have no way to control the memory mapping type
-                * for the UAR, the default "Write-Combining" type is supposed.
-                * The UAR initialization on queue creation queries the
-                * actual mapping type done by Verbs/kernel and setups the
-                * PMD datapath accordingly.
-                */
-               uar_mapping = 0;
-#endif
-               sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
-                                                      uar_mapping);
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
-               if (!sh->tx_uar &&
-                   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
-                       if (config->dbnc == MLX5_TXDB_CACHED ||
-                           config->dbnc == MLX5_TXDB_HEURISTIC)
-                               DRV_LOG(WARNING, "Devarg tx_db_nc setting "
-                                                "is not supported by DevX");
-                       /*
-                        * In some environments like virtual machine
-                        * the Write Combining mapped might be not supported
-                        * and UAR allocation fails. We try "Non-Cached"
-                        * mapping for the case. The tx_burst routines take
-                        * the UAR mapping type into account on UAR setup
-                        * on queue creation.
-                        */
-                       DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (BF)");
-                       uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
-                       sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
-                                                              uar_mapping);
-               } else if (!sh->tx_uar &&
-                          uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
-                       if (config->dbnc == MLX5_TXDB_NCACHED)
-                               DRV_LOG(WARNING, "Devarg tx_db_nc settings "
-                                                "is not supported by DevX");
-                       /*
-                        * If Verbs/kernel does not support "Non-Cached"
-                        * try the "Write-Combining".
-                        */
-                       DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (NC)");
-                       uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
-                       sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
-                                                              uar_mapping);
-               }
-#endif
-               if (!sh->tx_uar) {
-                       DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (BF/NC)");
-                       err = ENOMEM;
-                       goto exit;
-               }
-               base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
-               if (base_addr)
-                       break;
-               /*
-                * The UARs are allocated by rdma_core within the
-                * IB device context, on context closure all UARs
-                * will be freed, should be no memory/object leakage.
-                */
-               DRV_LOG(DEBUG, "Retrying to allocate Tx DevX UAR");
-               sh->tx_uar = NULL;
-       }
-       /* Check whether we finally succeeded with valid UAR allocation. */
-       if (!sh->tx_uar) {
-               DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (NULL base)");
-               err = ENOMEM;
-               goto exit;
-       }
-       for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
-               uar_mapping = 0;
-               sh->devx_rx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
-                                                           uar_mapping);
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
-               if (!sh->devx_rx_uar &&
-                   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
-                       /*
-                        * Rx UAR is used to control interrupts only,
-                        * should be no datapath noticeable impact,
-                        * can try "Non-Cached" mapping safely.
-                        */
-                       DRV_LOG(DEBUG, "Failed to allocate Rx DevX UAR (BF)");
-                       uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
-                       sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
-                                                  (sh->cdev->ctx, uar_mapping);
-               }
-#endif
-               if (!sh->devx_rx_uar) {
-                       DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (BF/NC)");
-                       err = ENOMEM;
-                       goto exit;
-               }
-               base_addr = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
-               if (base_addr)
-                       break;
-               /*
-                * The UARs are allocated by rdma_core within the
-                * IB device context, on context closure all UARs
-                * will be freed, should be no memory/object leakage.
-                */
-               DRV_LOG(DEBUG, "Retrying to allocate Rx DevX UAR");
-               sh->devx_rx_uar = NULL;
+       int ret;
+
+       ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar);
+       if (ret) {
+               DRV_LOG(ERR, "Failed to prepare Tx DevX UAR.");
+               return -rte_errno;
        }
-       /* Check whether we finally succeeded with valid UAR allocation. */
-       if (!sh->devx_rx_uar) {
-               DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (NULL base)");
-               err = ENOMEM;
+       MLX5_ASSERT(sh->tx_uar.obj);
+       MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj));
+       ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar);
+       if (ret) {
+               DRV_LOG(ERR, "Failed to prepare Rx DevX UAR.");
+               mlx5_devx_uar_release(&sh->tx_uar);
+               return -rte_errno;
        }
-exit:
-       return err;
+       MLX5_ASSERT(sh->rx_uar.obj);
+       MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj));
+       return 0;
+}
+
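+/* Release the Tx and Rx UARs prepared by mlx5_rxtx_uars_prepare(). */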
+static void
+mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh)
+{
+       mlx5_devx_uar_release(&sh->rx_uar);
+       mlx5_devx_uar_release(&sh->tx_uar);
 }
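
Note the all-or-nothing contract above: when preparing the Rx UAR fails, the
already-prepared Tx UAR is released before returning, so callers never see a
half-initialized pair and mlx5_rxtx_uars_release() can be called
unconditionally on the error paths in the hunks below.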
 
 /**
@@ -1329,21 +1219,17 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                        err = ENOMEM;
                        goto error;
                }
-               err = mlx5_alloc_rxtx_uars(sh, &sh->cdev->config);
+               err = mlx5_rxtx_uars_prepare(sh);
                if (err)
                        goto error;
-               MLX5_ASSERT(sh->tx_uar);
-               MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar));
-
-               MLX5_ASSERT(sh->devx_rx_uar);
-               MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
-       }
 #ifndef RTE_ARCH_64
-       /* Initialize UAR access locks for 32bit implementations. */
-       rte_spinlock_init(&sh->uar_lock_cq);
-       for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
-               rte_spinlock_init(&sh->uar_lock[i]);
+       } else {
+               /* Initialize UAR access locks for 32bit implementations. */
+               rte_spinlock_init(&sh->uar_lock_cq);
+               for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+                       rte_spinlock_init(&sh->uar_lock[i]);
 #endif
+       }
        mlx5_os_dev_shared_handler_install(sh);
        if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
                err = mlx5_flow_os_init_workspace_once();
@@ -1370,10 +1256,7 @@ error:
                if (sh->tis[i])
                        claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
        } while (++i < (uint32_t)sh->bond.n_port);
-       if (sh->devx_rx_uar)
-               mlx5_glue->devx_free_uar(sh->devx_rx_uar);
-       if (sh->tx_uar)
-               mlx5_glue->devx_free_uar(sh->tx_uar);
+       mlx5_rxtx_uars_release(sh);
        mlx5_free(sh);
        MLX5_ASSERT(err > 0);
        rte_errno = err;
@@ -1446,18 +1329,13 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                mlx5_aso_flow_mtrs_mng_close(sh);
        mlx5_flow_ipool_destroy(sh);
        mlx5_os_dev_shared_handler_uninstall(sh);
-       if (sh->tx_uar) {
-               mlx5_glue->devx_free_uar(sh->tx_uar);
-               sh->tx_uar = NULL;
-       }
+       mlx5_rxtx_uars_release(sh);
        do {
                if (sh->tis[i])
                        claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
        } while (++i < sh->bond.n_port);
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
-       if (sh->devx_rx_uar)
-               mlx5_glue->devx_free_uar(sh->devx_rx_uar);
        MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
        pthread_mutex_destroy(&sh->txpp.mutex);
        mlx5_free(sh);
@@ -1607,8 +1485,8 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev)
         * UAR register table follows the process private structure. BlueFlame
         * registers for Tx queues are stored in the table.
         */
-       ppriv_size =
-               sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
+       ppriv_size = sizeof(struct mlx5_proc_priv) +
+                    priv->txqs_n * sizeof(struct mlx5_uar_data);
        ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size,
                            RTE_CACHE_LINE_SIZE, dev->device->numa_node);
        if (!ppriv) {
@@ -1617,6 +1495,8 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev)
        }
        ppriv->uar_table_sz = priv->txqs_n;
        dev->process_private = ppriv;
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               priv->sh->pppriv = ppriv;
        return 0;
 }
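
The size computed above places the per-queue UAR table directly behind the
process-private header, with each Tx queue now holding a full struct
mlx5_uar_data entry instead of a bare pointer. An illustrative sketch of the
layout (not code from the tree):

	/*
	 * ppriv_size = sizeof(struct mlx5_proc_priv)
	 *            + priv->txqs_n * sizeof(struct mlx5_uar_data)
	 *
	 * +-----------------------+----------------------------------------+
	 * | struct mlx5_proc_priv | struct mlx5_uar_data uar_table[txqs_n] |
	 * +-----------------------+----------------------------------------+
	 */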
 
@@ -2091,10 +1971,13 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                config->decap_en = !!tmp;
        } else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
                config->allow_duplicate_pattern = !!tmp;
+       } else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
+               config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
+               config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
        } else {
-               DRV_LOG(WARNING, "%s: unknown parameter", key);
-               rte_errno = EINVAL;
-               return -rte_errno;
+               DRV_LOG(WARNING,
+                       "%s: unknown parameter, maybe it's for another class.",
+                       key);
        }
        return 0;
 }
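
With unknown keys demoted to a warning, a devargs string that mixes classes no
longer aborts net probing. A hypothetical example (any key not shown in this
file is purely illustrative):

	/* "class=eth:regex,rxq_cqe_comp_en=0,delay_drop=3,<regex-only-key>=1"
	 *   - "class" is recognized by the common driver;
	 *   - "rxq_cqe_comp_en" is consumed by the net PMD as before;
	 *   - "delay_drop=3" sets std_delay_drop = 1 and hp_delay_drop = 1;
	 *   - the RegEx-only key now merely logs the "unknown parameter"
	 *     warning and probing continues.
	 */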
@@ -2113,74 +1996,25 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 int
 mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 {
-       const char **params = (const char *[]){
-               MLX5_DRIVER_KEY,
-               MLX5_RXQ_CQE_COMP_EN,
-               MLX5_RXQ_PKT_PAD_EN,
-               MLX5_RX_MPRQ_EN,
-               MLX5_RX_MPRQ_LOG_STRIDE_NUM,
-               MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
-               MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
-               MLX5_RXQS_MIN_MPRQ,
-               MLX5_TXQ_INLINE,
-               MLX5_TXQ_INLINE_MIN,
-               MLX5_TXQ_INLINE_MAX,
-               MLX5_TXQ_INLINE_MPW,
-               MLX5_TXQS_MIN_INLINE,
-               MLX5_TXQS_MAX_VEC,
-               MLX5_TXQ_MPW_EN,
-               MLX5_TXQ_MPW_HDR_DSEG_EN,
-               MLX5_TXQ_MAX_INLINE_LEN,
-               MLX5_TX_DB_NC,
-               MLX5_TX_PP,
-               MLX5_TX_SKEW,
-               MLX5_TX_VEC_EN,
-               MLX5_RX_VEC_EN,
-               MLX5_L3_VXLAN_EN,
-               MLX5_VF_NL_EN,
-               MLX5_DV_ESW_EN,
-               MLX5_DV_FLOW_EN,
-               MLX5_DV_XMETA_EN,
-               MLX5_LACP_BY_USER,
-               MLX5_MR_EXT_MEMSEG_EN,
-               MLX5_REPRESENTOR,
-               MLX5_MAX_DUMP_FILES_NUM,
-               MLX5_LRO_TIMEOUT_USEC,
-               RTE_DEVARGS_KEY_CLASS,
-               MLX5_HP_BUF_SIZE,
-               MLX5_RECLAIM_MEM,
-               MLX5_SYS_MEM_EN,
-               MLX5_DECAP_EN,
-               MLX5_ALLOW_DUPLICATE_PATTERN,
-               MLX5_MR_MEMPOOL_REG_EN,
-               NULL,
-       };
        struct rte_kvargs *kvlist;
        int ret = 0;
-       int i;
 
        if (devargs == NULL)
                return 0;
-       /* Following UGLY cast is done to pass checkpatch. */
-       kvlist = rte_kvargs_parse(devargs->args, params);
+       kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
        /* Process parameters. */
-       for (i = 0; (params[i] != NULL); ++i) {
-               if (rte_kvargs_count(kvlist, params[i])) {
-                       ret = rte_kvargs_process(kvlist, params[i],
-                                                mlx5_args_check, config);
-                       if (ret) {
-                               rte_errno = EINVAL;
-                               rte_kvargs_free(kvlist);
-                               return -rte_errno;
-                       }
-               }
+       ret = rte_kvargs_process(kvlist, NULL, mlx5_args_check, config);
+       if (ret) {
+               rte_errno = EINVAL;
+               ret = -rte_errno;
        }
        rte_kvargs_free(kvlist);
-       return 0;
+       return ret;
 }
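
The two NULL arguments above carry the change: rte_kvargs_parse() with a NULL
valid-keys list accepts any key, and rte_kvargs_process() with a NULL key name
invokes the callback for every parsed pair. A minimal standalone sketch of
that behavior (the dump_pair() callback is hypothetical):

	#include <stdio.h>
	#include <rte_kvargs.h>

	static int
	dump_pair(const char *key, const char *val, void *opaque)
	{
		(void)opaque;
		printf("%s=%s\n", key, val);
		return 0; /* non-zero aborts processing, as mlx5_args relies on */
	}

	static void
	kvargs_demo(void)
	{
		struct rte_kvargs *kv = rte_kvargs_parse("a=1,b=2", NULL);

		if (kv == NULL)
			return;
		/* NULL key: dump_pair() runs for both pairs, printing a=1, b=2. */
		rte_kvargs_process(kv, NULL, dump_pair, NULL);
		rte_kvargs_free(kv);
	}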
 
 /**