X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5.c;h=7487b1f87da3f4fab737f81ba1f52d58f181cf60;hb=91d1cfafc977ccb224d4632b12952360934499f2;hp=7fc2ca734571960776b13b3f82d8d14dfe709e70;hpb=fe46b20c96593ff9644097978b347286c6a4b71a;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 7fc2ca7345..7487b1f87d 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -36,7 +37,6 @@ #include "mlx5_rx.h" #include "mlx5_tx.h" #include "mlx5_autoconf.h" -#include "mlx5_mr.h" #include "mlx5_flow.h" #include "mlx5_flow_os.h" #include "rte_pmd_mlx5.h" @@ -184,6 +184,9 @@ /* Device parameter to configure implicit registration of mempool memory. */ #define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en" +/* Device parameter to configure the delay drop when creating Rxqs. */ +#define MLX5_DELAY_DROP "delay_drop" + /* Shared memory between primary and secondary processes. */ struct mlx5_shared_data *mlx5_shared_data; @@ -379,7 +382,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { }, }; - #define MLX5_FLOW_MIN_ID_POOL_SIZE 512 #define MLX5_ID_GENERATION_ARRAY_FACTOR 16 @@ -451,7 +453,7 @@ mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh) mlx5_free(sh->aso_age_mng); return -1; } - rte_spinlock_init(&sh->aso_age_mng->resize_sl); + rte_rwlock_init(&sh->aso_age_mng->resize_rwl); rte_spinlock_init(&sh->aso_age_mng->free_sl); LIST_INIT(&sh->aso_age_mng->free); return 0; @@ -511,6 +513,46 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh) } } +/** + * DV flow counter mode detect and config. + * + * @param dev + * Pointer to rte_eth_dev structure. + * + */ +void +mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused) +{ +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr; + bool fallback; + +#ifndef HAVE_IBV_DEVX_ASYNC + fallback = true; +#else + fallback = false; + if (!sh->cdev->config.devx || !priv->config.dv_flow_en || + !hca_attr->flow_counters_dump || + !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) || + (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP)) + fallback = true; +#endif + if (fallback) + DRV_LOG(INFO, "Use fall-back DV counter management. Flow " + "counter dump:%d, bulk_alloc_bitmap:0x%hhx.", + hca_attr->flow_counters_dump, + hca_attr->flow_counter_bulk_alloc_bitmap); + /* Initialize fallback mode only on the port initializes sh. */ + if (sh->refcnt == 1) + sh->cmng.counter_fallback = fallback; + else if (fallback != sh->cmng.counter_fallback) + DRV_LOG(WARNING, "Port %d in sh has different fallback mode " + "with others:%d.", PORT_ID(priv), fallback); +#endif +} + /** * Initialize the counters management structure. 
* @@ -520,7 +562,6 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh) static void mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh) { - struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr; int i; memset(&sh->cmng, 0, sizeof(sh->cmng)); @@ -533,10 +574,6 @@ mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh) TAILQ_INIT(&sh->cmng.counters[i]); rte_spinlock_init(&sh->cmng.csl[i]); } - if (sh->devx && !haswell_broadwell_cpu) { - sh->cmng.relaxed_ordering_write = attr->relaxed_ordering_write; - sh->cmng.relaxed_ordering_read = attr->relaxed_ordering_read; - } } /** @@ -551,8 +588,7 @@ mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng) uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data; LIST_REMOVE(mng, next); - claim_zero(mlx5_devx_cmd_destroy(mng->dm)); - claim_zero(mlx5_os_umem_dereg(mng->umem)); + mlx5_os_wrapped_mkey_destroy(&mng->wm); mlx5_free(mem); } @@ -633,6 +669,7 @@ mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh) } if (sh->meter_aso_en) { rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl); + rte_rwlock_init(&sh->mtrmng->pools_mng.resize_mtrwl); LIST_INIT(&sh->mtrmng->pools_mng.meters); } sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID; @@ -866,8 +903,7 @@ bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flex_parser_profiles *prf = - &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0]; + struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser; return !!prf->obj; } @@ -886,15 +922,14 @@ int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flex_parser_profiles *prf = - &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0]; + struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser; struct mlx5_devx_graph_node_attr node = { .modify_field_select = 0, }; uint32_t ids[8]; int ret; - if (!priv->config.hca_attr.parse_graph_flex_node) { + if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) { DRV_LOG(ERR, "Dynamic flex parser is not supported " "for device %s.", priv->dev_data->name); return -ENOTSUP; @@ -950,8 +985,7 @@ static void mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flex_parser_profiles *prf = - &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0]; + struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser; if (prf->obj) mlx5_devx_cmd_destroy(prf->obj); @@ -988,168 +1022,40 @@ mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr) return tn_offloads; } -/* - * Allocate Rx and Tx UARs in robust fashion. - * This routine handles the following UAR allocation issues: - * - * - tries to allocate the UAR with the most appropriate memory - * mapping type from the ones supported by the host - * - * - tries to allocate the UAR with non-NULL base address - * OFED 5.0.x and Upstream rdma_core before v29 returned the NULL as - * UAR base address if UAR was not the first object in the UAR page. - * It caused the PMD failure and we should try to get another UAR - * till we get the first one with non-NULL base address returned. - */ +/* Fill all fields of UAR structure. 
*/ static int -mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh, - const struct mlx5_common_dev_config *config) +mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh) { - uint32_t uar_mapping, retry; - int err = 0; - void *base_addr; - - for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) { -#ifdef MLX5DV_UAR_ALLOC_TYPE_NC - /* Control the mapping type according to the settings. */ - uar_mapping = (config->dbnc == MLX5_TXDB_NCACHED) ? - MLX5DV_UAR_ALLOC_TYPE_NC : - MLX5DV_UAR_ALLOC_TYPE_BF; -#else - RTE_SET_USED(config); - /* - * It seems we have no way to control the memory mapping type - * for the UAR, the default "Write-Combining" type is supposed. - * The UAR initialization on queue creation queries the - * actual mapping type done by Verbs/kernel and setups the - * PMD datapath accordingly. - */ - uar_mapping = 0; -#endif - sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx, - uar_mapping); -#ifdef MLX5DV_UAR_ALLOC_TYPE_NC - if (!sh->tx_uar && - uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) { - if (config->dbnc == MLX5_TXDB_CACHED || - config->dbnc == MLX5_TXDB_HEURISTIC) - DRV_LOG(WARNING, "Devarg tx_db_nc setting " - "is not supported by DevX"); - /* - * In some environments like virtual machine - * the Write Combining mapped might be not supported - * and UAR allocation fails. We try "Non-Cached" - * mapping for the case. The tx_burst routines take - * the UAR mapping type into account on UAR setup - * on queue creation. - */ - DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (BF)"); - uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC; - sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx, - uar_mapping); - } else if (!sh->tx_uar && - uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) { - if (config->dbnc == MLX5_TXDB_NCACHED) - DRV_LOG(WARNING, "Devarg tx_db_nc settings " - "is not supported by DevX"); - /* - * If Verbs/kernel does not support "Non-Cached" - * try the "Write-Combining". - */ - DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (NC)"); - uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF; - sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx, - uar_mapping); - } -#endif - if (!sh->tx_uar) { - DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (BF/NC)"); - err = ENOMEM; - goto exit; - } - base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar); - if (base_addr) - break; - /* - * The UARs are allocated by rdma_core within the - * IB device context, on context closure all UARs - * will be freed, should be no memory/object leakage. - */ - DRV_LOG(DEBUG, "Retrying to allocate Tx DevX UAR"); - sh->tx_uar = NULL; - } - /* Check whether we finally succeeded with valid UAR allocation. */ - if (!sh->tx_uar) { - DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (NULL base)"); - err = ENOMEM; - goto exit; - } - for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) { - uar_mapping = 0; - sh->devx_rx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx, - uar_mapping); -#ifdef MLX5DV_UAR_ALLOC_TYPE_NC - if (!sh->devx_rx_uar && - uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) { - /* - * Rx UAR is used to control interrupts only, - * should be no datapath noticeable impact, - * can try "Non-Cached" mapping safely. 
- */ - DRV_LOG(DEBUG, "Failed to allocate Rx DevX UAR (BF)"); - uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC; - sh->devx_rx_uar = mlx5_glue->devx_alloc_uar - (sh->cdev->ctx, uar_mapping); - } -#endif - if (!sh->devx_rx_uar) { - DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (BF/NC)"); - err = ENOMEM; - goto exit; - } - base_addr = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar); - if (base_addr) - break; - /* - * The UARs are allocated by rdma_core within the - * IB device context, on context closure all UARs - * will be freed, should be no memory/object leakage. - */ - DRV_LOG(DEBUG, "Retrying to allocate Rx DevX UAR"); - sh->devx_rx_uar = NULL; + int ret; + + ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar); + if (ret) { + DRV_LOG(ERR, "Failed to prepare Tx DevX UAR."); + return -rte_errno; } - /* Check whether we finally succeeded with valid UAR allocation. */ - if (!sh->devx_rx_uar) { - DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (NULL base)"); - err = ENOMEM; + MLX5_ASSERT(sh->tx_uar.obj); + MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj)); + ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar); + if (ret) { + DRV_LOG(ERR, "Failed to prepare Rx DevX UAR."); + mlx5_devx_uar_release(&sh->tx_uar); + return -rte_errno; } -exit: - return err; + MLX5_ASSERT(sh->rx_uar.obj); + MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj)); + return 0; } -/** - * Unregister the mempool from the protection domain. - * - * @param sh - * Pointer to the device shared context. - * @param mp - * Mempool being unregistered. - */ static void -mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh, - struct rte_mempool *mp) +mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh) { - struct mlx5_mp_id mp_id; - - mlx5_mp_id_init(&mp_id, 0); - if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0) - DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s", - mp->name, sh->cdev->pd, rte_strerror(rte_errno)); + mlx5_devx_uar_release(&sh->rx_uar); + mlx5_devx_uar_release(&sh->tx_uar); } /** - * rte_mempool_walk() callback to register mempools - * for the protection domain. + * rte_mempool_walk() callback to unregister Rx mempools. + * It used when implicit mempool registration is disabled. * * @param mp * The mempool being walked. @@ -1157,66 +1063,11 @@ mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh, * Pointer to the device shared context. */ static void -mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg) +mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg) { struct mlx5_dev_ctx_shared *sh = arg; - struct mlx5_mp_id mp_id; - int ret; - mlx5_mp_id_init(&mp_id, 0); - ret = mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp, - &mp_id); - if (ret < 0 && rte_errno != EEXIST) - DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s", - mp->name, sh->cdev->pd, rte_strerror(rte_errno)); -} - -/** - * rte_mempool_walk() callback to unregister mempools - * from the protection domain. - * - * @param mp - * The mempool being walked. - * @param arg - * Pointer to the device shared context. - */ -static void -mlx5_dev_ctx_shared_mempool_unregister_cb(struct rte_mempool *mp, void *arg) -{ - mlx5_dev_ctx_shared_mempool_unregister - ((struct mlx5_dev_ctx_shared *)arg, mp); -} - -/** - * Mempool life cycle callback for Ethernet devices. - * - * @param event - * Mempool life cycle event. - * @param mp - * Associated mempool. - * @param arg - * Pointer to a device shared context. 
- */ -static void -mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event, - struct rte_mempool *mp, void *arg) -{ - struct mlx5_dev_ctx_shared *sh = arg; - struct mlx5_mp_id mp_id; - - switch (event) { - case RTE_MEMPOOL_EVENT_READY: - mlx5_mp_id_init(&mp_id, 0); - if (mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp, - &mp_id) < 0) - DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s", - mp->name, sh->cdev->pd, - rte_strerror(rte_errno)); - break; - case RTE_MEMPOOL_EVENT_DESTROY: - mlx5_dev_ctx_shared_mempool_unregister(sh, mp); - break; - } + mlx5_dev_mempool_unregister(sh->cdev, mp); } /** @@ -1237,7 +1088,7 @@ mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event, struct mlx5_dev_ctx_shared *sh = arg; if (event == RTE_MEMPOOL_EVENT_DESTROY) - mlx5_dev_ctx_shared_mempool_unregister(sh, mp); + mlx5_dev_mempool_unregister(sh->cdev, mp); } int @@ -1253,17 +1104,108 @@ mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev) (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh); return ret == 0 || rte_errno == EEXIST ? 0 : ret; } - /* Callback for this shared context may be already registered. */ - ret = rte_mempool_event_callback_register - (mlx5_dev_ctx_shared_mempool_event_cb, sh); - if (ret != 0 && rte_errno != EEXIST) - return ret; - /* Register mempools only once for this shared context. */ - if (ret == 0) - rte_mempool_walk(mlx5_dev_ctx_shared_mempool_register_cb, sh); + return mlx5_dev_mempool_subscribe(sh->cdev); +} + +/** + * Set up multiple TISs with different affinities according to + * number of bonding ports + * + * @param priv + * Pointer of shared context. + * + * @return + * Zero on success, -1 otherwise. + */ +static int +mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh) +{ + int i; + struct mlx5_devx_lag_context lag_ctx = { 0 }; + struct mlx5_devx_tis_attr tis_attr = { 0 }; + + tis_attr.transport_domain = sh->td->id; + if (sh->bond.n_port) { + if (!mlx5_devx_cmd_query_lag(sh->cdev->ctx, &lag_ctx)) { + sh->lag.tx_remap_affinity[0] = + lag_ctx.tx_remap_affinity_1; + sh->lag.tx_remap_affinity[1] = + lag_ctx.tx_remap_affinity_2; + sh->lag.affinity_mode = lag_ctx.port_select_mode; + } else { + DRV_LOG(ERR, "Failed to query lag affinity."); + return -1; + } + if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) { + for (i = 0; i < sh->bond.n_port; i++) { + tis_attr.lag_tx_port_affinity = + MLX5_IFC_LAG_MAP_TIS_AFFINITY(i, + sh->bond.n_port); + sh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, + &tis_attr); + if (!sh->tis[i]) { + DRV_LOG(ERR, "Failed to TIS %d/%d for bonding device" + " %s.", i, sh->bond.n_port, + sh->ibdev_name); + return -1; + } + } + DRV_LOG(DEBUG, "LAG number of ports : %d, affinity_1 & 2 : pf%d & %d.\n", + sh->bond.n_port, lag_ctx.tx_remap_affinity_1, + lag_ctx.tx_remap_affinity_2); + return 0; + } + if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH) + DRV_LOG(INFO, "Device %s enabled HW hash based LAG.", + sh->ibdev_name); + } + tis_attr.lag_tx_port_affinity = 0; + sh->tis[0] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr); + if (!sh->tis[0]) { + DRV_LOG(ERR, "Failed to TIS 0 for bonding device" + " %s.", sh->ibdev_name); + return -1; + } return 0; } +/** + * Configure realtime timestamp format. + * + * @param sh + * Pointer to mlx5_dev_ctx_shared object. + * @param config + * Device configuration parameters. + * @param hca_attr + * Pointer to DevX HCA capabilities structure. 
+ */ +void +mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh, + struct mlx5_dev_config *config, + struct mlx5_hca_attr *hca_attr) +{ + uint32_t dw_cnt = MLX5_ST_SZ_DW(register_mtutc); + uint32_t reg[dw_cnt]; + int ret = ENOTSUP; + + if (hca_attr->access_register_user) + ret = mlx5_devx_cmd_register_read(sh->cdev->ctx, + MLX5_REGISTER_ID_MTUTC, 0, + reg, dw_cnt); + if (!ret) { + uint32_t ts_mode; + + /* MTUTC register is read successfully. */ + ts_mode = MLX5_GET(register_mtutc, reg, time_stamp_mode); + if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME) + config->rt_timestamp = 1; + } else { + /* Kernel does not support register reading. */ + if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S)) + config->rt_timestamp = 1; + } +} + /** * Allocate shared device context. If there is multiport device the * master and representors will share this context, if there is single @@ -1291,7 +1233,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, struct mlx5_dev_ctx_shared *sh; int err = 0; uint32_t i; - struct mlx5_devx_tis_attr tis_attr = { 0 }; MLX5_ASSERT(spawn); /* Secondary process should not create the shared context. */ @@ -1308,23 +1249,22 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, MLX5_ASSERT(spawn->max_port); sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE, sizeof(struct mlx5_dev_ctx_shared) + - spawn->max_port * - sizeof(struct mlx5_dev_shared_port), + spawn->max_port * sizeof(struct mlx5_dev_shared_port), RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); if (!sh) { - DRV_LOG(ERR, "shared context allocation failure"); - rte_errno = ENOMEM; + DRV_LOG(ERR, "Shared context allocation failure."); + rte_errno = ENOMEM; goto exit; } pthread_mutex_init(&sh->txpp.mutex, NULL); sh->numa_node = spawn->cdev->dev->numa_node; sh->cdev = spawn->cdev; - sh->devx = sh->cdev->config.devx; + sh->esw_mode = !!(spawn->info.master || spawn->info.representor); if (spawn->bond_info) sh->bond = *spawn->bond_info; - err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr); + err = mlx5_os_capabilities_prepare(sh); if (err) { - DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed"); + DRV_LOG(ERR, "Fail to configure device capabilities."); goto error; } sh->refcnt = 1; @@ -1335,61 +1275,36 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx), sizeof(sh->ibdev_path) - 1); /* - * Setting port_id to max unallowed value means - * there is no interrupt subhandler installed for - * the given port index i. + * Setting port_id to max unallowed value means there is no interrupt + * subhandler installed for the given port index i. 
*/ for (i = 0; i < sh->max_port; i++) { sh->port[i].ih_port_id = RTE_MAX_ETHPORTS; sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS; } - if (sh->devx) { + if (sh->cdev->config.devx) { sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx); if (!sh->td) { DRV_LOG(ERR, "TD allocation failure"); - err = ENOMEM; + rte_errno = ENOMEM; goto error; } - tis_attr.transport_domain = sh->td->id; - sh->tis = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr); - if (!sh->tis) { + if (mlx5_setup_tis(sh)) { DRV_LOG(ERR, "TIS allocation failure"); - err = ENOMEM; + rte_errno = ENOMEM; goto error; } - err = mlx5_alloc_rxtx_uars(sh, &sh->cdev->config); + err = mlx5_rxtx_uars_prepare(sh); if (err) goto error; - MLX5_ASSERT(sh->tx_uar); - MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar)); - - MLX5_ASSERT(sh->devx_rx_uar); - MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar)); - } #ifndef RTE_ARCH_64 - /* Initialize UAR access locks for 32bit implementations. */ - rte_spinlock_init(&sh->uar_lock_cq); - for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) - rte_spinlock_init(&sh->uar_lock[i]); + } else { + /* Initialize UAR access locks for 32bit implementations. */ + rte_spinlock_init(&sh->uar_lock_cq); + for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) + rte_spinlock_init(&sh->uar_lock[i]); #endif - /* - * Once the device is added to the list of memory event - * callback, its global MR cache table cannot be expanded - * on the fly because of deadlock. If it overflows, lookup - * should be done by searching MR list linearly, which is slow. - * - * At this point the device is not added to the memory - * event list yet, context is just being created. - */ - err = mlx5_mr_btree_init(&sh->share_cache.cache, - MLX5_MR_BTREE_CACHE_N * 2, - sh->numa_node); - if (err) { - err = rte_errno; - goto error; } - mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb, - &sh->share_cache.dereg_mr_cb); mlx5_os_dev_shared_handler_install(sh); if (LIST_EMPTY(&mlx5_dev_ctx_list)) { err = mlx5_flow_os_init_workspace_once(); @@ -1399,11 +1314,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, mlx5_flow_aging_init(sh); mlx5_flow_counters_mng_init(sh); mlx5_flow_ipool_create(sh, config); - /* Add device to memory callback list. */ - rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); - LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, - sh, mem_event_cb); - rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); /* Add context to the global device list. 
*/ LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next); rte_spinlock_init(&sh->geneve_tlv_opt_sl); @@ -1411,21 +1321,19 @@ exit: pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); return sh; error: + err = rte_errno; pthread_mutex_destroy(&sh->txpp.mutex); pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); MLX5_ASSERT(sh); - if (sh->share_cache.cache.table) - mlx5_mr_btree_free(&sh->share_cache.cache); - if (sh->tis) - claim_zero(mlx5_devx_cmd_destroy(sh->tis)); + mlx5_rxtx_uars_release(sh); + i = 0; + do { + if (sh->tis[i]) + claim_zero(mlx5_devx_cmd_destroy(sh->tis[i])); + } while (++i < (uint32_t)sh->bond.n_port); if (sh->td) claim_zero(mlx5_devx_cmd_destroy(sh->td)); - if (sh->devx_rx_uar) - mlx5_glue->devx_free_uar(sh->devx_rx_uar); - if (sh->tx_uar) - mlx5_glue->devx_free_uar(sh->tx_uar); mlx5_free(sh); - MLX5_ASSERT(err > 0); rte_errno = err; return NULL; } @@ -1441,6 +1349,7 @@ void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) { int ret; + int i = 0; pthread_mutex_lock(&mlx5_dev_ctx_list_mutex); #ifdef RTE_LIBRTE_MLX5_DEBUG @@ -1463,31 +1372,32 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) if (--sh->refcnt) goto exit; /* Stop watching for mempool events and unregister all mempools. */ - ret = rte_mempool_event_callback_unregister - (mlx5_dev_ctx_shared_mempool_event_cb, sh); - if (ret < 0 && rte_errno == ENOENT) + if (!sh->cdev->config.mr_mempool_reg_en) { ret = rte_mempool_event_callback_unregister (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh); - if (ret == 0) - rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb, - sh); - /* Remove from memory callback device list. */ - rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); - LIST_REMOVE(sh, mem_event_cb); - rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); - /* Release created Memory Regions. */ - mlx5_mr_release_cache(&sh->share_cache); + if (ret == 0) + rte_mempool_walk + (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh); + } /* Remove context from the global device list. */ LIST_REMOVE(sh, next); - /* Release flow workspaces objects on the last device. */ - if (LIST_EMPTY(&mlx5_dev_ctx_list)) + /* Release resources on the last device removal. */ + if (LIST_EMPTY(&mlx5_dev_ctx_list)) { + mlx5_os_net_cleanup(); mlx5_flow_os_release_workspace(); + } pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); + if (sh->flex_parsers_dv) { + mlx5_list_destroy(sh->flex_parsers_dv); + sh->flex_parsers_dv = NULL; + } /* * Ensure there is no async event handler installed. * Only primary process handles async device events. 
**/ mlx5_flow_counters_mng_close(sh); + if (sh->ct_mng) + mlx5_flow_aso_ct_mng_close(sh); if (sh->aso_age_mng) { mlx5_flow_aso_age_mng_close(sh); sh->aso_age_mng = NULL; @@ -1496,16 +1406,13 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) mlx5_aso_flow_mtrs_mng_close(sh); mlx5_flow_ipool_destroy(sh); mlx5_os_dev_shared_handler_uninstall(sh); - if (sh->tx_uar) { - mlx5_glue->devx_free_uar(sh->tx_uar); - sh->tx_uar = NULL; - } - if (sh->tis) - claim_zero(mlx5_devx_cmd_destroy(sh->tis)); + mlx5_rxtx_uars_release(sh); + do { + if (sh->tis[i]) + claim_zero(mlx5_devx_cmd_destroy(sh->tis[i])); + } while (++i < sh->bond.n_port); if (sh->td) claim_zero(mlx5_devx_cmd_destroy(sh->td)); - if (sh->devx_rx_uar) - mlx5_glue->devx_free_uar(sh->devx_rx_uar); MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL); pthread_mutex_destroy(&sh->txpp.mutex); mlx5_free(sh); @@ -1625,10 +1532,10 @@ mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused, struct rte_eth_udp_tunnel *udp_tunnel) { MLX5_ASSERT(udp_tunnel != NULL); - if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN && + if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN && udp_tunnel->udp_port == 4789) return 0; - if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE && + if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE && udp_tunnel->udp_port == 4790) return 0; return -ENOTSUP; @@ -1655,8 +1562,8 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) * UAR register table follows the process private structure. BlueFlame * registers for Tx queues are stored in the table. */ - ppriv_size = - sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *); + ppriv_size = sizeof(struct mlx5_proc_priv) + + priv->txqs_n * sizeof(struct mlx5_uar_data); ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size, RTE_CACHE_LINE_SIZE, dev->device->numa_node); if (!ppriv) { @@ -1665,6 +1572,8 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) } ppriv->uar_table_sz = priv->txqs_n; dev->process_private = ppriv; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + priv->sh->pppriv = ppriv; return 0; } @@ -1727,25 +1636,22 @@ mlx5_dev_close(struct rte_eth_dev *dev) mlx5_action_handle_flush(dev); mlx5_flow_meter_flush(dev, NULL); /* Prevent crashes when queues are still in use. */ - dev->rx_pkt_burst = removed_rx_burst; - dev->tx_pkt_burst = removed_tx_burst; + dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; + dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; rte_wmb(); /* Disable datapath on secondary process. */ mlx5_mp_os_req_stop_rxtx(dev); /* Free the eCPRI flex parser resource. */ mlx5_flex_parser_ecpri_release(dev); - if (priv->rxqs != NULL) { + mlx5_flex_item_port_cleanup(dev); + if (priv->rxq_privs != NULL) { /* XXX race condition if mlx5_rx_burst() is still running. */ rte_delay_us_sleep(1000); for (i = 0; (i != priv->rxqs_n); ++i) mlx5_rxq_release(dev, i); priv->rxqs_n = 0; - priv->rxqs = NULL; - } - if (priv->representor) { - /* Each representor has a dedicated interrupts handler */ - mlx5_free(dev->intr_handle); - dev->intr_handle = NULL; + mlx5_free(priv->rxq_privs); + priv->rxq_privs = NULL; } if (priv->txqs != NULL) { /* XXX race condition if mlx5_tx_burst() is still running. 
*/ @@ -1765,8 +1671,6 @@ mlx5_dev_close(struct rte_eth_dev *dev) if (priv->mreg_cp_tbl) mlx5_hlist_destroy(priv->mreg_cp_tbl); mlx5_mprq_free_mp(dev); - if (priv->sh->ct_mng) - mlx5_flow_aso_ct_mng_close(priv->sh); mlx5_os_free_shared_dr(priv); if (priv->rss_conf.rss_key != NULL) mlx5_free(priv->rss_conf.rss_key); @@ -1813,7 +1717,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* * Free the shared context in last turn, because the cleanup * routines above may use some shared fields, like - * mlx5_os_mac_addr_flush() uses ibdev_path for retrieveing + * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving * ifindex if Netlink fails. */ mlx5_free_shared_dev_ctx(priv->sh); @@ -2055,9 +1959,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque) } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { config->mprq.enabled = !!tmp; } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { - config->mprq.stride_num_n = tmp; + config->mprq.log_stride_num = tmp; } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) { - config->mprq.stride_size_n = tmp; + config->mprq.log_stride_size = tmp; } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { config->mprq.max_memcpy_len = tmp; } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { @@ -2133,7 +2037,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque) if (tmp != MLX5_RCM_NONE && tmp != MLX5_RCM_LIGHT && tmp != MLX5_RCM_AGGR) { - DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val); + DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val); rte_errno = EINVAL; return -rte_errno; } @@ -2142,10 +2046,13 @@ mlx5_args_check(const char *key, const char *val, void *opaque) config->decap_en = !!tmp; } else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) { config->allow_duplicate_pattern = !!tmp; + } else if (strcmp(MLX5_DELAY_DROP, key) == 0) { + config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD); + config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN); } else { - DRV_LOG(WARNING, "%s: unknown parameter", key); - rte_errno = EINVAL; - return -rte_errno; + DRV_LOG(WARNING, + "%s: unknown parameter, maybe it's for another class.", + key); } return 0; } @@ -2164,74 +2071,25 @@ mlx5_args_check(const char *key, const char *val, void *opaque) int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) { - const char **params = (const char *[]){ - MLX5_DRIVER_KEY, - MLX5_RXQ_CQE_COMP_EN, - MLX5_RXQ_PKT_PAD_EN, - MLX5_RX_MPRQ_EN, - MLX5_RX_MPRQ_LOG_STRIDE_NUM, - MLX5_RX_MPRQ_LOG_STRIDE_SIZE, - MLX5_RX_MPRQ_MAX_MEMCPY_LEN, - MLX5_RXQS_MIN_MPRQ, - MLX5_TXQ_INLINE, - MLX5_TXQ_INLINE_MIN, - MLX5_TXQ_INLINE_MAX, - MLX5_TXQ_INLINE_MPW, - MLX5_TXQS_MIN_INLINE, - MLX5_TXQS_MAX_VEC, - MLX5_TXQ_MPW_EN, - MLX5_TXQ_MPW_HDR_DSEG_EN, - MLX5_TXQ_MAX_INLINE_LEN, - MLX5_TX_DB_NC, - MLX5_TX_PP, - MLX5_TX_SKEW, - MLX5_TX_VEC_EN, - MLX5_RX_VEC_EN, - MLX5_L3_VXLAN_EN, - MLX5_VF_NL_EN, - MLX5_DV_ESW_EN, - MLX5_DV_FLOW_EN, - MLX5_DV_XMETA_EN, - MLX5_LACP_BY_USER, - MLX5_MR_EXT_MEMSEG_EN, - MLX5_REPRESENTOR, - MLX5_MAX_DUMP_FILES_NUM, - MLX5_LRO_TIMEOUT_USEC, - RTE_DEVARGS_KEY_CLASS, - MLX5_HP_BUF_SIZE, - MLX5_RECLAIM_MEM, - MLX5_SYS_MEM_EN, - MLX5_DECAP_EN, - MLX5_ALLOW_DUPLICATE_PATTERN, - MLX5_MR_MEMPOOL_REG_EN, - NULL, - }; struct rte_kvargs *kvlist; int ret = 0; - int i; if (devargs == NULL) return 0; /* Following UGLY cast is done to pass checkpatch. 
*/ - kvlist = rte_kvargs_parse(devargs->args, params); + kvlist = rte_kvargs_parse(devargs->args, NULL); if (kvlist == NULL) { rte_errno = EINVAL; return -rte_errno; } /* Process parameters. */ - for (i = 0; (params[i] != NULL); ++i) { - if (rte_kvargs_count(kvlist, params[i])) { - ret = rte_kvargs_process(kvlist, params[i], - mlx5_args_check, config); - if (ret) { - rte_errno = EINVAL; - rte_kvargs_free(kvlist); - return -rte_errno; - } - } + ret = rte_kvargs_process(kvlist, NULL, mlx5_args_check, config); + if (ret) { + rte_errno = EINVAL; + ret = -rte_errno; } rte_kvargs_free(kvlist); - return 0; + return ret; } /** @@ -2254,6 +2112,8 @@ void mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, struct mlx5_dev_config *config) { + struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr; + if (config->txq_inline_min != MLX5_ARG_UNSET) { /* Application defines size of inlined data explicitly. */ if (spawn->pci_dev != NULL) { @@ -2273,9 +2133,9 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, } goto exit; } - if (config->hca_attr.eth_net_offloads) { + if (hca_attr->eth_net_offloads) { /* We have DevX enabled, inline mode queried successfully. */ - switch (config->hca_attr.wqe_inline_mode) { + switch (hca_attr->wqe_inline_mode) { case MLX5_CAP_INLINE_MODE_L2: /* outer L2 header must be inlined. */ config->txq_inline_min = MLX5_INLINE_HSIZE_L2; @@ -2284,14 +2144,14 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, /* No inline data are required by NIC. */ config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; config->hw_vlan_insert = - config->hca_attr.wqe_vlan_insert; + hca_attr->wqe_vlan_insert; DRV_LOG(DEBUG, "Tx VLAN insertion is supported"); goto exit; case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: /* inline mode is defined by NIC vport context. */ - if (!config->hca_attr.eth_virt) + if (!hca_attr->eth_virt) break; - switch (config->hca_attr.vport_inline_mode) { + switch (hca_attr->vport_inline_mode) { case MLX5_INLINE_MODE_NONE: config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; @@ -2394,17 +2254,17 @@ mlx5_set_metadata_mask(struct rte_eth_dev *dev) break; } if (sh->dv_mark_mask && sh->dv_mark_mask != mark) - DRV_LOG(WARNING, "metadata MARK mask mismatche %08X:%08X", + DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X", sh->dv_mark_mask, mark); else sh->dv_mark_mask = mark; if (sh->dv_meta_mask && sh->dv_meta_mask != meta) - DRV_LOG(WARNING, "metadata META mask mismatche %08X:%08X", + DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X", sh->dv_meta_mask, meta); else sh->dv_meta_mask = meta; if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0) - DRV_LOG(WARNING, "metadata reg_c0 mask mismatche %08X:%08X", + DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X", sh->dv_meta_mask, reg_c0); else sh->dv_regc0_mask = reg_c0; @@ -2435,25 +2295,26 @@ rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n) } /** - * Comparison callback to sort device data. + * Check sibling device configurations. * - * This is meant to be used with qsort(). + * Sibling devices sharing the Infiniband device context should have compatible + * configurations. This regards representors and bonding device. * - * @param a[in] - * Pointer to pointer to first data object. - * @param b[in] - * Pointer to pointer to second data object. + * @param sh + * Shared device context. + * @param config + * Configuration of the device is going to be created. + * @param dpdk_dev + * Backing DPDK device. 
* * @return - * 0 if both objects are equal, less than 0 if the first argument is less - * than the second, greater than 0 otherwise. + * 0 on success, EINVAL otherwise */ int -mlx5_dev_check_sibling_config(struct mlx5_priv *priv, +mlx5_dev_check_sibling_config(struct mlx5_dev_ctx_shared *sh, struct mlx5_dev_config *config, struct rte_device *dpdk_dev) { - struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_dev_config *sh_conf = NULL; uint16_t port_id; @@ -2466,7 +2327,7 @@ mlx5_dev_check_sibling_config(struct mlx5_priv *priv, struct mlx5_priv *opriv = rte_eth_devices[port_id].data->dev_private; - if (opriv && opriv != priv && opriv->sh == sh) { + if (opriv && opriv->sh == sh) { sh_conf = &opriv->config; break; } @@ -2640,8 +2501,6 @@ static struct mlx5_class_driver mlx5_net_driver = { .id_table = mlx5_pci_id_map, .probe = mlx5_os_net_probe, .remove = mlx5_net_remove, - .dma_map = mlx5_net_dma_map, - .dma_unmap = mlx5_net_dma_unmap, .probe_again = 1, .intr_lsc = 1, .intr_rmv = 1,
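Reviewer note (not part of the patch): the devargs handling changed above in two ways — mlx5_args() now parses with a NULL key list and feeds every key/value pair through mlx5_args_check(), so unknown keys only trigger a warning ("maybe it's for another class") instead of failing, and the new "delay_drop" parameter is decoded as a bitmask into std_delay_drop / hp_delay_drop. The sketch below is a minimal, self-contained illustration of that pattern only; the "demo_" names and the (1u << 0)/(1u << 1) bit values are assumptions for illustration, the real MLX5_DELAY_DROP_* definitions live in the driver headers.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rte_kvargs.h>

#define DEMO_DELAY_DROP "delay_drop"        /* same devarg name as MLX5_DELAY_DROP */
#define DEMO_DELAY_DROP_STANDARD (1u << 0)  /* assumed: standard Rx queues */
#define DEMO_DELAY_DROP_HAIRPIN  (1u << 1)  /* assumed: hairpin Rx queues */

struct demo_config {
	unsigned int std_delay_drop:1;
	unsigned int hp_delay_drop:1;
};

/* Callback run on every key/value pair, mirroring mlx5_args_check(). */
static int
demo_args_check(const char *key, const char *val, void *opaque)
{
	struct demo_config *config = opaque;
	unsigned long tmp = strtoul(val, NULL, 0);

	if (strcmp(key, DEMO_DELAY_DROP) == 0) {
		/* Bit 0 -> standard Rx queues, bit 1 -> hairpin Rx queues. */
		config->std_delay_drop = !!(tmp & DEMO_DELAY_DROP_STANDARD);
		config->hp_delay_drop = !!(tmp & DEMO_DELAY_DROP_HAIRPIN);
	} else {
		/* Unknown keys are tolerated; they may belong to another class. */
		printf("%s: unknown parameter, maybe it's for another class.\n",
		       key);
	}
	return 0;
}

static int
demo_parse(const char *args, struct demo_config *config)
{
	/* NULL valid_keys: accept any key, as the reworked mlx5_args() does. */
	struct rte_kvargs *kvlist = rte_kvargs_parse(args, NULL);
	int ret;

	if (kvlist == NULL)
		return -EINVAL;
	/* NULL key: run the callback once per key/value pair. */
	ret = rte_kvargs_process(kvlist, NULL, demo_args_check, config);
	rte_kvargs_free(kvlist);
	return ret;
}

With this, a devargs string such as "delay_drop=3,foo=1" would set both delay-drop flags and only warn about "foo" (a made-up key here). The NULL-key rte_kvargs calls are exactly the ones the patch itself introduces, so this sketch assumes a DPDK version where rte_kvargs accepts NULL in those positions.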