X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5.c;h=1e4c695f84b37397c23deb018d1824ee5420b4a6;hb=a0bfe9d56f746c749ff4cf275e88469fd952b01c;hp=8fcb78a7926a414685fc118aa24b659936ea84f0;hpb=1c5064044fbe5bfa0bc1f33041b52414964bbd89;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 8fcb78a792..1e4c695f84 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -13,16 +13,6 @@
 #include 
 #include 
 
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include 
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
 #include 
 #include 
 #include 
@@ -40,6 +30,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
@@ -180,16 +172,16 @@
 /* Flow memory reclaim mode. */
 #define MLX5_RECLAIM_MEM "reclaim_mem_mode"
 
-static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
+/* The default memory allocator used in PMD. */
+#define MLX5_SYS_MEM_EN "sys_mem_en"
+/* Decap will be used or not. */
+#define MLX5_DECAP_EN "decap_en"
 
 /* Shared memory between primary and secondary processes. */
 struct mlx5_shared_data *mlx5_shared_data;
 
-/* Spinlock for mlx5_shared_data allocation. */
-static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
-
-/* Process local data for secondary processes. */
-static struct mlx5_local_data mlx5_local_data;
+/** Driver-specific log messages type. */
+int mlx5_logtype;
 
 static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 						LIST_HEAD_INITIALIZER();
@@ -204,8 +196,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_encap_decap_ipool",
 	},
 	{
@@ -215,8 +207,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_push_vlan_ipool",
 	},
 	{
@@ -226,8 +218,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_tag_ipool",
 	},
 	{
@@ -237,8 +229,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_port_id_ipool",
 	},
 	{
@@ -248,8 +240,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_jump_ipool",
 	},
 #endif
@@ -260,8 +252,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_meter_ipool",
 	},
 	{
@@ -271,8 +263,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_mcp_ipool",
 	},
 	{
@@ -282,8 +274,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_hrxq_ipool",
 	},
 	{
@@ -297,8 +289,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_flow_handle_ipool",
 	},
 	{
@@ -306,8 +298,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.trunk_size = 4096,
 		.need_lock = 1,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "rte_flow_ipool",
 	},
 };
@@ -333,15 +325,16 @@ mlx5_flow_id_pool_alloc(uint32_t max_id)
 	struct mlx5_flow_id_pool *pool;
 	void *mem;
 
-	pool = rte_zmalloc("id pool allocation", sizeof(*pool),
-			   RTE_CACHE_LINE_SIZE);
+	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool),
+			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
 	if (!pool) {
 		DRV_LOG(ERR, "can't allocate id pool");
 		rte_errno = ENOMEM;
 		return NULL;
 	}
-	mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
-			  RTE_CACHE_LINE_SIZE);
+	mem = mlx5_malloc(MLX5_MEM_ZERO,
+			  MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
+			  RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
 	if (!mem) {
 		DRV_LOG(ERR, "can't allocate mem for id pool");
 		rte_errno = ENOMEM;
@@ -354,7 +347,7 @@ mlx5_flow_id_pool_alloc(uint32_t max_id)
 	pool->max_id = max_id;
 	return pool;
 error:
-	rte_free(pool);
+	mlx5_free(pool);
 	return NULL;
 }
@@ -367,8 +360,8 @@ error:
 void
 mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool)
 {
-	rte_free(pool->free_arr);
-	rte_free(pool);
+	mlx5_free(pool->free_arr);
+	mlx5_free(pool);
 }
 
 /**
@@ -420,14 +413,15 @@ mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id)
 		size = pool->curr - pool->free_arr;
 		size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
 		MLX5_ASSERT(size2 > size);
-		mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
+		mem = mlx5_malloc(0, size2 * sizeof(uint32_t), 0,
+				  SOCKET_ID_ANY);
 		if (!mem) {
 			DRV_LOG(ERR, "can't allocate mem for id pool");
 			rte_errno = ENOMEM;
 			return -rte_errno;
 		}
 		memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
-		rte_free(pool->free_arr);
+		mlx5_free(pool->free_arr);
 		pool->free_arr = mem;
 		pool->curr = pool->free_arr + size;
 		pool->last = pool->free_arr + size2;
@@ -496,7 +490,7 @@ mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
 	LIST_REMOVE(mng, next);
 	claim_zero(mlx5_devx_cmd_destroy(mng->dm));
 	claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
-	rte_free(mem);
+	mlx5_free(mem);
 }
 
 /**
@@ -544,10 +538,10 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 							  (pool, j)->dcs));
 			}
 			TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next);
-			rte_free(pool);
+			mlx5_free(pool);
 			pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
 		}
-		rte_free(sh->cmng.ccont[i].pools);
+		mlx5_free(sh->cmng.ccont[i].pools);
 	}
 	mng = LIST_FIRST(&sh->cmng.mem_mngs);
 	while (mng) {
@@ -649,6 +643,11 @@ mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
 	uint32_t ids[8];
 	int ret;
 
+	if (!priv->config.hca_attr.parse_graph_flex_node) {
+		DRV_LOG(ERR, "Dynamic flex parser is not supported "
+			"for device %s.", priv->dev_data->name);
+		return -ENOTSUP;
+	}
 	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
 	/* 8 bytes now: 4B common header + 4B message body header. */
 	node.header_length_base_value = 0x8;
@@ -708,6 +707,141 @@ mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
 	prf->obj = NULL;
 }
 
+/*
+ * Allocate Rx and Tx UARs in a robust fashion.
+ * This routine handles the following UAR allocation issues:
+ *
+ *  - tries to allocate the UAR with the most appropriate memory
+ *    mapping type from the ones supported by the host
+ *
+ *  - tries to allocate the UAR with a non-NULL base address;
+ *    OFED 5.0.x and upstream rdma_core before v29 returned NULL as
+ *    the UAR base address if the UAR was not the first object in the
+ *    UAR page. It caused PMD failures, so we should try to get another
+ *    UAR until we get the first one with a non-NULL base address.
+ */
+static int
+mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
+		     const struct mlx5_dev_config *config)
+{
+	uint32_t uar_mapping, retry;
+	int err = 0;
+
+	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
+#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
+		/* Control the mapping type according to the settings. */
+		uar_mapping = (config->dbnc == MLX5_TXDB_NCACHED) ?
+			      MLX5DV_UAR_ALLOC_TYPE_NC :
+			      MLX5DV_UAR_ALLOC_TYPE_BF;
+#else
+		RTE_SET_USED(config);
+		/*
+		 * It seems we have no way to control the memory mapping type
+		 * for the UAR, so the default "Write-Combining" type is assumed.
+		 * The UAR initialization on queue creation queries the
+		 * actual mapping type done by Verbs/kernel and sets up the
+		 * PMD datapath accordingly.
+		 */
+		uar_mapping = 0;
+#endif
+		sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, uar_mapping);
+#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
+		if (!sh->tx_uar &&
+		    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
+			if (config->dbnc == MLX5_TXDB_CACHED ||
+			    config->dbnc == MLX5_TXDB_HEURISTIC)
+				DRV_LOG(WARNING, "Devarg tx_db_nc setting "
+						 "is not supported by DevX");
+			/*
+			 * In some environments like a virtual machine the
+			 * Write-Combining mapping might not be supported and
+			 * UAR allocation fails. We try the "Non-Cached"
+			 * mapping for this case. The tx_burst routines take
+			 * the UAR mapping type into account on UAR setup
+			 * on queue creation.
+			 */
+			DRV_LOG(WARNING, "Failed to allocate Tx DevX UAR (BF)");
+			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
+			sh->tx_uar = mlx5_glue->devx_alloc_uar
+							(sh->ctx, uar_mapping);
+		} else if (!sh->tx_uar &&
+			   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
+			if (config->dbnc == MLX5_TXDB_NCACHED)
+				DRV_LOG(WARNING, "Devarg tx_db_nc setting "
+						 "is not supported by DevX");
+			/*
+			 * If Verbs/kernel does not support "Non-Cached",
+			 * try the "Write-Combining" mapping.
+			 */
+			DRV_LOG(WARNING, "Failed to allocate Tx DevX UAR (NC)");
+			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
+			sh->tx_uar = mlx5_glue->devx_alloc_uar
+							(sh->ctx, uar_mapping);
+		}
+#endif
+		if (!sh->tx_uar) {
+			DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (BF/NC)");
+			err = ENOMEM;
+			goto exit;
+		}
+		if (sh->tx_uar->base_addr)
+			break;
+		/*
+		 * The UARs are allocated by rdma_core within the
+		 * IB device context; on context closure all UARs
+		 * will be freed, so there is no memory/object leakage.
+		 */
+		DRV_LOG(WARNING, "Retrying to allocate Tx DevX UAR");
+		sh->tx_uar = NULL;
+	}
+	/* Check whether we finally succeeded with valid UAR allocation. */
+	if (!sh->tx_uar) {
+		DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (NULL base)");
+		err = ENOMEM;
+		goto exit;
+	}
+	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
+		uar_mapping = 0;
+		sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
+							(sh->ctx, uar_mapping);
+#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
+		if (!sh->devx_rx_uar &&
+		    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
+			/*
+			 * Rx UAR is used to control interrupts only;
+			 * there should be no noticeable datapath impact,
+			 * so the "Non-Cached" mapping can be tried safely.
+			 */
+			DRV_LOG(WARNING, "Failed to allocate Rx DevX UAR (BF)");
+			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
+			sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
+							(sh->ctx, uar_mapping);
+		}
+#endif
+		if (!sh->devx_rx_uar) {
+			DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (BF/NC)");
+			err = ENOMEM;
+			goto exit;
+		}
+		if (sh->devx_rx_uar->base_addr)
+			break;
+		/*
+		 * The UARs are allocated by rdma_core within the
+		 * IB device context; on context closure all UARs
+		 * will be freed, so there is no memory/object leakage.
+		 */
+		DRV_LOG(WARNING, "Retrying to allocate Rx DevX UAR");
+		sh->devx_rx_uar = NULL;
+	}
+	/* Check whether we finally succeeded with valid UAR allocation. */
+	if (!sh->devx_rx_uar) {
+		DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (NULL base)");
+		err = ENOMEM;
+	}
+exit:
+	return err;
+}
+
 /**
  * Allocate shared device context. If there is multiport device the
  * master and representors will share this context, if there is single
@@ -751,11 +885,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	}
 	/* No device found, we have to create new shared context. */
 	MLX5_ASSERT(spawn->max_port);
-	sh = rte_zmalloc("ethdev shared ib context",
+	sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
 			 sizeof(struct mlx5_dev_ctx_shared) +
 			 spawn->max_port *
 			 sizeof(struct mlx5_dev_shared_port),
-			 RTE_CACHE_LINE_SIZE);
+			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
 	if (!sh) {
 		DRV_LOG(ERR, "shared context allocation failure");
 		rte_errno = ENOMEM;
@@ -809,12 +943,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 			err = ENOMEM;
 			goto error;
 		}
-		sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, 0);
-		if (!sh->tx_uar) {
-			DRV_LOG(ERR, "Failed to allocate DevX UAR.");
-			err = ENOMEM;
+		err = mlx5_alloc_rxtx_uars(sh, config);
+		if (err)
 			goto error;
-		}
+		MLX5_ASSERT(sh->tx_uar && sh->tx_uar->base_addr);
+		MLX5_ASSERT(sh->devx_rx_uar && sh->devx_rx_uar->base_addr);
 	}
 	sh->flow_id_pool = mlx5_flow_id_pool_alloc
 					((1 << HAIRPIN_FLOW_ID_BITS) - 1);
@@ -870,25 +1003,23 @@ error:
 	pthread_mutex_destroy(&sh->txpp.mutex);
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	MLX5_ASSERT(sh);
-	if (sh->cnt_id_tbl) {
+	if (sh->cnt_id_tbl)
 		mlx5_l3t_destroy(sh->cnt_id_tbl);
-		sh->cnt_id_tbl = NULL;
-	}
-	if (sh->tx_uar) {
-		mlx5_glue->devx_free_uar(sh->tx_uar);
-		sh->tx_uar = NULL;
-	}
 	if (sh->tis)
 		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
 	if (sh->td)
 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
+	if (sh->devx_rx_uar)
+		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
+	if (sh->tx_uar)
+		mlx5_glue->devx_free_uar(sh->tx_uar);
 	if (sh->pd)
 		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
 	if (sh->flow_id_pool)
 		mlx5_flow_id_pool_release(sh->flow_id_pool);
-	rte_free(sh);
+	mlx5_free(sh);
 	MLX5_ASSERT(err > 0);
 	rte_errno = err;
 	return NULL;
@@ -932,6 +1063,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	mlx5_mr_release_cache(&sh->share_cache);
 	/* Remove context from the global device list. */
 	LIST_REMOVE(sh, next);
+	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	/*
 	 * Ensure there is no async event handler installed.
 	 * Only primary process handles async device events.
@@ -953,12 +1085,15 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
 	if (sh->td)
 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
+	if (sh->devx_rx_uar)
+		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
 	if (sh->flow_id_pool)
 		mlx5_flow_id_pool_release(sh->flow_id_pool);
 	pthread_mutex_destroy(&sh->txpp.mutex);
-	rte_free(sh);
+	mlx5_free(sh);
+	return;
 exit:
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 }
@@ -992,7 +1127,7 @@ mlx5_free_table_hash_list(struct mlx5_priv *priv)
 					entry);
 		MLX5_ASSERT(tbl_data);
 		mlx5_hlist_remove(sh->flow_tbls, pos);
-		rte_free(tbl_data);
+		mlx5_free(tbl_data);
 	}
 	table_key.direction = 1;
 	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
@@ -1001,7 +1136,7 @@ mlx5_free_table_hash_list(struct mlx5_priv *priv)
 					entry);
 		MLX5_ASSERT(tbl_data);
 		mlx5_hlist_remove(sh->flow_tbls, pos);
-		rte_free(tbl_data);
+		mlx5_free(tbl_data);
 	}
 	table_key.direction = 0;
 	table_key.domain = 1;
@@ -1011,7 +1146,7 @@ mlx5_free_table_hash_list(struct mlx5_priv *priv)
 					entry);
 		MLX5_ASSERT(tbl_data);
 		mlx5_hlist_remove(sh->flow_tbls, pos);
-		rte_free(tbl_data);
+		mlx5_free(tbl_data);
 	}
 	mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
 }
@@ -1055,8 +1190,9 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
 			.direction = 0,
 		}
 	};
-	struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL,
-							  sizeof(*tbl_data), 0);
+	struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO,
+							  sizeof(*tbl_data), 0,
+							  SOCKET_ID_ANY);
 
 	if (!tbl_data) {
 		err = ENOMEM;
@@ -1069,7 +1205,8 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
 	rte_atomic32_init(&tbl_data->tbl.refcnt);
 	rte_atomic32_inc(&tbl_data->tbl.refcnt);
 	table_key.direction = 1;
-	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
+			       SOCKET_ID_ANY);
 	if (!tbl_data) {
 		err = ENOMEM;
 		goto error;
 	}
@@ -1082,7 +1219,8 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
 	rte_atomic32_inc(&tbl_data->tbl.refcnt);
 	table_key.direction = 0;
 	table_key.domain = 1;
-	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
+			       SOCKET_ID_ANY);
 	if (!tbl_data) {
 		err = ENOMEM;
 		goto error;
 	}
@@ -1100,55 +1238,6 @@ error:
 	return err;
 }
 
-/**
- * Initialize shared data between primary and secondary process.
- *
- * A memzone is reserved by primary process and secondary processes attach to
- * the memzone.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_init_shared_data(void)
-{
-	const struct rte_memzone *mz;
-	int ret = 0;
-
-	rte_spinlock_lock(&mlx5_shared_data_lock);
-	if (mlx5_shared_data == NULL) {
-		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-			/* Allocate shared memory. */
-			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
-						 sizeof(*mlx5_shared_data),
-						 SOCKET_ID_ANY, 0);
-			if (mz == NULL) {
-				DRV_LOG(ERR,
-					"Cannot allocate mlx5 shared data");
-				ret = -rte_errno;
-				goto error;
-			}
-			mlx5_shared_data = mz->addr;
-			memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
-			rte_spinlock_init(&mlx5_shared_data->lock);
-		} else {
-			/* Lookup allocated shared memory. */
-			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
-			if (mz == NULL) {
-				DRV_LOG(ERR,
-					"Cannot attach mlx5 shared data");
-				ret = -rte_errno;
-				goto error;
-			}
-			mlx5_shared_data = mz->addr;
-			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
-		}
-	}
-error:
-	rte_spinlock_unlock(&mlx5_shared_data_lock);
-	return ret;
-}
-
 /**
  * Retrieve integer value from environment variable.
  *
@@ -1215,8 +1304,8 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev)
 	 */
 	ppriv_size =
 		sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
-	ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size,
-				  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
+	ppriv = mlx5_malloc(MLX5_MEM_RTE, ppriv_size, RTE_CACHE_LINE_SIZE,
+			    dev->device->numa_node);
 	if (!ppriv) {
 		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
@@ -1237,7 +1326,7 @@ mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
 {
 	if (!dev->process_private)
 		return;
-	rte_free(dev->process_private);
+	mlx5_free(dev->process_private);
 	dev->process_private = NULL;
 }
 
@@ -1290,7 +1379,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	dev->tx_pkt_burst = removed_tx_burst;
 	rte_wmb();
 	/* Disable datapath on secondary process. */
-	mlx5_mp_req_stop_rxtx(dev);
+	mlx5_mp_os_req_stop_rxtx(dev);
 	/* Free the eCPRI flex parser resource. */
 	mlx5_flex_parser_ecpri_release(dev);
 	if (priv->rxqs != NULL) {
@@ -1315,9 +1404,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	mlx5_mprq_free_mp(dev);
 	mlx5_os_free_shared_dr(priv);
 	if (priv->rss_conf.rss_key != NULL)
-		rte_free(priv->rss_conf.rss_key);
+		mlx5_free(priv->rss_conf.rss_key);
 	if (priv->reta_idx != NULL)
-		rte_free(priv->reta_idx);
+		mlx5_free(priv->reta_idx);
 	if (priv->config.vf)
 		mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
 				       dev->data->mac_addrs,
@@ -1528,6 +1617,10 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 			return -rte_errno;
 		}
 		config->reclaim_mode = tmp;
+	} else if (strcmp(MLX5_SYS_MEM_EN, key) == 0) {
+		config->sys_mem_en = !!tmp;
+	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
+		config->decap_en = !!tmp;
 	} else {
 		DRV_LOG(WARNING, "%s: unknown parameter", key);
 		rte_errno = EINVAL;
@@ -1586,6 +1679,8 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 		MLX5_CLASS_ARG_NAME,
 		MLX5_HP_BUF_SIZE,
 		MLX5_RECLAIM_MEM,
+		MLX5_SYS_MEM_EN,
+		MLX5_DECAP_EN,
 		NULL,
 	};
 	struct rte_kvargs *kvlist;
@@ -1616,60 +1711,6 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 	return 0;
 }
 
-/**
- * PMD global initialization.
- *
- * Independent from individual device, this function initializes global
- * per-PMD data structures distinguishing primary and secondary processes.
- * Hence, each initialization is called once per a process.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_init_once(void)
-{
-	struct mlx5_shared_data *sd;
-	struct mlx5_local_data *ld = &mlx5_local_data;
-	int ret = 0;
-
-	if (mlx5_init_shared_data())
-		return -rte_errno;
-	sd = mlx5_shared_data;
-	MLX5_ASSERT(sd);
-	rte_spinlock_lock(&sd->lock);
-	switch (rte_eal_process_type()) {
-	case RTE_PROC_PRIMARY:
-		if (sd->init_done)
-			break;
-		LIST_INIT(&sd->mem_event_cb_list);
-		rte_rwlock_init(&sd->mem_event_rwlock);
-		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
-						mlx5_mr_mem_event_cb, NULL);
-		ret = mlx5_mp_init_primary(MLX5_MP_NAME,
-					   mlx5_mp_primary_handle);
-		if (ret)
-			goto out;
-		sd->init_done = true;
-		break;
-	case RTE_PROC_SECONDARY:
-		if (ld->init_done)
-			break;
-		ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
-					     mlx5_mp_secondary_handle);
-		if (ret)
-			goto out;
-		++sd->secondary_cnt;
-		ld->init_done = true;
-		break;
-	default:
-		break;
-	}
-out:
-	rte_spinlock_unlock(&sd->lock);
-	return ret;
-}
-
 /**
  * Configures the minimal amount of data to inline into WQE
  * while sending packets.
@@ -2051,16 +2092,19 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
 	}
 };
-struct rte_pci_driver mlx5_driver = {
-	.driver = {
-		.name = MLX5_DRIVER_NAME
+static struct mlx5_pci_driver mlx5_driver = {
+	.driver_class = MLX5_CLASS_NET,
+	.pci_driver = {
+		.driver = {
+			.name = MLX5_DRIVER_NAME,
+		},
+		.id_table = mlx5_pci_id_map,
+		.probe = mlx5_os_pci_probe,
+		.remove = mlx5_pci_remove,
+		.dma_map = mlx5_dma_map,
+		.dma_unmap = mlx5_dma_unmap,
+		.drv_flags = PCI_DRV_FLAGS,
 	},
-	.id_table = mlx5_pci_id_map,
-	.probe = mlx5_os_pci_probe,
-	.remove = mlx5_pci_remove,
-	.dma_map = mlx5_dma_map,
-	.dma_unmap = mlx5_dma_unmap,
-	.drv_flags = PCI_DRV_FLAGS,
 };
 
 /* Initialize driver log type. */
 RTE_LOG_REGISTER(mlx5_logtype, pmd.net.mlx5, NOTICE)
@@ -2071,12 +2115,13 @@
  */
 RTE_INIT(rte_mlx5_pmd_init)
 {
+	mlx5_common_init();
 	/* Build the static tables for Verbs conversion. */
 	mlx5_set_ptype_table();
 	mlx5_set_cksum_table();
 	mlx5_set_swp_types_table();
 	if (mlx5_glue)
-		rte_pci_register(&mlx5_driver);
+		mlx5_pci_driver_register(&mlx5_driver);
 }
 
 RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
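For reference, the allocator conversion applied throughout this patch follows a single pattern: rte_malloc()/rte_zmalloc()/rte_malloc_socket()/rte_free() calls become mlx5_malloc()/mlx5_free() calls that carry an explicit flags word and NUMA socket. The sketch below only restates that call mapping as a minimal example; the mlx5_malloc(flags, size, align, socket) argument order, MLX5_MEM_ZERO, MLX5_MEM_RTE and SOCKET_ID_ANY are taken directly from the hunks above, while the "mlx5_malloc.h" header name and the exact flag semantics are assumptions not confirmed by this diff.

    #include <rte_memory.h>    /* RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY */
    #include <rte_malloc.h>    /* old EAL allocator: rte_zmalloc/rte_free */
    #include "mlx5_malloc.h"   /* assumed header declaring mlx5_malloc/mlx5_free */

    /* Hypothetical helpers showing the before/after call shape only. */
    static void *
    example_alloc_old(size_t len)
    {
            /* Zeroed, cache-line aligned, any NUMA socket. */
            return rte_zmalloc("example", len, RTE_CACHE_LINE_SIZE);
    }

    static void *
    example_alloc_new(size_t len)
    {
            /*
             * MLX5_MEM_ZERO requests zeroed memory; MLX5_MEM_RTE is added in
             * the hunks above where the buffer apparently must stay in rte
             * memory (e.g. the shared device context). The NUMA socket is
             * now passed explicitly instead of being implied by the caller.
             */
            return mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE, len,
                               RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
    }

Buffers obtained from rte_zmalloc()/rte_malloc_socket() were released with rte_free(); buffers obtained from mlx5_malloc() are released with mlx5_free(), as the hunks consistently show. The new sys_mem_en devarg registered in this patch presumably selects the default backing allocator for mlx5_malloc() (system memory versus rte memory), per the "default memory allocator used in PMD" comment above.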