X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5.c;h=60301d3244d2ca31765759e4f9d8ec1de409dae9;hb=d0b3ef1a6e3804c76f0d35b16946fc25d54443ab;hp=77aeac85c591819437b0ac94006f74de92b8f54d;hpb=8a5a0aad5d3e4f4f75ca81932eb247de94765685;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 77aeac85c5..60301d3244 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -37,6 +38,7 @@
 #include "mlx5_autoconf.h"
 #include "mlx5_mr.h"
 #include "mlx5_flow.h"
+#include "mlx5_flow_os.h"
 #include "rte_pmd_mlx5.h"
 
 /* Device parameter to enable RX completion queue compression. */
@@ -186,118 +188,130 @@ static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	{
+	[MLX5_IPOOL_DECAP_ENCAP] = {
 		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
 		.grow_shift = 2,
-		.need_lock = 0,
+		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_encap_decap_ipool",
 	},
-	{
+	[MLX5_IPOOL_PUSH_VLAN] = {
 		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
 		.grow_shift = 2,
-		.need_lock = 0,
+		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_push_vlan_ipool",
 	},
-	{
+	[MLX5_IPOOL_TAG] = {
 		.size = sizeof(struct mlx5_flow_dv_tag_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
 		.grow_shift = 2,
-		.need_lock = 0,
+		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_tag_ipool",
 	},
-	{
+	[MLX5_IPOOL_PORT_ID] = {
 		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
 		.grow_shift = 2,
-		.need_lock = 0,
+		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_port_id_ipool",
 	},
-	{
+	[MLX5_IPOOL_JUMP] = {
 		.size = sizeof(struct mlx5_flow_tbl_data_entry),
 		.trunk_size = 64,
 		.grow_trunk = 3,
 		.grow_shift = 2,
-		.need_lock = 0,
+		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_jump_ipool",
 	},
-	{
+	[MLX5_IPOOL_SAMPLE] = {
 		.size = sizeof(struct mlx5_flow_dv_sample_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
 		.grow_shift = 2,
-		.need_lock = 0,
+		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_sample_ipool",
 	},
-	{
+	[MLX5_IPOOL_DEST_ARRAY] = {
 		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
 		.grow_shift = 2,
-		.need_lock = 0,
+		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_dest_array_ipool",
 	},
+	[MLX5_IPOOL_TUNNEL_ID] = {
+		.size = sizeof(struct mlx5_flow_tunnel),
+		.trunk_size = MLX5_MAX_TUNNELS,
+		.need_lock = 1,
+		.release_mem_en = 1,
+		.type = "mlx5_tunnel_offload",
+	},
+	[MLX5_IPOOL_TNL_TBL_ID] = {
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_tnl_tbl_ipool",
+	},
 #endif
-	{
+	[MLX5_IPOOL_MTR] = {
 		.size = sizeof(struct mlx5_flow_meter),
 		.trunk_size = 64,
 		.grow_trunk = 3,
 		.grow_shift = 2,
-		.need_lock = 0,
+		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_meter_ipool",
 	},
-	{
+	[MLX5_IPOOL_MCP] = {
 		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
 		.grow_shift = 2,
-		.need_lock = 0,
+		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_mcp_ipool",
 	},
-	{
+	[MLX5_IPOOL_HRXQ] = {
 		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
 		.trunk_size = 64,
 		.grow_trunk = 3,
 		.grow_shift = 2,
-		.need_lock = 0,
+		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_hrxq_ipool",
 	},
-	{
+	[MLX5_IPOOL_MLX5_FLOW] = {
 		/*
 		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
 		 * It set in run time according to PCI function configuration.
@@ -306,13 +320,13 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.trunk_size = 64,
 		.grow_trunk = 3,
 		.grow_shift = 2,
-		.need_lock = 0,
+		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_flow_handle_ipool",
 	},
-	{
+	[MLX5_IPOOL_RTE_FLOW] = {
 		.size = sizeof(struct rte_flow),
 		.trunk_size = 4096,
 		.need_lock = 1,
@@ -321,6 +335,22 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "rte_flow_ipool",
 	},
+	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_rss_id_ipool",
+	},
+	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
+		.size = sizeof(struct mlx5_shared_action_rss),
+		.trunk_size = 64,
+		.grow_trunk = 3,
+		.grow_shift = 2,
+		.need_lock = 1,
+		.release_mem_en = 1,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.type = "mlx5_shared_action_rss",
+	},
 };
 
 
@@ -330,124 +360,69 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 #define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096
 
 /**
- * Allocate ID pool structure.
+ * Initialize the ASO aging management structure.
  *
- * @param[in] max_id
- *   The maximum id can be allocated from the pool.
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object to free
  *
  * @return
- *   Pointer to pool object, NULL value otherwise.
- */
-struct mlx5_flow_id_pool *
-mlx5_flow_id_pool_alloc(uint32_t max_id)
-{
-	struct mlx5_flow_id_pool *pool;
-	void *mem;
-
-	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool),
-			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
-	if (!pool) {
-		DRV_LOG(ERR, "can't allocate id pool");
-		rte_errno = ENOMEM;
-		return NULL;
-	}
-	mem = mlx5_malloc(MLX5_MEM_ZERO,
-			  MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
-			  RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
-	if (!mem) {
-		DRV_LOG(ERR, "can't allocate mem for id pool");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	pool->free_arr = mem;
-	pool->curr = pool->free_arr;
-	pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE;
-	pool->base_index = 0;
-	pool->max_id = max_id;
-	return pool;
-error:
-	mlx5_free(pool);
-	return NULL;
-}
-
-/**
- * Release ID pool structure.
- *
- * @param[in] pool
- *   Pointer to flow id pool object to free.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-void
-mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool)
+int
+mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
 {
-	mlx5_free(pool->free_arr);
-	mlx5_free(pool);
-}
+	int err;
 
-/**
- * Generate ID.
- *
- * @param[in] pool
- *   Pointer to flow id pool.
- * @param[out] id
- *   The generated ID.
- *
- * @return
- *   0 on success, error value otherwise.
- */
-uint32_t
-mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id)
-{
-	if (pool->curr == pool->free_arr) {
-		if (pool->base_index == pool->max_id) {
-			rte_errno = ENOMEM;
-			DRV_LOG(ERR, "no free id");
-			return -rte_errno;
-		}
-		*id = ++pool->base_index;
+	if (sh->aso_age_mng)
 		return 0;
+	sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng),
+				      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+	if (!sh->aso_age_mng) {
+		DRV_LOG(ERR, "aso_age_mng allocation was failed.");
+		rte_errno = ENOMEM;
+		return -ENOMEM;
+	}
+	err = mlx5_aso_queue_init(sh);
+	if (err) {
+		mlx5_free(sh->aso_age_mng);
+		return -1;
 	}
-	*id = *(--pool->curr);
+	rte_spinlock_init(&sh->aso_age_mng->resize_sl);
+	rte_spinlock_init(&sh->aso_age_mng->free_sl);
+	LIST_INIT(&sh->aso_age_mng->free);
 	return 0;
 }
 
 /**
- * Release ID.
+ * Close and release all the resources of the ASO aging management structure.
  *
- * @param[in] pool
- *   Pointer to flow id pool.
- * @param[out] id
- *   The generated ID.
- *
- * @return
- *   0 on success, error value otherwise.
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object to free.
  */
-uint32_t
-mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id)
+static void
+mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
 {
-	uint32_t size;
-	uint32_t size2;
-	void *mem;
-
-	if (pool->curr == pool->last) {
-		size = pool->curr - pool->free_arr;
-		size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
-		MLX5_ASSERT(size2 > size);
-		mem = mlx5_malloc(0, size2 * sizeof(uint32_t), 0,
-				  SOCKET_ID_ANY);
-		if (!mem) {
-			DRV_LOG(ERR, "can't allocate mem for id pool");
-			rte_errno = ENOMEM;
-			return -rte_errno;
+	int i, j;
+
+	mlx5_aso_queue_stop(sh);
+	mlx5_aso_queue_uninit(sh);
+	if (sh->aso_age_mng->pools) {
+		struct mlx5_aso_age_pool *pool;
+
+		for (i = 0; i < sh->aso_age_mng->next; ++i) {
+			pool = sh->aso_age_mng->pools[i];
+			claim_zero(mlx5_devx_cmd_destroy
+					(pool->flow_hit_aso_obj));
+			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j)
+				if (pool->actions[j].dr_action)
+					claim_zero
+					    (mlx5_flow_os_destroy_flow_action
+					      (pool->actions[j].dr_action));
+			mlx5_free(pool);
 		}
-		memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
-		mlx5_free(pool->free_arr);
-		pool->free_arr = mem;
-		pool->curr = pool->free_arr + size;
-		pool->last = pool->free_arr + size2;
+		mlx5_free(sh->aso_age_mng->pools);
 	}
-	*pool->curr = id;
-	pool->curr++;
-	return 0;
+	mlx5_free(sh->aso_age_mng);
 }
 
 /**
@@ -466,6 +441,7 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
 		age_info = &sh->port[i].age_info;
 		age_info->flags = 0;
 		TAILQ_INIT(&age_info->aged_counters);
+		LIST_INIT(&age_info->aged_aso);
 		rte_spinlock_init(&age_info->aged_sl);
 		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
 	}
@@ -484,14 +460,13 @@
 
 	memset(&sh->cmng, 0, sizeof(sh->cmng));
 	TAILQ_INIT(&sh->cmng.flow_counters);
-	for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) {
-		sh->cmng.ccont[i].min_id = MLX5_CNT_BATCH_OFFSET;
-		sh->cmng.ccont[i].max_id = -1;
-		sh->cmng.ccont[i].last_pool_idx = POOL_IDX_INVALID;
-		TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
-		rte_spinlock_init(&sh->cmng.ccont[i].resize_sl);
-		TAILQ_INIT(&sh->cmng.ccont[i].counters);
-		rte_spinlock_init(&sh->cmng.ccont[i].csl);
+	sh->cmng.min_id = MLX5_CNT_BATCH_OFFSET;
+	sh->cmng.max_id = -1;
+	sh->cmng.last_pool_idx = POOL_IDX_INVALID;
+	rte_spinlock_init(&sh->cmng.pool_update_sl);
+	for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
+		TAILQ_INIT(&sh->cmng.counters[i]);
+		rte_spinlock_init(&sh->cmng.csl[i]);
 	}
 }
 
@@ -508,7 +483,7 @@ mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
 
 	LIST_REMOVE(mng, next);
 	claim_zero(mlx5_devx_cmd_destroy(mng->dm));
-	claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
+	claim_zero(mlx5_os_umem_dereg(mng->umem));
 	mlx5_free(mem);
 }
 
@@ -522,8 +497,7 @@ static void
 mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 {
 	struct mlx5_counter_stats_mem_mng *mng;
-	int i;
-	int j;
+	int i, j;
 	int retries = 1024;
 
 	rte_errno = 0;
@@ -533,34 +507,33 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 			break;
 		rte_pause();
 	}
-	for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) {
+
+	if (sh->cmng.pools) {
 		struct mlx5_flow_counter_pool *pool;
-		uint32_t batch = !!(i > 1);
+		uint16_t n_valid = sh->cmng.n_valid;
+		bool fallback = sh->cmng.counter_fallback;
 
-		if (!sh->cmng.ccont[i].pools)
-			continue;
-		pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
-		while (pool) {
-			if (batch && pool->min_dcs)
+		for (i = 0; i < n_valid; ++i) {
+			pool = sh->cmng.pools[i];
+			if (!fallback && pool->min_dcs)
 				claim_zero(mlx5_devx_cmd_destroy
 						(pool->min_dcs));
 			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
-				if (MLX5_POOL_GET_CNT(pool, j)->action)
+				struct mlx5_flow_counter *cnt =
+						MLX5_POOL_GET_CNT(pool, j);
+
+				if (cnt->action)
 					claim_zero
-					 (mlx5_glue->destroy_flow_action
-					  (MLX5_POOL_GET_CNT
-					  (pool, j)->action));
-				if (!batch && MLX5_GET_POOL_CNT_EXT
-				    (pool, j)->dcs)
+					 (mlx5_flow_os_destroy_flow_action
+					  (cnt->action));
+				if (fallback && MLX5_POOL_GET_CNT
+				    (pool, j)->dcs_when_free)
 					claim_zero(mlx5_devx_cmd_destroy
-						(MLX5_GET_POOL_CNT_EXT
-						(pool, j)->dcs));
+						   (cnt->dcs_when_free));
 			}
-			TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next);
 			mlx5_free(pool);
-			pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
 		}
-		mlx5_free(sh->cmng.ccont[i].pools);
+		mlx5_free(sh->cmng.pools);
 	}
 	mng = LIST_FIRST(&sh->cmng.mem_mngs);
 	while (mng) {
@@ -570,6 +543,25 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 	memset(&sh->cmng, 0, sizeof(sh->cmng));
 }
 
+/* Send FLOW_AGED event if needed. */
+void
+mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_age_info *age_info;
+	uint32_t i;
+
+	for (i = 0; i < sh->max_port; i++) {
+		age_info = &sh->port[i].age_info;
+		if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
+			continue;
+		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
+			rte_eth_dev_callback_process
+				(&rte_eth_devices[sh->port[i].devx_ih_port_id],
+				RTE_ETH_EVENT_FLOW_AGED, NULL);
+		age_info->flags = 0;
+	}
+}
+
 /**
  * Initialize the flow resources' indexed mempool.
  *
@@ -926,6 +918,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		goto error;
 	}
 	sh->refcnt = 1;
+	sh->bond_dev = UINT16_MAX;
 	sh->max_port = spawn->max_port;
 	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
 		sizeof(sh->ibdev_name) - 1);
@@ -940,17 +933,15 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
 		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
 	}
-	sh->pd = mlx5_glue->alloc_pd(sh->ctx);
+	sh->pd = mlx5_os_alloc_pd(sh->ctx);
 	if (sh->pd == NULL) {
 		DRV_LOG(ERR, "PD allocation failure");
 		err = ENOMEM;
 		goto error;
 	}
 	if (sh->devx) {
-		uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
-
 		/* Query the EQN for this core. */
-		err = mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->eqn);
+		err = mlx5_glue->devx_query_eqn(sh->ctx, 0, &sh->eqn);
 		if (err) {
 			rte_errno = errno;
 			DRV_LOG(ERR, "Failed to query event queue number %d.",
@@ -984,13 +975,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		MLX5_ASSERT(sh->devx_rx_uar);
 		MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
 	}
-	sh->flow_id_pool = mlx5_flow_id_pool_alloc
-					((1 << HAIRPIN_FLOW_ID_BITS) - 1);
-	if (!sh->flow_id_pool) {
-		DRV_LOG(ERR, "can't create flow id pool");
-		err = ENOMEM;
-		goto error;
-	}
 #ifndef RTE_ARCH_64
 	/* Initialize UAR access locks for 32bit implementations. */
 	rte_spinlock_init(&sh->uar_lock_cq);
@@ -1049,11 +1033,9 @@ error:
 	if (sh->tx_uar)
 		mlx5_glue->devx_free_uar(sh->tx_uar);
 	if (sh->pd)
-		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
+		claim_zero(mlx5_os_dealloc_pd(sh->pd));
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
-	if (sh->flow_id_pool)
-		mlx5_flow_id_pool_release(sh->flow_id_pool);
 	mlx5_free(sh);
 	MLX5_ASSERT(err > 0);
 	rte_errno = err;
@@ -1104,6 +1086,10 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	 *  Only primary process handles async device events.
 	 **/
 	mlx5_flow_counters_mng_close(sh);
+	if (sh->aso_age_mng) {
+		mlx5_flow_aso_age_mng_close(sh);
+		sh->aso_age_mng = NULL;
+	}
 	mlx5_flow_ipool_destroy(sh);
 	mlx5_os_dev_shared_handler_uninstall(sh);
 	if (sh->cnt_id_tbl) {
@@ -1115,7 +1101,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 		sh->tx_uar = NULL;
 	}
 	if (sh->pd)
-		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
+		claim_zero(mlx5_os_dealloc_pd(sh->pd));
 	if (sh->tis)
 		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
 	if (sh->td)
@@ -1124,8 +1110,6 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
-	if (sh->flow_id_pool)
-		mlx5_flow_id_pool_release(sh->flow_id_pool);
 	pthread_mutex_destroy(&sh->txpp.mutex);
 	mlx5_free(sh);
 	return;
@@ -1134,7 +1118,7 @@ exit:
 }
 
 /**
- * Destroy table hash list and all the root entries per domain.
+ * Destroy table hash list.
  *
 * @param[in] priv
 *   Pointer to the private device data structure.
@@ -1143,47 +1127,10 @@ void
 mlx5_free_table_hash_list(struct mlx5_priv *priv)
 {
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_flow_tbl_data_entry *tbl_data;
-	union mlx5_flow_tbl_key table_key = {
-		{
-			.table_id = 0,
-			.reserved = 0,
-			.domain = 0,
-			.direction = 0,
-		}
-	};
-	struct mlx5_hlist_entry *pos;
 
 	if (!sh->flow_tbls)
 		return;
-	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
-	if (pos) {
-		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
-					entry);
-		MLX5_ASSERT(tbl_data);
-		mlx5_hlist_remove(sh->flow_tbls, pos);
-		mlx5_free(tbl_data);
-	}
-	table_key.direction = 1;
-	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
-	if (pos) {
-		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
-					entry);
-		MLX5_ASSERT(tbl_data);
-		mlx5_hlist_remove(sh->flow_tbls, pos);
-		mlx5_free(tbl_data);
-	}
-	table_key.direction = 0;
-	table_key.domain = 1;
-	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
-	if (pos) {
-		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
-					entry);
-		MLX5_ASSERT(tbl_data);
-		mlx5_hlist_remove(sh->flow_tbls, pos);
-		mlx5_free(tbl_data);
-	}
-	mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
+	mlx5_hlist_destroy(sh->flow_tbls);
 }
 
 /**
@@ -1197,79 +1144,46 @@ mlx5_free_table_hash_list(struct mlx5_priv *priv)
 *   Zero on success, positive error code otherwise.
 */
 int
-mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
+mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
 {
+	int err = 0;
+	/* Tables are only used in DV and DR modes. */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	char s[MLX5_HLIST_NAMESIZE];
-	int err = 0;
 
 	MLX5_ASSERT(sh);
 	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
-	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
+	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
+					  0, 0, flow_dv_tbl_create_cb,
+					  flow_dv_tbl_match_cb,
+					  flow_dv_tbl_remove_cb);
 	if (!sh->flow_tbls) {
 		DRV_LOG(ERR, "flow tables with hash creation failed.");
 		err = ENOMEM;
 		return err;
 	}
+	sh->flow_tbls->ctx = sh;
 #ifndef HAVE_MLX5DV_DR
+	struct rte_flow_error error;
+	struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];
+
 	/*
 	 * In case we have not DR support, the zero tables should be created
 	 * because DV expect to see them even if they cannot be created by
 	 * RDMA-CORE.
 	 */
-	union mlx5_flow_tbl_key table_key = {
-		{
-			.table_id = 0,
-			.reserved = 0,
-			.domain = 0,
-			.direction = 0,
-		}
-	};
-	struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO,
-							  sizeof(*tbl_data), 0,
-							  SOCKET_ID_ANY);
-
-	if (!tbl_data) {
-		err = ENOMEM;
-		goto error;
-	}
-	tbl_data->entry.key = table_key.v64;
-	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
-	if (err)
-		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
-	table_key.direction = 1;
-	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
-			       SOCKET_ID_ANY);
-	if (!tbl_data) {
+	if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0, NULL, 0, 1, &error) ||
+	    !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0, NULL, 0, 1, &error) ||
+	    !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0, NULL, 0, 1, &error)) {
 		err = ENOMEM;
 		goto error;
 	}
-	tbl_data->entry.key = table_key.v64;
-	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
-	if (err)
-		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
-	table_key.direction = 0;
-	table_key.domain = 1;
-	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
-			       SOCKET_ID_ANY);
-	if (!tbl_data) {
-		err = ENOMEM;
-		goto error;
-	}
-	tbl_data->entry.key = table_key.v64;
-	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
-	if (err)
-		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
 	return err;
 error:
 	mlx5_free_table_hash_list(priv);
 #endif /* HAVE_MLX5DV_DR */
+#endif
 	return err;
 }
 
@@ -1406,9 +1320,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	 * then this will return directly without any action.
 	 */
 	mlx5_flow_list_flush(dev, &priv->flows, true);
+	mlx5_shared_action_flush(dev);
 	mlx5_flow_meter_flush(dev, NULL);
-	/* Free the intermediate buffers for flow creation. */
-	mlx5_flow_free_intermediate(dev);
 	/* Prevent crashes when queues are still in use. */
 	dev->rx_pkt_burst = removed_rx_burst;
 	dev->tx_pkt_burst = removed_tx_burst;
@@ -1419,7 +1332,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	mlx5_flex_parser_ecpri_release(dev);
 	if (priv->rxqs != NULL) {
 		/* XXX race condition if mlx5_rx_burst() is still running. */
-		usleep(1000);
+		rte_delay_us_sleep(1000);
 		for (i = 0; (i != priv->rxqs_n); ++i)
 			mlx5_rxq_release(dev, i);
 		priv->rxqs_n = 0;
@@ -1427,15 +1340,17 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	}
 	if (priv->txqs != NULL) {
 		/* XXX race condition if mlx5_tx_burst() is still running. */
-		usleep(1000);
+		rte_delay_us_sleep(1000);
 		for (i = 0; (i != priv->txqs_n); ++i)
 			mlx5_txq_release(dev, i);
 		priv->txqs_n = 0;
 		priv->txqs = NULL;
 	}
 	mlx5_proc_priv_uninit(dev);
+	if (priv->drop_queue.hrxq)
+		mlx5_drop_action_destroy(dev);
 	if (priv->mreg_cp_tbl)
-		mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL);
+		mlx5_hlist_destroy(priv->mreg_cp_tbl);
 	mlx5_mprq_free_mp(dev);
 	mlx5_os_free_shared_dr(priv);
 	if (priv->rss_conf.rss_key != NULL)
@@ -1478,6 +1393,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	if (ret)
 		DRV_LOG(WARNING, "port %u some flows still remain",
 			dev->data->port_id);
+	mlx5_cache_list_destroy(&priv->hrxqs);
 	/*
 	 * Free the shared context in last turn, because the cleanup
 	 * routines above may use some shared fields, like
@@ -1552,7 +1468,14 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 	}
 	mod = tmp >= 0 ? tmp : -tmp;
 	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
+		if (tmp > MLX5_CQE_RESP_FORMAT_L34H_STRIDX) {
+			DRV_LOG(ERR, "invalid CQE compression "
+				     "format parameter");
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
 		config->cqe_comp = !!tmp;
+		config->cqe_comp_fmt = tmp;
 	} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
 		config->cqe_pad = !!tmp;
 	} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
@@ -1623,13 +1546,17 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
 		if (tmp != MLX5_XMETA_MODE_LEGACY &&
 		    tmp != MLX5_XMETA_MODE_META16 &&
-		    tmp != MLX5_XMETA_MODE_META32) {
+		    tmp != MLX5_XMETA_MODE_META32 &&
+		    tmp != MLX5_XMETA_MODE_MISS_INFO) {
 			DRV_LOG(ERR, "invalid extensive "
 				     "metadata parameter");
 			rte_errno = EINVAL;
 			return -rte_errno;
 		}
-		config->dv_xmeta_en = tmp;
+		if (tmp != MLX5_XMETA_MODE_MISS_INFO)
+			config->dv_xmeta_en = tmp;
+		else
+			config->dv_miss_info = 1;
 	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
 		config->lacp_by_user = !!tmp;
 	} else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
@@ -2112,7 +2039,7 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
 	},
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
-				PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
+				PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
 	},
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
@@ -2122,6 +2049,14 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
 				PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
 	},
+	{
+		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+				PCI_DEVICE_ID_MELLANOX_CONNECTX7)
+	},
+	{
+		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+				PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
+	},
 	{
 		.vendor_id = 0
 	}