diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 6f77bc257e..b610f29a66 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -36,6 +36,7 @@
 #include "mlx5_utils.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_rx.h"
+#include "mlx5_tx.h"
 #include "mlx5_autoconf.h"
 #include "mlx5_mr.h"
 #include "mlx5_flow.h"
@@ -276,10 +277,13 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 	},
 #endif
 	[MLX5_IPOOL_MTR] = {
-		.size = sizeof(struct mlx5_flow_meter),
+		/**
+		 * The ipool index must grow continually from small to
+		 * big: leave grow_trunk unset so that meter indexes are
+		 * allocated contiguously, without jumps.
+		 */
+		.size = sizeof(struct mlx5_legacy_flow_meter),
 		.trunk_size = 64,
-		.grow_trunk = 3,
-		.grow_shift = 2,
 		.need_lock = 1,
 		.release_mem_en = 1,
 		.malloc = mlx5_malloc,
@@ -348,6 +352,20 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_shared_action_rss",
 	},
+	[MLX5_IPOOL_MTR_POLICY] = {
+		/**
+		 * The ipool index must grow continually from small to
+		 * big: leave grow_trunk unset so that policy indexes are
+		 * allocated contiguously, without jumps.
+		 */
+		.size = sizeof(struct mlx5_flow_meter_sub_policy),
+		.trunk_size = 64,
+		.need_lock = 1,
+		.release_mem_en = 1,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.type = "mlx5_meter_policy_ipool",
+	},
 };
 
@@ -399,7 +417,7 @@ mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
 		rte_errno = ENOMEM;
 		return -ENOMEM;
 	}
-	err = mlx5_aso_queue_init(sh);
+	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT);
 	if (err) {
 		mlx5_free(sh->aso_age_mng);
 		return -1;
@@ -421,8 +439,8 @@ mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
 {
 	int i, j;
 
-	mlx5_aso_queue_stop(sh);
-	mlx5_aso_queue_uninit(sh);
+	mlx5_aso_flow_hit_queue_poll_stop(sh);
+	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT);
 	if (sh->aso_age_mng->pools) {
 		struct mlx5_aso_age_pool *pool;
@@ -560,6 +578,79 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 	memset(&sh->cmng, 0, sizeof(sh->cmng));
 }
 
+/**
+ * Initialize the ASO flow meters management structure.
+ *
+ * @param[in] sh
+ *   Pointer to the mlx5_dev_ctx_shared object to initialize.
+ */
+int
+mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
+{
+	if (!sh->mtrmng) {
+		sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO,
+			sizeof(*sh->mtrmng),
+			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+		if (!sh->mtrmng) {
+			DRV_LOG(ERR,
+				"meter management allocation failed.");
+			rte_errno = ENOMEM;
+			return -ENOMEM;
+		}
+		if (sh->meter_aso_en) {
+			rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
+			LIST_INIT(&sh->mtrmng->pools_mng.meters);
+			sh->mtrmng->policy_idx_tbl =
+				mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
+		}
+		sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
+	}
+	return 0;
+}
+
+/**
+ * Close and release all the resources of
+ * the ASO flow meter management structure.
+ *
+ * @param[in] sh
+ *   Pointer to the mlx5_dev_ctx_shared object to free.
+ */
+static void
+mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_aso_mtr_pool *mtr_pool;
+	struct mlx5_flow_mtr_mng *mtrmng = sh->mtrmng;
+	uint32_t idx;
+#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
+	struct mlx5_aso_mtr *aso_mtr;
+	int i;
+#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
+
+	if (sh->meter_aso_en) {
+		mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
+		idx = mtrmng->pools_mng.n_valid;
+		while (idx--) {
+			mtr_pool = mtrmng->pools_mng.pools[idx];
+#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
+			for (i = 0; i < MLX5_ASO_MTRS_PER_POOL; i++) {
+				aso_mtr = &mtr_pool->mtrs[i];
+				if (aso_mtr->fm.meter_action)
+					claim_zero
+					(mlx5_glue->destroy_flow_action
+					(aso_mtr->fm.meter_action));
+			}
+#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
+			claim_zero(mlx5_devx_cmd_destroy
+					(mtr_pool->devx_obj));
+			mtrmng->pools_mng.n_valid--;
+			mlx5_free(mtr_pool);
+		}
+		mlx5_free(sh->mtrmng->pools_mng.pools);
+	}
+	mlx5_free(sh->mtrmng);
+	sh->mtrmng = NULL;
+}
+
 /* Send FLOW_AGED event if needed. */
 void
 mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
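The two meter ipool entries above trade trunk growth for index density: with a fixed .trunk_size, entry n of trunk t always maps to index t * trunk_size + n, so indexes stay contiguous as the pool grows. A minimal standalone sketch of that mapping (plain C with illustrative names, not the driver's ipool implementation):

#include <stdio.h>

#define TRUNK_SIZE 64	/* matches .trunk_size in the configs above */

/* With grow_trunk unset every trunk holds TRUNK_SIZE entries, so the
 * entry-to-index mapping stays a dense linear function; growing trunks
 * would make later trunks larger and break this simple mapping. */
static unsigned int
fixed_trunk_index(unsigned int trunk, unsigned int entry)
{
	return trunk * TRUNK_SIZE + entry;
}

int
main(void)
{
	unsigned int t;

	for (t = 0; t < 3; t++)
		printf("trunk %u covers indexes [%u, %u]\n", t,
		       fixed_trunk_index(t, 0),
		       fixed_trunk_index(t, TRUNK_SIZE - 1));
	return 0;
}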
@@ -571,20 +662,112 @@ mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
 		age_info = &sh->port[i].age_info;
 		if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
 			continue;
-		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
+		MLX5_AGE_UNSET(age_info, MLX5_AGE_EVENT_NEW);
+		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) {
+			MLX5_AGE_UNSET(age_info, MLX5_AGE_TRIGGER);
 			rte_eth_dev_callback_process
 				(&rte_eth_devices[sh->port[i].devx_ih_port_id],
 				RTE_ETH_EVENT_FLOW_AGED, NULL);
-		age_info->flags = 0;
+		}
 	}
 }
 
+/*
+ * Initialize the ASO connection tracking structure.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh)
+{
+	int err;
+
+	if (sh->ct_mng)
+		return 0;
+	sh->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->ct_mng),
+				 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+	if (!sh->ct_mng) {
+		DRV_LOG(ERR, "ASO CT management allocation failed.");
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_CONNECTION_TRACKING);
+	if (err) {
+		mlx5_free(sh->ct_mng);
+		/* rte_errno should be extracted from the failure. */
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	rte_spinlock_init(&sh->ct_mng->ct_sl);
+	rte_rwlock_init(&sh->ct_mng->resize_rwl);
+	LIST_INIT(&sh->ct_mng->free_cts);
+	return 0;
+}
+
+/*
+ * Close and release all the resources of the
+ * ASO connection tracking management structure.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object to free.
+ */
+static void
+mlx5_flow_aso_ct_mng_close(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
+	struct mlx5_aso_ct_pool *ct_pool;
+	struct mlx5_aso_ct_action *ct;
+	uint32_t idx;
+	uint32_t val;
+	uint32_t cnt;
+	int i;
+
+	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_CONNECTION_TRACKING);
+	idx = mng->next;
+	while (idx--) {
+		cnt = 0;
+		ct_pool = mng->pools[idx];
+		for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
+			ct = &ct_pool->actions[i];
+			val = __atomic_fetch_sub(&ct->refcnt, 1,
+						 __ATOMIC_RELAXED);
+			MLX5_ASSERT(val == 1);
+			if (val > 1)
+				cnt++;
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+			if (ct->dr_action_orig)
+				claim_zero(mlx5_glue->destroy_flow_action
+					(ct->dr_action_orig));
+			if (ct->dr_action_rply)
+				claim_zero(mlx5_glue->destroy_flow_action
+					(ct->dr_action_rply));
+#endif
+		}
+		claim_zero(mlx5_devx_cmd_destroy(ct_pool->devx_obj));
+		if (cnt) {
+			DRV_LOG(DEBUG, "%u ASO CT objects are still in use in pool %u",
+				cnt, idx);
+		}
+		mlx5_free(ct_pool);
+		/* Keep the pool count consistent with the pools freed. */
+		mng->next--;
+	}
+	mlx5_free(mng->pools);
+	mlx5_free(mng);
+	/* The structure is zeroed on allocation; clearing the pointer is enough. */
+	sh->ct_mng = NULL;
+}
+
 /**
  * Initialize the flow resources' indexed mempool.
  *
  * @param[in] sh
  *   Pointer to mlx5_dev_ctx_shared object.
- * @param[in] sh
+ * @param[in] config
  *   Pointer to user dev config.
  */
 static void
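The reworked mlx5_age_event_prepare() above clears each aging flag as it is consumed instead of zeroing age_info->flags wholesale, so a flag set concurrently by another path is not silently discarded. A standalone sketch of the two-flag hand-off (illustrative names, not the driver's macros):

#include <stdbool.h>
#include <stdio.h>

#define AGE_EVENT_NEW	(1u << 0)	/* something aged since last check */
#define AGE_TRIGGER	(1u << 1)	/* application asked to be notified */

/* Consume the flags and report whether a FLOW_AGED event should fire:
 * only when both flags are set, and each is cleared as it is used. */
static bool
age_event_consume(unsigned int *flags)
{
	if (!(*flags & AGE_EVENT_NEW))
		return false;
	*flags &= ~AGE_EVENT_NEW;
	if (!(*flags & AGE_TRIGGER))
		return false;
	*flags &= ~AGE_TRIGGER;
	return true;
}

int
main(void)
{
	unsigned int flags = AGE_EVENT_NEW | AGE_TRIGGER;

	printf("fire: %d\n", age_event_consume(&flags));	/* 1 */
	printf("fire: %d\n", age_event_consume(&flags));	/* 0, consumed */
	return 0;
}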
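Similarly, mlx5_flow_aso_ct_mng_close() above detects objects that were never released from the value returned by __atomic_fetch_sub(), which is the reference count before the decrement. A minimal standalone sketch of that accounting, using the same GCC/Clang builtin:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t refcnt = 2;	/* pretend one user never released it */
	unsigned int in_use = 0;
	uint32_t val;

	/* Fetch-and-sub returns the old value: a fully released object
	 * yields 1 here, anything larger is still referenced. */
	val = __atomic_fetch_sub(&refcnt, 1, __ATOMIC_RELAXED);
	if (val > 1)
		in_use++;
	printf("%u object(s) still in use at close\n", in_use);
	return 0;
}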
@@ -1109,6 +1292,8 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 		mlx5_flow_aso_age_mng_close(sh);
 		sh->aso_age_mng = NULL;
 	}
+	if (sh->mtrmng)
+		mlx5_aso_flow_mtrs_mng_close(sh);
 	mlx5_flow_ipool_destroy(sh);
 	mlx5_os_dev_shared_handler_uninstall(sh);
 	if (sh->cnt_id_tbl) {
@@ -1193,9 +1378,12 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
 	 * because DV expect to see them even if they cannot be created by
 	 * RDMA-CORE.
 	 */
-	if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0, NULL, 0, 1, &error) ||
-	    !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0, NULL, 0, 1, &error) ||
-	    !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0, NULL, 0, 1, &error)) {
+	if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0,
+				      NULL, 0, 1, 0, &error) ||
+	    !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0,
+				      NULL, 0, 1, 0, &error) ||
+	    !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0,
+				      NULL, 0, 1, 0, &error)) {
 		err = ENOMEM;
 		goto error;
 	}
@@ -1340,7 +1528,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	 * then this will return directly without any action.
 	 */
 	mlx5_flow_list_flush(dev, &priv->flows, true);
-	mlx5_shared_action_flush(dev);
+	mlx5_action_handle_flush(dev);
 	mlx5_flow_meter_flush(dev, NULL);
 	/* Prevent crashes when queues are still in use. */
 	dev->rx_pkt_burst = removed_rx_burst;
@@ -1376,6 +1564,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	if (priv->mreg_cp_tbl)
 		mlx5_hlist_destroy(priv->mreg_cp_tbl);
 	mlx5_mprq_free_mp(dev);
+	if (priv->sh->ct_mng)
+		mlx5_flow_aso_ct_mng_close(priv->sh);
 	mlx5_os_free_shared_dr(priv);
 	if (priv->rss_conf.rss_key != NULL)
 		mlx5_free(priv->rss_conf.rss_key);
@@ -1519,6 +1709,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
 	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
 	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
 	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
+	.get_monitor_addr = mlx5_get_monitor_addr,
 };
 
 /* Available operations from secondary process. */
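The .get_monitor_addr op added to both op tables is what lets an application sleep on an Rx queue through ethdev's power-management helpers instead of busy-polling. A hedged application-side sketch (error handling trimmed; rte_eth_get_monitor_addr() and rte_power_monitor() are the generic DPDK calls, nothing mlx5-specific):

#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_power_intrinsics.h>

/* Sleep until the NIC writes the next Rx descriptor; a real caller
 * would pass a TSC deadline instead of UINT64_MAX. */
static int
wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_power_monitor_cond pmc;
	int ret;

	ret = rte_eth_get_monitor_addr(port_id, queue_id, &pmc);
	if (ret < 0)
		return ret;	/* e.g. -ENOTSUP if the PMD lacks the op */
	return rte_power_monitor(&pmc, UINT64_MAX);
}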
@@ -1603,6 +1794,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
 	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
 	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
 	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
+	.get_monitor_addr = mlx5_get_monitor_addr,
 };
 
 /**