#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
for (i = 0; i < MLX5_ASO_MTRS_PER_POOL; i++) {
aso_mtr = &mtr_pool->mtrs[i];
- if (aso_mtr->fm.meter_action)
+ if (aso_mtr->fm.meter_action_g)
claim_zero
(mlx5_glue->destroy_flow_action
- (aso_mtr->fm.meter_action));
+ (aso_mtr->fm.meter_action_g));
+ if (aso_mtr->fm.meter_action_y)
+ claim_zero
+ (mlx5_glue->destroy_flow_action
+ (aso_mtr->fm.meter_action_y));
}
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
claim_zero(mlx5_devx_cmd_destroy
DRV_LOG(DEBUG, "E-Switch DV flow is not supported.");
config->dv_esw_en = 0;
}
+ if (config->dv_esw_en && !config->dv_flow_en) {
+ DRV_LOG(DEBUG,
+ "E-Switch DV flow is supported only when DV flow is enabled.");
+ config->dv_esw_en = 0;
+ }
if (config->dv_miss_info && config->dv_esw_en)
config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
if (!config->dv_esw_en &&
for (i = 0; i < sh->max_port; i++) {
sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
+ sh->port[i].nl_ih_port_id = RTE_MAX_ETHPORTS;
}
if (sh->cdev->config.devx) {
sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
struct mlx5_dev_ctx_shared *sh = priv->sh;
-
-if (!sh->flow_tbls)
+/*
+ * HW steering mode (dv_flow_en == 2) keeps its group hash list in
+ * sh->groups, while the legacy path uses sh->flow_tbls.  Select the
+ * list this device was configured with, destroy it, and NULL the
+ * pointer so a repeated release is a no-op.
+ */
+struct mlx5_hlist **tbls = (priv->sh->config.dv_flow_en == 2) ?
+&sh->groups : &sh->flow_tbls;
+if (*tbls == NULL)
return;
-mlx5_hlist_destroy(sh->flow_tbls);
-sh->flow_tbls = NULL;
+mlx5_hlist_destroy(*tbls);
+*tbls = NULL;
+}
+
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+/**
+ * Allocate HW steering group hash list.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ *
+ * @return
+ *   0 on success, ENOMEM when the hash list creation failed.
+ */
+static int
+mlx5_alloc_hw_group_hash_list(struct mlx5_priv *priv)
+{
+int err = 0;
+struct mlx5_dev_ctx_shared *sh = priv->sh;
+char s[MLX5_NAME_SIZE];
+
+MLX5_ASSERT(sh);
+snprintf(s, sizeof(s), "%s_flow_groups", priv->sh->ibdev_name);
+sh->groups = mlx5_hlist_create
+(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
+false, true, sh,
+flow_hw_grp_create_cb,
+flow_hw_grp_match_cb,
+flow_hw_grp_remove_cb,
+flow_hw_grp_clone_cb,
+flow_hw_grp_clone_free_cb);
+if (!sh->groups) {
+DRV_LOG(ERR, "flow groups with hash creation failed.");
+err = ENOMEM;
+}
+return err;
+}
+#endif
+
/**
* Initialize flow table hash list and create the root tables entry
mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
{
int err = 0;
+
/* Tables are only used in DV and DR modes. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_dev_ctx_shared *sh = priv->sh;
char s[MLX5_NAME_SIZE];
+ if (priv->sh->config.dv_flow_en == 2)
+ return mlx5_alloc_hw_group_hash_list(priv);
MLX5_ASSERT(sh);
snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
mlx5_flex_item_port_cleanup(dev);
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ flow_hw_resource_release(dev);
+#endif
if (priv->rxq_privs != NULL) {
/* XXX race condition if mlx5_rx_burst() is still running. */
rte_delay_us_sleep(1000);
if (ret)
DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
dev->data->port_id);
+ ret = mlx5_ext_rxq_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "Port %u some external RxQ still remain.",
+ dev->data->port_id);
ret = mlx5_rxq_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some Rx queues still remain",
dev->data->port_id);
if (priv->hrxqs)
mlx5_list_destroy(priv->hrxqs);
+ mlx5_free(priv->ext_rxqs);
/*
* Free the shared context in last turn, because the cleanup
* routines above may use some shared fields, like