{
struct mlx5_dev_ctx_shared *sh = priv->sh;
char s[MLX5_HLIST_NAMESIZE];
- int err = 0;
+ int err;
- if (!sh->flow_tbls)
- err = mlx5_alloc_table_hash_list(priv);
- else
- DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse\n",
- (void *)sh->flow_tbls);
+ MLX5_ASSERT(sh && sh->refcnt);
+ if (sh->refcnt > 1)
+ return 0;
+ err = mlx5_alloc_table_hash_list(priv);
if (err)
return err;
/* Create tags hash list table. */
#ifdef HAVE_MLX5DV_DR
void *domain;
- if (sh->dv_refcnt) {
- /* Shared DV/DR structures is already initialized. */
- sh->dv_refcnt++;
- priv->dr_shared = 1;
- return 0;
- }
/* Reference counter is zero, we should initialize structures. */
domain = mlx5_glue->dr_create_domain(sh->ctx,
MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
}
sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
#endif /* HAVE_MLX5DV_DR */
- sh->dv_refcnt++;
- priv->dr_shared = 1;
return 0;
error:
/* Rollback the created objects. */
void
mlx5_os_free_shared_dr(struct mlx5_priv *priv)
{
- struct mlx5_dev_ctx_shared *sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
- if (!priv->dr_shared)
+ MLX5_ASSERT(sh && sh->refcnt);
+ if (sh->refcnt > 1)
return;
- priv->dr_shared = 0;
- sh = priv->sh;
- MLX5_ASSERT(sh);
#ifdef HAVE_MLX5DV_DR
- MLX5_ASSERT(sh->dv_refcnt);
- if (sh->dv_refcnt && --sh->dv_refcnt)
- return;
if (sh->rx_domain) {
mlx5_glue->dr_destroy_domain(sh->rx_domain);
sh->rx_domain = NULL;
mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
- /*
- * When DevX is supported and DV flow is enable, and dest tir is enable,
- * hairpin functions use DevX API.
- * When, in addition, DV E-Switch is enable and DevX uar offset is
- * supported, all Tx functions also use DevX API.
- * Otherwise, all Tx functions use Verbs API.
- */
- if (config->devx && config->dv_flow_en && config->dest_tir) {
- if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
- return mlx5_txq_devx_obj_new(dev, idx);
+ /* Hairpin Tx queues are always created through the DevX API. */
+ if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
+ return mlx5_txq_devx_obj_new(dev, idx);
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
+ /*
+ * With DevX UAR offset support, regular Tx queues also use DevX
+ * unless DV E-Switch is enabled.
+ */
- if (config->dv_esw_en)
- return mlx5_txq_devx_obj_new(dev, idx);
+ if (!priv->config.dv_esw_en)
+ return mlx5_txq_devx_obj_new(dev, idx);
#endif
- }
+ /* All remaining cases fall back to the Verbs API. */
return mlx5_txq_ibv_obj_new(dev, idx);
}
+/*
+ * Release a Tx queue object through the same API (DevX or Verbs) that
+ * mlx5_os_txq_obj_new() selected for its creation.
+ */
static void
mlx5_os_txq_obj_release(struct mlx5_txq_obj *txq_obj)
{
- struct mlx5_dev_config *config = &txq_obj->txq_ctrl->priv->config;
-
- if (config->devx && config->dv_flow_en && config->dest_tir) {
+ /* Hairpin Tx queues were created through DevX - release via DevX. */
+ if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
+ mlx5_txq_devx_obj_release(txq_obj);
+ return;
+ }
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
- if (config->dv_esw_en) {
- mlx5_txq_devx_obj_release(txq_obj);
- return;
- }
-#endif
- if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
- mlx5_txq_devx_obj_release(txq_obj);
- return;
- }
+ /* Mirror the creation path: DevX was used when E-Switch is off. */
+ if (!txq_obj->txq_ctrl->priv->config.dv_esw_en) {
+ mlx5_txq_devx_obj_release(txq_obj);
+ return;
+ }
}
+#endif
+ /* Otherwise the object was created through Verbs. */
mlx5_txq_ibv_obj_release(txq_obj);
}
+/**
+ * DV flow counter mode detect and config.
+ *
+ * Fall-back counter management is selected when DevX async support is
+ * compiled out (no HAVE_IBV_DEVX_ASYNC), or when DevX / DV flow are
+ * disabled, or when the required counter capabilities are missing
+ * (counter dump, bulk allocation bitmap bit 0x4, or counter offset
+ * discovery). The chosen mode is recorded in the shared context by the
+ * first port that initializes it; later ports only warn on mismatch.
+ *
+ * @param dev
+ *   Pointer to rte_eth_dev structure.
+ *
+ */
+static void
+mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ bool fallback;
+
+#ifndef HAVE_IBV_DEVX_ASYNC
+ /* Without async DevX operations only the fall-back mode works. */
+ fallback = true;
+#else
+ fallback = false;
+ if (!priv->config.devx || !priv->config.dv_flow_en ||
+ !priv->config.hca_attr.flow_counters_dump ||
+ !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
+ (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
+ fallback = true;
+#endif
+ if (fallback)
+ DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
+ "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
+ priv->config.hca_attr.flow_counters_dump,
+ priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
+ /* Initialize fallback mode only on the port that initializes sh. */
+ if (sh->refcnt == 1)
+ sh->cmng.counter_fallback = fallback;
+ else if (fallback != sh->cmng.counter_fallback)
+ DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
+ "with others:%d.", PORT_ID(priv), fallback);
+#endif
+}
+
/**
* Spawn an Ethernet device from Verbs information.
*
DRV_LOG(INFO, "Rx CQE padding is enabled");
}
if (config->devx) {
- priv->counter_fallback = 0;
err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
if (err) {
err = -err;
goto error;
}
- if (!config->hca_attr.flow_counters_dump)
- priv->counter_fallback = 1;
-#ifndef HAVE_IBV_DEVX_ASYNC
- priv->counter_fallback = 1;
-#endif
- if (priv->counter_fallback)
- DRV_LOG(INFO, "Use fall-back DV counter management");
+ /* Check relax ordering support. */
+ if (config->hca_attr.relaxed_ordering_write &&
+ config->hca_attr.relaxed_ordering_read &&
+ !haswell_broadwell_cpu)
+ sh->cmng.relaxed_ordering = 1;
/* Check for LRO support. */
if (config->dest_tir && config->hca_attr.lro_cap &&
config->dv_flow_en) {
config->hca_attr.lro_timer_supported_periods[0];
DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
config->lro.timeout);
+ DRV_LOG(DEBUG, "LRO minimal size of TCP segment "
+ "required for coalescing is %d bytes",
+ config->hca_attr.lro_min_mss_size);
}
#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
if (config->hca_attr.qos.sup &&
goto error;
}
}
- /*
- * Initialize the dev_ops structure with DevX/Verbs function pointers.
- * When DevX is supported and both DV flow and dest tir are enabled, all
- * Rx functions use DevX API (except for drop that has not yet been
- * implemented in DevX).
- */
if (config->devx && config->dv_flow_en && config->dest_tir) {
priv->obj_ops = devx_obj_ops;
priv->obj_ops.drop_action_create =
#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify;
#else
- if (!config->dv_esw_en)
+ if (config->dv_esw_en)
priv->obj_ops.txq_obj_modify =
ibv_obj_ops.txq_obj_modify;
#endif
+ /* Use specific wrappers for Tx object. */
+ priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
+ priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
+
} else {
priv->obj_ops = ibv_obj_ops;
}
- /* The Tx objects are managed by a specific linux wrapper functions. */
- priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
- priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
/* Supported Verbs flow priority number detection. */
err = mlx5_flow_discover_priorities(eth_dev);
if (err < 0) {
goto error;
}
}
+ mlx5_flow_counter_mode_config(eth_dev);
return eth_dev;
error:
if (priv) {