/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"
+/*
+ * Device parameter to force doorbell register mapping
+ * to non-cached region eliminating the extra write memory barrier.
+ */
+#define MLX5_TX_DB_NC "tx_db_nc"
+
/*
* Device parameter to include 2 dsegs in the title WQEBB.
* Deprecated, ignored.
#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16
+#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096
+#define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
+
/**
* Allocate ID pool structure.
*
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+/**
+ * Set the MLX5_SHUT_UP_BF environment variable before device creation
+ * according to the requested Tx doorbell mapping, saving the previous
+ * variable state so it can be restored afterwards by
+ * mlx5_restore_doorbell_mapping_env().
+ *
+ * @param[in] config
+ *   Pointer to the device configuration structure; only config->dbnc
+ *   is read.
+ *
+ * @return
+ *   The prior MLX5_SHUT_UP_BF state: MLX5_ARG_UNSET if the variable
+ *   was not set, otherwise 0 if it was "0" and 1 for any other value.
+ */
+static int
+mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
+{
+	char *env;
+	int value;
+
+	/* Only the primary process may mutate the process environment. */
+	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	/* Get environment variable to store. */
+	env = getenv(MLX5_SHUT_UP_BF);
+	value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
+	if (config->dbnc == MLX5_ARG_UNSET)
+		setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);
+	else
+		setenv(MLX5_SHUT_UP_BF,
+		       config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
+	return value;
+}
+
+/**
+ * Restore the MLX5_SHUT_UP_BF environment variable to the state
+ * previously returned by mlx5_config_doorbell_mapping_env().
+ *
+ * @param[in] value
+ *   MLX5_ARG_UNSET to remove the variable, otherwise 0/1 to set it
+ *   back to "0"/"1" respectively.
+ */
+static void
+mlx5_restore_doorbell_mapping_env(int value)
+{
+	/* Only the primary process may mutate the process environment. */
+	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	/* Restore the original environment variable state. */
+	if (value == MLX5_ARG_UNSET)
+		unsetenv(MLX5_SHUT_UP_BF);
+	else
+		setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1);
+}
+
/**
* Allocate shared IB device context. If there is multiport device the
* master and representors will share this context, if there is single
*
* @param[in] spawn
* Pointer to the IB device attributes (name, port, etc).
+ * @param[in] config
+ * Pointer to device configuration structure.
*
* @return
* Pointer to mlx5_ibv_shared object on success,
* otherwise NULL and rte_errno is set.
*/
static struct mlx5_ibv_shared *
-mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
+mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
+ const struct mlx5_dev_config *config)
{
struct mlx5_ibv_shared *sh;
+ int dbmap_env;
int err = 0;
uint32_t i;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_devx_tis_attr tis_attr = { 0 };
#endif
-assert(spawn);
+ assert(spawn);
/* Secondary process should not create the shared context. */
assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
pthread_mutex_lock(&mlx5_ibv_list_mutex);
rte_errno = ENOMEM;
goto exit;
}
+ /*
+	 * Configure environment variable "MLX5_SHUT_UP_BF"
+ * before the device creation. The rdma_core library
+ * checks the variable at device creation and
+ * stores the result internally.
+ */
+ dbmap_env = mlx5_config_doorbell_mapping_env(config);
/* Try to open IB device with DV first, then usual Verbs. */
errno = 0;
sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev);
if (sh->ctx) {
sh->devx = 1;
DRV_LOG(DEBUG, "DevX is supported");
+ /* The device is created, no need for environment. */
+ mlx5_restore_doorbell_mapping_env(dbmap_env);
} else {
+ /* The environment variable is still configured. */
sh->ctx = mlx5_glue->open_device(spawn->ibv_dev);
- if (!sh->ctx) {
- err = errno ? errno : ENODEV;
+ err = errno ? errno : ENODEV;
+ /*
+ * The environment variable is not needed anymore,
+ * all device creation attempts are completed.
+ */
+ mlx5_restore_doorbell_mapping_env(dbmap_env);
+ if (!sh->ctx)
goto error;
- }
DRV_LOG(DEBUG, "DevX is NOT supported");
}
err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
pthread_mutex_unlock(&mlx5_ibv_list_mutex);
}
+/**
+ * Destroy table hash list and all the root entries per domain.
+ *
+ * The root (zero) table entries are registered under three
+ * (domain, direction) key combinations: (0, 0), (0, 1) and (1, 0).
+ * Each is looked up, unlinked and freed before the hash list itself
+ * is destroyed.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ */
+static void
+mlx5_free_table_hash_list(struct mlx5_priv *priv)
+{
+	/* The three root table keys as (domain, direction) pairs. */
+	static const struct {
+		uint32_t domain;
+		uint32_t direction;
+	} roots[] = {
+		{ 0, 0 },
+		{ 0, 1 },
+		{ 1, 0 },
+	};
+	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_flow_tbl_data_entry *tbl_data;
+	struct mlx5_hlist_entry *pos;
+	unsigned int i;
+
+	if (!sh->flow_tbls)
+		return;
+	for (i = 0; i < RTE_DIM(roots); ++i) {
+		union mlx5_flow_tbl_key table_key = {
+			{
+				.table_id = 0,
+				.reserved = 0,
+				.domain = roots[i].domain,
+				.direction = roots[i].direction,
+			}
+		};
+
+		pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+		if (!pos)
+			continue;
+		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+					entry);
+		assert(tbl_data);
+		mlx5_hlist_remove(sh->flow_tbls, pos);
+		rte_free(tbl_data);
+	}
+	mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
+	/* Reset the pointer so a repeated call is a harmless no-op. */
+	sh->flow_tbls = NULL;
+}
+
+/**
+ * Initialize flow table hash list and create the root tables entry
+ * for each domain.
+ *
+ * When mlx5dv direct rules (DR) support is not compiled in, the root
+ * (zero) table entries are pre-created here because the DV flow engine
+ * expects to find them even though RDMA-CORE cannot create them.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ *
+ * @return
+ *   Zero on success, positive error code otherwise.
+ */
+static int
+mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
+{
+	struct mlx5_ibv_shared *sh = priv->sh;
+	char s[MLX5_HLIST_NAMESIZE];
+	int err = 0;
+
+	assert(sh);
+	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
+	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
+	if (!sh->flow_tbls) {
+		DRV_LOG(ERR, "flow tables with hash creation failed.\n");
+		err = ENOMEM;
+		return err;
+	}
+#ifndef HAVE_MLX5DV_DR
+	/* The three root table keys as (domain, direction) pairs. */
+	static const struct {
+		uint32_t domain;
+		uint32_t direction;
+	} roots[] = {
+		{ 0, 0 },
+		{ 0, 1 },
+		{ 1, 0 },
+	};
+	unsigned int i;
+
+	for (i = 0; i < RTE_DIM(roots); ++i) {
+		union mlx5_flow_tbl_key table_key = {
+			{
+				.table_id = 0,
+				.reserved = 0,
+				.domain = roots[i].domain,
+				.direction = roots[i].direction,
+			}
+		};
+		struct mlx5_flow_tbl_data_entry *tbl_data =
+			rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+
+		if (!tbl_data) {
+			err = ENOMEM;
+			goto error;
+		}
+		tbl_data->entry.key = table_key.v64;
+		err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
+		if (err) {
+			/*
+			 * The entry was never linked into the list, so the
+			 * cleanup in mlx5_free_table_hash_list() cannot see
+			 * it - free it here to avoid a leak.
+			 */
+			rte_free(tbl_data);
+			goto error;
+		}
+		rte_atomic32_init(&tbl_data->tbl.refcnt);
+		rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	}
+	return err;
+error:
+	mlx5_free_table_hash_list(priv);
+#endif /* HAVE_MLX5DV_DR */
+	return err;
+}
+
/**
* Initialize DR related data within private structure.
* Routine checks the reference counter and does actual
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
-#ifdef HAVE_MLX5DV_DR
struct mlx5_ibv_shared *sh = priv->sh;
- int err = 0;
+ char s[MLX5_HLIST_NAMESIZE];
+ int err = mlx5_alloc_table_hash_list(priv);
+
+ if (err)
+ return err;
+ /* Create tags hash list table. */
+ snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
+ sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE);
+ if (!sh->tag_table) {
+ DRV_LOG(ERR, "tags with hash creation failed.\n");
+ err = ENOMEM;
+ goto error;
+ }
+#ifdef HAVE_MLX5DV_DR
void *domain;
- assert(sh);
if (sh->dv_refcnt) {
/* Shared DV/DR structures is already initialized. */
sh->dv_refcnt++;
}
#endif
sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
+#endif /* HAVE_MLX5DV_DR */
sh->dv_refcnt++;
priv->dr_shared = 1;
return 0;
-
error:
- /* Rollback the created objects. */
+ /* Rollback the created objects. */
if (sh->rx_domain) {
mlx5_glue->dr_destroy_domain(sh->rx_domain);
sh->rx_domain = NULL;
mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
sh->pop_vlan_action = NULL;
}
+ if (sh->tag_table) {
+		/* Tag entries must have been released along with the flows beforehand. */
+ mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
+ sh->tag_table = NULL;
+ }
+ mlx5_free_table_hash_list(priv);
return err;
-#else
- (void)priv;
- return 0;
-#endif
}
/**
static void
mlx5_free_shared_dr(struct mlx5_priv *priv)
{
-#ifdef HAVE_MLX5DV_DR
struct mlx5_ibv_shared *sh;
if (!priv->dr_shared)
priv->dr_shared = 0;
sh = priv->sh;
assert(sh);
+#ifdef HAVE_MLX5DV_DR
assert(sh->dv_refcnt);
if (sh->dv_refcnt && --sh->dv_refcnt)
return;
sh->pop_vlan_action = NULL;
}
pthread_mutex_destroy(&sh->dv_mutex);
-#else
- (void)priv;
-#endif
+#endif /* HAVE_MLX5DV_DR */
+ if (sh->tag_table) {
+	if (sh->tag_table) {
+		/* Tag entries must have been released along with the flows beforehand. */
+ mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
+ sh->tag_table = NULL;
+ }
+ mlx5_free_table_hash_list(priv);
}
/**
DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
config->mps = !!tmp;
+ } else if (strcmp(MLX5_TX_DB_NC, key) == 0) {
+ if (tmp != MLX5_TXDB_CACHED &&
+ tmp != MLX5_TXDB_NCACHED &&
+ tmp != MLX5_TXDB_HEURISTIC) {
+ DRV_LOG(ERR, "invalid Tx doorbell "
+ "mapping parameter");
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ config->dbnc = tmp;
} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
if (tmp != MLX5_XMETA_MODE_LEGACY &&
tmp != MLX5_XMETA_MODE_META16 &&
tmp != MLX5_XMETA_MODE_META32) {
- DRV_LOG(WARNING, "invalid extensive "
- "metadata parameter");
+ DRV_LOG(ERR, "invalid extensive "
+ "metadata parameter");
rte_errno = EINVAL;
return -rte_errno;
}
MLX5_TXQ_MPW_EN,
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
+ MLX5_TX_DB_NC,
MLX5_TX_VEC_EN,
MLX5_RX_VEC_EN,
MLX5_L3_VXLAN_EN,
eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
return eth_dev;
}
- sh = mlx5_alloc_shared_ibctx(spawn);
+	/*
+	 * Some parameters ("tx_db_nc" in particular) are needed in
+	 * advance to create the dv/verbs device context. We process the
+	 * devargs here to obtain them, and later process the devargs
+	 * again to override some hardware settings.
+	 */
+ err = mlx5_args(&config, dpdk_dev->devargs);
+ if (err) {
+ err = rte_errno;
+ DRV_LOG(ERR, "failed to process device arguments: %s",
+ strerror(rte_errno));
+ goto error;
+ }
+ sh = mlx5_alloc_shared_ibctx(spawn, &config);
if (!sh)
return NULL;
config.devx = sh->devx;
err = ENOTSUP;
goto error;
}
- } else if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
+ }
+ if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
priv->vport_id = devx_port.vport_num;
} else if (spawn->pf_bond >= 0) {
DRV_LOG(ERR, "can't deduce vport index for port %d"
}
own_domain_id = 1;
}
- err = mlx5_args(&config, dpdk_dev->devargs);
- if (err) {
- err = rte_errno;
- DRV_LOG(ERR, "failed to process device arguments: %s",
- strerror(rte_errno));
- goto error;
- }
+ /* Override some values set by hardware configuration. */
+ mlx5_args(&config, dpdk_dev->devargs);
err = mlx5_dev_check_sibling_config(priv, &config);
if (err)
goto error;
!defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
DRV_LOG(DEBUG, "counters are not supported");
#endif
-#ifndef HAVE_IBV_FLOW_DV_SUPPORT
+#if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
if (config.dv_flow_en) {
DRV_LOG(WARNING, "DV flow is not supported");
config.dv_flow_en = 0;
dev_config = (struct mlx5_dev_config){
.hw_padding = 0,
.mps = MLX5_ARG_UNSET,
+ .dbnc = MLX5_ARG_UNSET,
.rx_vec_en = 1,
.txq_inline_max = MLX5_ARG_UNSET,
.txq_inline_min = MLX5_ARG_UNSET,
.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
},
.dv_esw_en = 1,
+ .dv_flow_en = 1,
};
/* Device specific configuration. */
switch (pci_dev->id.device_id) {