static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- {
+ [MLX5_IPOOL_DECAP_ENCAP] = {
.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
.trunk_size = 64,
.grow_trunk = 3,
.free = mlx5_free,
.type = "mlx5_encap_decap_ipool",
},
- {
+ [MLX5_IPOOL_PUSH_VLAN] = {
.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
.trunk_size = 64,
.grow_trunk = 3,
.free = mlx5_free,
.type = "mlx5_push_vlan_ipool",
},
- {
+ [MLX5_IPOOL_TAG] = {
.size = sizeof(struct mlx5_flow_dv_tag_resource),
.trunk_size = 64,
.grow_trunk = 3,
.free = mlx5_free,
.type = "mlx5_tag_ipool",
},
- {
+ [MLX5_IPOOL_PORT_ID] = {
.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
.trunk_size = 64,
.grow_trunk = 3,
.free = mlx5_free,
.type = "mlx5_port_id_ipool",
},
- {
+ [MLX5_IPOOL_JUMP] = {
.size = sizeof(struct mlx5_flow_tbl_data_entry),
.trunk_size = 64,
.grow_trunk = 3,
.free = mlx5_free,
.type = "mlx5_jump_ipool",
},
- {
+ [MLX5_IPOOL_SAMPLE] = {
.size = sizeof(struct mlx5_flow_dv_sample_resource),
.trunk_size = 64,
.grow_trunk = 3,
.free = mlx5_free,
.type = "mlx5_sample_ipool",
},
- {
+ [MLX5_IPOOL_DEST_ARRAY] = {
.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
.trunk_size = 64,
.grow_trunk = 3,
.free = mlx5_free,
.type = "mlx5_dest_array_ipool",
},
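+ /* Tunnel offload pools: tunnel contexts, plus a zero-size pool acting as a pure ID generator. */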
+ [MLX5_IPOOL_TUNNEL_ID] = {
+ .size = sizeof(struct mlx5_flow_tunnel),
+ .need_lock = 1,
+ .release_mem_en = 1,
+ .type = "mlx5_tunnel_offload",
+ },
+ [MLX5_IPOOL_TNL_TBL_ID] = {
+ .size = 0,
+ .need_lock = 1,
+ .type = "mlx5_flow_tnl_tbl_ipool",
+ },
#endif
- {
+ [MLX5_IPOOL_MTR] = {
.size = sizeof(struct mlx5_flow_meter),
.trunk_size = 64,
.grow_trunk = 3,
.free = mlx5_free,
.type = "mlx5_meter_ipool",
},
- {
+ [MLX5_IPOOL_MCP] = {
.size = sizeof(struct mlx5_flow_mreg_copy_resource),
.trunk_size = 64,
.grow_trunk = 3,
.free = mlx5_free,
.type = "mlx5_mcp_ipool",
},
- {
+ [MLX5_IPOOL_HRXQ] = {
.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
.trunk_size = 64,
.grow_trunk = 3,
.free = mlx5_free,
.type = "mlx5_hrxq_ipool",
},
- {
+ [MLX5_IPOOL_MLX5_FLOW] = {
/*
* MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
* It is set at runtime according to the PCI function configuration.
*/
.free = mlx5_free,
.type = "mlx5_flow_handle_ipool",
},
- {
+ [MLX5_IPOOL_RTE_FLOW] = {
.size = sizeof(struct rte_flow),
.trunk_size = 4096,
.need_lock = 1,
.free = mlx5_free,
.type = "rte_flow_ipool",
},
- {
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
.size = 0,
.need_lock = 1,
.type = "mlx5_flow_rss_id_ipool",
},
- {
- .size = 0,
- .need_lock = 1,
- .type = "mlx5_flow_tnl_flow_ipool",
- },
- {
- .size = 0,
+ [MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
+ .size = sizeof(struct mlx5_shared_action_rss),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
.need_lock = 1,
- .type = "mlx5_flow_tnl_tbl_ipool",
+ .release_mem_en = 1,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .type = "mlx5_shared_action_rss",
},
};
#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096
+/**
+ * Initialize the ASO aging management structure.
+ *
+ * @param[in] sh
+ * Pointer to the mlx5_dev_ctx_shared object.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
+{
+ int err;
+
+ if (sh->aso_age_mng)
+ return 0;
+ sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ if (!sh->aso_age_mng) {
+ DRV_LOG(ERR, "aso_age_mng allocation failed.");
+ rte_errno = ENOMEM;
+ return -ENOMEM;
+ }
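+ /* Create the ASO queue that services the flow hit (aging) queries. */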
+ err = mlx5_aso_queue_init(sh);
+ if (err) {
+ mlx5_free(sh->aso_age_mng);
+ sh->aso_age_mng = NULL;
+ return -1;
+ }
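+ /* resize_sl guards pool array growth; free_sl guards the free list. */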
+ rte_spinlock_init(&sh->aso_age_mng->resize_sl);
+ rte_spinlock_init(&sh->aso_age_mng->free_sl);
+ LIST_INIT(&sh->aso_age_mng->free);
+ return 0;
+}
+
+/**
+ * Close and release all the resources of the ASO aging management structure.
+ *
+ * @param[in] sh
+ * Pointer to mlx5_dev_ctx_shared object to free.
+ */
+static void
+mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
+{
+ int i, j;
+
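+ /* Stop and release the ASO queue before freeing the pools behind it. */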
+ mlx5_aso_queue_stop(sh);
+ mlx5_aso_queue_uninit(sh);
+ if (sh->aso_age_mng->pools) {
+ struct mlx5_aso_age_pool *pool;
+
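+ /* Each pool owns one DevX flow hit ASO object plus per-entry DR actions. */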
+ for (i = 0; i < sh->aso_age_mng->next; ++i) {
+ pool = sh->aso_age_mng->pools[i];
+ claim_zero(mlx5_devx_cmd_destroy
+ (pool->flow_hit_aso_obj));
+ for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j)
+ if (pool->actions[j].dr_action)
+ claim_zero
+ (mlx5_glue->destroy_flow_action
+ (pool->actions[j].dr_action));
+ mlx5_free(pool);
+ }
+ mlx5_free(sh->aso_age_mng->pools);
+ }
+ mlx5_free(sh->aso_age_mng);
+}
+
/**
* Initialize the shared aging list information per port.
*
age_info = &sh->port[i].age_info;
age_info->flags = 0;
TAILQ_INIT(&age_info->aged_counters);
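+ /* aged_aso collects aged-out ASO flow hit actions, alongside the aged counters. */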
+ LIST_INIT(&age_info->aged_aso);
rte_spinlock_init(&age_info->aged_sl);
MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
}
memset(&sh->cmng, 0, sizeof(sh->cmng));
}
+/* Send FLOW_AGED event if needed. */
+void
+mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
+{
+ struct mlx5_age_info *age_info;
+ uint32_t i;
+
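+ /* Raise RTE_ETH_EVENT_FLOW_AGED on every port with newly aged flows. */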
+ for (i = 0; i < sh->max_port; i++) {
+ age_info = &sh->port[i].age_info;
+ if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
+ continue;
+ if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
+ rte_eth_dev_callback_process
+ (&rte_eth_devices[sh->port[i].devx_ih_port_id],
+ RTE_ETH_EVENT_FLOW_AGED, NULL);
+ age_info->flags = 0;
+ }
+}
+
/**
* Initialize the flow resources' indexed mempool.
*
goto error;
}
sh->refcnt = 1;
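+ /* No bonding device yet; UINT16_MAX is the invalid port ID. */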
+ sh->bond_dev = UINT16_MAX;
sh->max_port = spawn->max_port;
strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
sizeof(sh->ibdev_name) - 1);
* Only primary process handles async device events.
*/
mlx5_flow_counters_mng_close(sh);
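+ /* Release the ASO aging management resources if they were created. */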
+ if (sh->aso_age_mng) {
+ mlx5_flow_aso_age_mng_close(sh);
+ sh->aso_age_mng = NULL;
+ }
mlx5_flow_ipool_destroy(sh);
mlx5_os_dev_shared_handler_uninstall(sh);
if (sh->cnt_id_tbl) {
}
/**
- * Destroy table hash list and all the root entries per domain.
+ * Destroy table hash list.
*
* @param[in] priv
* Pointer to the private device data structure.
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_flow_tbl_data_entry *tbl_data;
- union mlx5_flow_tbl_key table_key = {
- {
- .table_id = 0,
- .reserved = 0,
- .domain = 0,
- .direction = 0,
- }
- };
- struct mlx5_hlist_entry *pos;
if (!sh->flow_tbls)
return;
- pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
- if (pos) {
- tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
- entry);
- MLX5_ASSERT(tbl_data);
- mlx5_hlist_remove(sh->flow_tbls, pos);
- mlx5_free(tbl_data);
- }
- table_key.direction = 1;
- pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
- if (pos) {
- tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
- entry);
- MLX5_ASSERT(tbl_data);
- mlx5_hlist_remove(sh->flow_tbls, pos);
- mlx5_free(tbl_data);
- }
- table_key.direction = 0;
- table_key.domain = 1;
- pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
- if (pos) {
- tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
- entry);
- MLX5_ASSERT(tbl_data);
- mlx5_hlist_remove(sh->flow_tbls, pos);
- mlx5_free(tbl_data);
- }
- mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
+ mlx5_hlist_destroy(sh->flow_tbls);
}
/**
* Zero on success, positive error code otherwise.
*/
int
-mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
+mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
{
+ int err = 0;
+ /* Tables are only used in DV and DR modes. */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_dev_ctx_shared *sh = priv->sh;
char s[MLX5_HLIST_NAMESIZE];
- int err = 0;
MLX5_ASSERT(sh);
snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
- sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
+ sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
+ 0, 0, flow_dv_tbl_create_cb, NULL,
+ flow_dv_tbl_remove_cb);
if (!sh->flow_tbls) {
DRV_LOG(ERR, "flow tables with hash creation failed.");
err = ENOMEM;
return err;
}
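+ /* The create/remove callbacks take the shared context from the hlist ctx. */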
+ sh->flow_tbls->ctx = sh;
#ifndef HAVE_MLX5DV_DR
+ struct rte_flow_error error;
+ struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];
+
/*
* When DR support is not available, the zero tables must still be
* created because DV expects to see them even if RDMA-CORE cannot
* create them.
*/
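+ /* Get the level-0 tables for the ingress, egress and FDB domains. */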
- union mlx5_flow_tbl_key table_key = {
- {
- .table_id = 0,
- .reserved = 0,
- .domain = 0,
- .direction = 0,
- }
- };
- struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(*tbl_data), 0,
- SOCKET_ID_ANY);
-
- if (!tbl_data) {
+ if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0, NULL, 0, 1, &error) ||
+ !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0, NULL, 0, 1, &error) ||
+ !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0, NULL, 0, 1, &error)) {
err = ENOMEM;
goto error;
}
- tbl_data->entry.key = table_key.v64;
- err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
- if (err)
- goto error;
- __atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
- table_key.direction = 1;
- tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
- SOCKET_ID_ANY);
- if (!tbl_data) {
- err = ENOMEM;
- goto error;
- }
- tbl_data->entry.key = table_key.v64;
- err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
- if (err)
- goto error;
- __atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
- table_key.direction = 0;
- table_key.domain = 1;
- tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
- SOCKET_ID_ANY);
- if (!tbl_data) {
- err = ENOMEM;
- goto error;
- }
- tbl_data->entry.key = table_key.v64;
- err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
- if (err)
- goto error;
- __atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
return err;
error:
mlx5_free_table_hash_list(priv);
#endif /* HAVE_MLX5DV_DR */
+#endif
return err;
}
if (priv->drop_queue.hrxq)
mlx5_drop_action_destroy(dev);
if (priv->mreg_cp_tbl)
- mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL);
+ mlx5_hlist_destroy(priv->mreg_cp_tbl);
mlx5_mprq_free_mp(dev);
mlx5_os_free_shared_dr(priv);
if (priv->rss_conf.rss_key != NULL)
if (ret)
DRV_LOG(WARNING, "port %u some flows still remain",
dev->data->port_id);
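+ /* Hash Rx queues are kept in a cache list; release it after the flows. */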
+ mlx5_cache_list_destroy(&priv->hrxqs);
/*
* Free the shared context in last turn, because the cleanup
* routines above may use some shared fields, like
}
mod = tmp >= 0 ? tmp : -tmp;
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
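+ /* The devarg value also selects the miniCQE format; 0 keeps compression off. */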
+ if (tmp > MLX5_CQE_RESP_FORMAT_L34H_STRIDX) {
+ DRV_LOG(ERR, "invalid CQE compression "
+ "format parameter");
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
config->cqe_comp = !!tmp;
+ config->cqe_comp_fmt = tmp;
} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
config->cqe_pad = !!tmp;
} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
},
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
- PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
+ PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
},
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,