#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"
+#include "rte_pmd_mlx5.h"
static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
},
};
+/* Key of thread specific flow workspace data. */
+static pthread_key_t key_workspace;
+
+/*
+ * One-time initialization control for the flow workspace key.
+ * POSIX requires pthread_once_t objects to be statically initialized with
+ * PTHREAD_ONCE_INIT; relying on implicit zero-initialization is undefined.
+ */
+static pthread_once_t key_workspace_init = PTHREAD_ONCE_INIT;
+
+
/**
* Translate tag ID to register.
*
* Item specification.
* @param[in] item_flags
* Bit-fields that holds the items detected until now.
+ * @param[in] ext_vlan_sup
+ * Whether extended VLAN features are supported or not.
* @param[out] error
* Pointer to error structure.
*
*/
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
- uint64_t item_flags,
+ uint64_t item_flags, bool ext_vlan_sup,
struct rte_flow_error *error)
{
const struct rte_flow_item_eth *mask = item->mask;
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.type = RTE_BE16(0xffff),
+ .has_vlan = ext_vlan_sup ? 1 : 0,
};
int ret;
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}
-/* Allocate unique ID for the split Q/RSS subflows. */
-static uint32_t
-flow_qrss_get_id(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t qrss_id, ret;
-
- ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
- if (ret)
- return 0;
- MLX5_ASSERT(qrss_id);
- return qrss_id;
-}
-
-/* Free unique ID for the split Q/RSS subflows. */
-static void
-flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- if (qrss_id)
- mlx5_flow_id_release(priv->qrss_id_pool, qrss_id);
-}
-
/**
* Release resource related QUEUE/RSS action split.
*
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dev_handle, next)
if (dev_handle->split_flow_id)
- flow_qrss_free_id(dev, dev_handle->split_flow_id);
+ mlx5_ipool_free(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+ dev_handle->split_flow_id);
}
static int
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
}
+/*
+ * No-op sync_domain callback for the void (null) driver ops.
+ * Always reports success so that callers dispatching through
+ * mlx5_flow_null_drv_ops never dereference a NULL function pointer.
+ */
+static int
+flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
+		      uint32_t domains __rte_unused,
+		      uint32_t flags __rte_unused)
+{
+	return 0;
+}
+
/* Void driver to protect from null pointer reference. */
const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
.validate = flow_null_validate,
.remove = flow_null_remove,
.destroy = flow_null_destroy,
.query = flow_null_query,
+ .sync_domain = flow_null_sync_domain,
};
/**
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
const struct rte_flow_action_raw_encap *raw_encap;
+ const struct rte_eth_hairpin_conf *conf;
if (!attr->ingress)
return 0;
queue = actions->conf;
if (queue == NULL)
return 0;
- if (mlx5_rxq_get_type(dev, queue->index) !=
- MLX5_RXQ_TYPE_HAIRPIN)
+ conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
+ if (conf != NULL && !!conf->tx_explicit)
return 0;
queue_action = 1;
action_n++;
rss = actions->conf;
if (rss == NULL || rss->queue_num == 0)
return 0;
- if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
- MLX5_RXQ_TYPE_HAIRPIN)
+ conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
+ if (conf != NULL && !!conf->tx_explicit)
return 0;
queue_action = 1;
action_n++;
cp_mreg.src = ret;
/* Check if already registered. */
MLX5_ASSERT(priv->mreg_cp_tbl);
- mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
+ mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, NULL);
if (mcp_res) {
/* For non-default rule. */
if (mark_id != MLX5_DEFAULT_COPY_ID)
goto error;
mcp_res->refcnt++;
mcp_res->hlist_ent.key = mark_id;
- ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
- &mcp_res->hlist_ent);
+ ret = !mlx5_hlist_insert(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
MLX5_ASSERT(!ret);
if (ret)
goto error;
if (!priv->mreg_cp_tbl)
return;
mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
- MLX5_DEFAULT_COPY_ID);
+ MLX5_DEFAULT_COPY_ID, NULL);
if (!mcp_res)
return;
MLX5_ASSERT(mcp_res->rix_flow);
struct rte_flow_action actions_rx[],
struct rte_flow_action actions_tx[],
struct rte_flow_item pattern_tx[],
- uint32_t *flow_id)
+ uint32_t flow_id)
{
- struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_raw_encap *raw_encap;
const struct rte_flow_action_raw_decap *raw_decap;
struct mlx5_rte_flow_action_set_tag *set_tag;
char *addr;
int encap = 0;
- mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
set_tag = (void *)actions_rx;
set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
MLX5_ASSERT(set_tag->id > REG_NON);
- set_tag->data = *flow_id;
+ set_tag->data = flow_id;
tag_action->conf = set_tag;
/* Create Tx item list. */
rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
item->type = (enum rte_flow_item_type)
MLX5_RTE_FLOW_ITEM_TYPE_TAG;
tag_item = (void *)addr;
- tag_item->data = *flow_id;
+ tag_item->data = flow_id;
tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
MLX5_ASSERT(set_tag->id > REG_NON);
item->spec = tag_item;
struct rte_flow_action actions_sfx[],
struct rte_flow_action actions_pre[])
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_action *tag_action = NULL;
struct rte_flow_item *tag_item;
struct mlx5_rte_flow_action_set_tag *set_tag;
const struct rte_flow_action_raw_decap *raw_decap;
struct mlx5_rte_flow_item_tag *tag_spec;
struct mlx5_rte_flow_item_tag *tag_mask;
- uint32_t tag_id;
+ uint32_t tag_id = 0;
bool copy_vlan = false;
/* Prepare the actions for prefix and suffix flow. */
/* Set the tag. */
set_tag = (void *)actions_pre;
set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
- /*
- * Get the id from the qrss_pool to make qrss share the id with meter.
- */
- tag_id = flow_qrss_get_id(dev);
+ mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+ &tag_id);
+ if (tag_id >= (1 << (sizeof(tag_id) * 8 - MLX5_MTR_COLOR_BITS))) {
+ DRV_LOG(ERR, "Port %u meter flow id exceed max limit.",
+ dev->data->port_id);
+ mlx5_ipool_free(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], tag_id);
+ return 0;
+ } else if (!tag_id) {
+ return 0;
+ }
set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
assert(tag_action);
tag_action->conf = set_tag;
const struct rte_flow_action *qrss,
int actions_n, struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rte_flow_action_set_tag *set_tag;
struct rte_flow_action_jump *jump;
const int qrss_idx = qrss - actions;
* representors) domain even if they have coinciding
* IDs.
*/
- flow_id = flow_qrss_get_id(dev);
+ mlx5_ipool_malloc(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
if (!flow_id)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ACTION,
int qrss_action_pos,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rte_flow_action_set_tag *set_tag;
struct mlx5_rte_flow_item_tag *tag_spec;
struct mlx5_rte_flow_item_tag *tag_mask;
if (ret < 0)
return ret;
set_tag->id = ret;
- tag_id = flow_qrss_get_id(dev);
+ mlx5_ipool_malloc(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
set_tag->data = tag_id;
/* Prepare the suffix subflow items. */
tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
* These ones are included into parent flow list and will be destroyed
* by flow_drv_destroy.
*/
- flow_qrss_free_id(dev, qrss_id);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+ qrss_id);
mlx5_free(ext_actions);
return ret;
}
return tunnel;
}
+/**
+ * Adjust flow RSS workspace if needed.
+ *
+ * @param wks
+ * Pointer to thread flow work space.
+ * @param rss_desc
+ * Pointer to RSS descriptor.
+ * @param[in] nrssq_num
+ * New RSS queue number.
+ *
+ * @return
+ * 0 on success, -1 otherwise and rte_errno is set.
+ */
+static int
+flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
+			  struct mlx5_flow_rss_desc *rss_desc,
+			  uint32_t nrssq_num)
+{
+	bool fidx = !!wks->flow_idx;
+	void *queue;
+
+	/* Current queue array is already large enough - nothing to do. */
+	if (likely(nrssq_num <= wks->rssq_num[fidx]))
+		return 0;
+	/*
+	 * Grow through a temporary pointer: assigning the realloc() result
+	 * directly to rss_desc->queue would leak the old buffer when the
+	 * allocation fails (realloc leaves the original block allocated).
+	 */
+	queue = realloc(rss_desc->queue,
+		    sizeof(rss_desc->queue[0]) * RTE_ALIGN(nrssq_num, 2));
+	if (!queue) {
+		rte_errno = ENOMEM;
+		return -1;
+	}
+	rss_desc->queue = queue;
+	/* Round the stored capacity up to an even count to limit regrowth. */
+	wks->rssq_num[fidx] = RTE_ALIGN(nrssq_num, 2);
+	return 0;
+}
+
/**
* Create a flow and add it to @p list.
*
uint8_t buffer[2048];
} items_tx;
struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
- struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
- priv->rss_desc)[!!priv->flow_idx];
+ struct mlx5_flow_rss_desc *rss_desc;
const struct rte_flow_action *p_actions_rx;
uint32_t i;
uint32_t idx = 0;
int hairpin_flow;
- uint32_t hairpin_id = 0;
struct rte_flow_attr attr_tx = { .priority = 0 };
struct rte_flow_attr attr_factor = {0};
const struct rte_flow_action *actions;
struct rte_flow_action *translated_actions = NULL;
struct mlx5_flow_tunnel *tunnel;
struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
- int ret = flow_shared_actions_translate(original_actions,
- shared_actions,
- &shared_actions_n,
- &translated_actions, error);
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ bool fidx = !!wks->flow_idx;
+ int ret;
+ MLX5_ASSERT(wks);
+ rss_desc = &wks->rss_desc[fidx];
+ ret = flow_shared_actions_translate(original_actions,
+ shared_actions,
+ &shared_actions_n,
+ &translated_actions, error);
if (ret < 0) {
MLX5_ASSERT(translated_actions == NULL);
return 0;
external, hairpin_flow, error);
if (ret < 0)
goto error_before_hairpin_split;
+ flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
+ if (!flow) {
+ rte_errno = ENOMEM;
+ goto error_before_hairpin_split;
+ }
if (hairpin_flow > 0) {
if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
rte_errno = EINVAL;
}
flow_hairpin_split(dev, actions, actions_rx.actions,
actions_hairpin_tx.actions, items_tx.items,
- &hairpin_id);
+ idx);
p_actions_rx = actions_rx.actions;
}
- flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
- if (!flow) {
- rte_errno = ENOMEM;
- goto error_before_flow;
- }
flow->drv_type = flow_get_drv_type(dev, &attr_factor);
- if (hairpin_id != 0)
- flow->hairpin_flow_id = hairpin_id;
MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
flow->drv_type < MLX5_FLOW_TYPE_MAX);
- memset(rss_desc, 0, sizeof(*rss_desc));
+ memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
rss = flow_get_rss_action(p_actions_rx);
if (rss) {
+ if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
+ return 0;
/*
* The following information is required by
* mlx5_flow_hashfields_adjust() in advance.
* need to be translated before another calling.
* No need to use ping-pong buffer to save memory here.
*/
- if (priv->flow_idx) {
- MLX5_ASSERT(!priv->flow_nested_idx);
- priv->flow_nested_idx = priv->flow_idx;
+ if (fidx) {
+ MLX5_ASSERT(!wks->flow_nested_idx);
+ wks->flow_nested_idx = fidx;
}
for (i = 0; i < buf->entries; ++i) {
/*
if (ret < 0)
goto error;
}
- if (list)
+ if (list) {
+ rte_spinlock_lock(&priv->flow_list_lock);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
flow, next);
+ rte_spinlock_unlock(&priv->flow_list_lock);
+ }
flow_rxq_flags_set(dev, flow);
rte_free(translated_actions);
/* Nested flow creation index recovery. */
- priv->flow_idx = priv->flow_nested_idx;
- if (priv->flow_nested_idx)
- priv->flow_nested_idx = 0;
+ wks->flow_idx = wks->flow_nested_idx;
+ if (wks->flow_nested_idx)
+ wks->flow_nested_idx = 0;
tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
if (tunnel) {
flow->tunnel = 1;
flow_drv_destroy(dev, flow);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
rte_errno = ret; /* Restore rte_errno. */
-error_before_flow:
ret = rte_errno;
- if (hairpin_id)
- mlx5_flow_id_release(priv->sh->flow_id_pool,
- hairpin_id);
rte_errno = ret;
- priv->flow_idx = priv->flow_nested_idx;
- if (priv->flow_nested_idx)
- priv->flow_nested_idx = 0;
+ wks->flow_idx = wks->flow_nested_idx;
+ if (wks->flow_nested_idx)
+ wks->flow_nested_idx = 0;
error_before_hairpin_split:
rte_free(translated_actions);
return 0;
*/
if (dev->data->dev_started)
flow_rxq_flags_trim(dev, flow);
- if (flow->hairpin_flow_id)
- mlx5_flow_id_release(priv->sh->flow_id_pool,
- flow->hairpin_flow_id);
flow_drv_destroy(dev, flow);
- if (list)
+ if (list) {
+ rte_spinlock_lock(&priv->flow_list_lock);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
flow_idx, flow, next);
+ rte_spinlock_unlock(&priv->flow_list_lock);
+ }
flow_mreg_del_copy_action(dev, flow);
if (flow->fdir) {
LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
}
/**
- * Allocate intermediate resources for flow creation.
- *
- * @param dev
- * Pointer to Ethernet device.
+ * Release key of thread specific flow workspace data.
*/
-void
-mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
+static void
+flow_release_workspace(void *data)
{
-	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_workspace *wks = data;
-	if (!priv->inter_flows) {
-		priv->inter_flows = mlx5_malloc(MLX5_MEM_ZERO,
-					MLX5_NUM_MAX_DEV_FLOWS *
-					sizeof(struct mlx5_flow) +
-					(sizeof(struct mlx5_flow_rss_desc) +
-					sizeof(uint16_t) * UINT16_MAX) * 2, 0,
-					SOCKET_ID_ANY);
-		if (!priv->inter_flows) {
-			DRV_LOG(ERR, "can't allocate intermediate memory.");
-			return;
-		}
-	}
-	priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows)
-			 [MLX5_NUM_MAX_DEV_FLOWS];
-	/* Reset the index. */
-	priv->flow_idx = 0;
-	priv->flow_nested_idx = 0;
+	/* Destructor registered via pthread_key_create(); runs at thread exit. */
+	if (!wks)
+		return;
+	/* free(NULL) is a no-op, so a partially built workspace is safe here. */
+	free(wks->rss_desc[0].queue);
+	free(wks->rss_desc[1].queue);
+	free(wks);
+}
+
+/**
+ * Initialize key of thread specific flow workspace data.
+ */
+static void
+flow_alloc_workspace(void)
+{
+	/*
+	 * Runs once via pthread_once(); flow_release_workspace is installed
+	 * as the per-thread destructor for the key's value.
+	 * NOTE(review): on failure the error is only logged and the key stays
+	 * unusable - callers rely on pthread_getspecific() returning NULL in
+	 * that case; confirm this is the intended degradation.
+	 */
+	if (pthread_key_create(&key_workspace, flow_release_workspace))
+		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
+}
/**
- * Free intermediate resources for flows.
+ * Get thread specific flow workspace.
*
- * @param dev
- * Pointer to Ethernet device.
+ * @return pointer to thread specific flow workspace data, NULL on error.
*/
-void
-mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
+struct mlx5_flow_workspace*
+mlx5_flow_get_thread_workspace(void)
{
-	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_workspace *data;
-	mlx5_free(priv->inter_flows);
-	priv->inter_flows = NULL;
+	if (pthread_once(&key_workspace_init, flow_alloc_workspace)) {
+		DRV_LOG(ERR, "Failed to init flow workspace data thread key.");
+		return NULL;
+	}
+	data = pthread_getspecific(key_workspace);
+	if (!data) {
+		/* First use on this thread: allocate and register workspace. */
+		data = calloc(1, sizeof(*data));
+		if (!data) {
+			DRV_LOG(ERR, "Failed to allocate flow workspace "
+				"memory.");
+			return NULL;
+		}
+		data->rss_desc[0].queue = calloc(1,
+			sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
+		if (!data->rss_desc[0].queue)
+			goto err;
+		data->rss_desc[1].queue = calloc(1,
+			sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
+		if (!data->rss_desc[1].queue)
+			goto err;
+		data->rssq_num[0] = MLX5_RSSQ_DEFAULT_NUM;
+		data->rssq_num[1] = MLX5_RSSQ_DEFAULT_NUM;
+		if (pthread_setspecific(key_workspace, data)) {
+			DRV_LOG(ERR, "Failed to set flow workspace to thread.");
+			goto err;
+		}
+	}
+	return data;
+err:
+	/* free(NULL) is a no-op; no need to test the pointers before freeing. */
+	free(data->rss_desc[0].queue);
+	free(data->rss_desc[1].queue);
+	free(data);
+	return NULL;
}
/**
union mlx5_flow_tbl_key table_key = {
{
.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
- .reserved = 0,
+ .dummy = 0,
.domain = !!mbits.transfer,
.direction = 0,
}
};
- he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+ he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
return he ?
container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
}
uint32_t group, uint32_t *table,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hlist_entry *he;
struct tunnel_tbl_entry *tte;
union tunnel_tbl_key key = {
struct mlx5_hlist *group_hash;
group_hash = tunnel ? tunnel->groups : thub->groups;
- he = mlx5_hlist_lookup(group_hash, key.val);
+ he = mlx5_hlist_lookup(group_hash, key.val, NULL);
if (!he) {
- int ret;
tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
sizeof(*tte), 0,
SOCKET_ID_ANY);
if (!tte)
goto err;
tte->hash.key = key.val;
- ret = mlx5_flow_id_get(thub->table_ids, &tte->flow_table);
- if (ret) {
- mlx5_free(tte);
+ mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+ &tte->flow_table);
+ if (tte->flow_table >= MLX5_MAX_TABLES) {
+ DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
+ tte->flow_table);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+ tte->flow_table);
+ goto err;
+ } else if (!tte->flow_table) {
goto err;
}
tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
return 0;
err:
+ if (tte)
+ mlx5_free(tte);
return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
NULL, "tunnel group index not supported");
}
mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
struct mlx5_flow_tunnel *tunnel)
{
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids;
+ struct mlx5_priv *priv = dev->data->dev_private;
DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
dev->data->port_id, tunnel->tunnel_id);
RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
LIST_REMOVE(tunnel, chain);
- mlx5_flow_id_release(id_pool, tunnel->tunnel_id);
- mlx5_hlist_destroy(tunnel->groups, NULL, NULL);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
+ tunnel->tunnel_id);
+ mlx5_hlist_destroy(tunnel->groups);
mlx5_free(tunnel);
}
mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
const struct rte_flow_tunnel *app_tunnel)
{
- int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tunnel *tunnel;
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids;
uint32_t id;
- ret = mlx5_flow_id_get(id_pool, &id);
- if (ret)
+ mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+ &id);
+ if (id >= MLX5_MAX_TUNNELS) {
+ mlx5_ipool_free(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+ DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
+ return NULL;
+ } else if (!id) {
return NULL;
+ }
/**
* mlx5 flow tunnel is an auxlilary data structure
* It's not part of IO. No need to allocate it from
tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
0, SOCKET_ID_ANY);
if (!tunnel) {
- mlx5_flow_id_pool_release(id_pool);
+ mlx5_ipool_free(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
return NULL;
}
- tunnel->groups = mlx5_hlist_create("tunnel groups", 1024);
+ tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
+ NULL, NULL, NULL);
if (!tunnel->groups) {
- mlx5_flow_id_pool_release(id_pool);
+ mlx5_ipool_free(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
mlx5_free(tunnel);
return NULL;
}
return;
if (!LIST_EMPTY(&thub->tunnels))
DRV_LOG(WARNING, "port %u tunnels present\n", port_id);
- mlx5_flow_id_pool_release(thub->tunnel_ids);
- mlx5_flow_id_pool_release(thub->table_ids);
- mlx5_hlist_destroy(thub->groups, NULL, NULL);
+ mlx5_hlist_destroy(thub->groups);
mlx5_free(thub);
}
if (!thub)
return -ENOMEM;
LIST_INIT(&thub->tunnels);
- thub->tunnel_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TUNNELS);
- if (!thub->tunnel_ids) {
- err = -rte_errno;
- goto err;
- }
- thub->table_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TABLES);
- if (!thub->table_ids) {
- err = -rte_errno;
- goto err;
- }
- thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES);
+ thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
+ 0, NULL, NULL, NULL);
if (!thub->groups) {
err = -rte_errno;
goto err;
err:
if (thub->groups)
- mlx5_hlist_destroy(thub->groups, NULL, NULL);
- if (thub->table_ids)
- mlx5_flow_id_pool_release(thub->table_ids);
- if (thub->tunnel_ids)
- mlx5_flow_id_pool_release(thub->tunnel_ids);
+ mlx5_hlist_destroy(thub->groups);
if (thub)
mlx5_free(thub);
return err;
}
+
+#ifndef HAVE_MLX5DV_DR
+/* Without DR support, use placeholder bits matching the SW|HW flag layout. */
+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
+#else
+#define MLX5_DOMAIN_SYNC_FLOW \
+	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
+#endif
+
+/*
+ * Public PMD API: synchronize the given steering domains of a port with
+ * both SW and HW flow state through the driver's sync_domain callback.
+ * NOTE(review): port_id indexes rte_eth_devices without validation -
+ * presumably callers guarantee a valid started port; confirm whether a
+ * port-id check is required for this public entry point.
+ * Returns 0 on success, a negative value on error (positive callback
+ * returns are normalized to negative).
+ */
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	const struct mlx5_flow_driver_ops *fops;
+	int ret;
+	struct rte_flow_attr attr = { .transfer = 0 };
+
+	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
+	if (ret > 0)
+		ret = -ret;
+	return ret;
+}