struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
struct mlx5_flow_tunnel *tun;
+ rte_spinlock_lock(&thub->sl);
LIST_FOREACH(tun, &thub->tunnels, chain) {
- if (&tun->item == pmd_items)
+ if (&tun->item == pmd_items) {
+ LIST_REMOVE(tun, chain);
break;
+ }
}
+ rte_spinlock_unlock(&thub->sl);
if (!tun || num_items != 1)
return rte_flow_error_set(err, EINVAL,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
struct mlx5_flow_tunnel *tun;
+ rte_spinlock_lock(&thub->sl);
LIST_FOREACH(tun, &thub->tunnels, chain) {
- if (&tun->action == pmd_actions)
+ if (&tun->action == pmd_actions) {
+ LIST_REMOVE(tun, chain);
break;
+ }
}
+ rte_spinlock_unlock(&thub->sl);
if (!tun || num_actions != 1)
return rte_flow_error_set(err, EINVAL,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
.get_restore_info = mlx5_flow_tunnel_get_restore_info,
};
-/* Convert FDIR request to Generic flow. */
-struct mlx5_fdir {
- struct rte_flow_attr attr;
- struct rte_flow_item items[4];
- struct rte_flow_item_eth l2;
- struct rte_flow_item_eth l2_mask;
- union {
- struct rte_flow_item_ipv4 ipv4;
- struct rte_flow_item_ipv6 ipv6;
- } l3;
- union {
- struct rte_flow_item_ipv4 ipv4;
- struct rte_flow_item_ipv6 ipv6;
- } l3_mask;
- union {
- struct rte_flow_item_udp udp;
- struct rte_flow_item_tcp tcp;
- } l4;
- union {
- struct rte_flow_item_udp udp;
- struct rte_flow_item_tcp tcp;
- } l4_mask;
- struct rte_flow_action actions[2];
- struct rte_flow_action_queue queue;
-};
-
/* Tunnel information. */
struct mlx5_flow_tunnel_info {
uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}
-/* Allocate unique ID for the split Q/RSS subflows. */
-static uint32_t
-flow_qrss_get_id(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t qrss_id, ret;
-
- ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
- if (ret)
- return 0;
- MLX5_ASSERT(qrss_id);
- return qrss_id;
-}
-
-/* Free unique ID for the split Q/RSS subflows. */
-static void
-flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- if (qrss_id)
- mlx5_flow_id_release(priv->qrss_id_pool, qrss_id);
-}
-
/**
* Release resource related QUEUE/RSS action split.
*
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dev_handle, next)
if (dev_handle->split_flow_id)
- flow_qrss_free_id(dev, dev_handle->split_flow_id);
+ mlx5_ipool_free(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+ dev_handle->split_flow_id);
}
static int
return fops->apply(dev, flow, error);
}
-/**
- * Flow driver remove API. This abstracts calling driver specific functions.
- * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
- * on device. All the resources of the flow should be freed by calling
- * flow_drv_destroy().
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to flow structure.
- */
-static inline void
-flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- const struct mlx5_flow_driver_ops *fops;
- enum mlx5_flow_drv_type type = flow->drv_type;
-
- MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
- fops = flow_get_drv_ops(type);
- fops->remove(dev, flow);
-}
-
/**
* Flow driver destroy API. This abstracts calling driver specific functions.
* Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
uint32_t flow_idx);
-/**
- * Add a flow of copying flow metadata registers in RX_CP_TBL.
- *
- * As mark_id is unique, if there's already a registered flow for the mark_id,
- * return by increasing the reference counter of the resource. Otherwise, create
- * the resource (mcp_res) and flow.
- *
- * Flow looks like,
- * - If ingress port is ANY and reg_c[1] is mark_id,
- * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
- *
- * For default flow (zero mark_id), flow is like,
- * - If ingress port is ANY,
- * reg_b := reg_c[0] and jump to RX_ACT_TBL.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param mark_id
- * ID of MARK action, zero means default flow for META.
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- *
- * @return
- * Associated resource on success, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_flow_mreg_copy_resource *
-flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
- struct rte_flow_error *error)
+struct mlx5_hlist_entry *
+flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
+ void *cb_ctx)
{
+ struct rte_eth_dev *dev = list->ctx;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct rte_flow_error *error = ctx->error;
+ uint32_t idx = 0;
+ int ret;
+ uint32_t mark_id = key;
struct rte_flow_attr attr = {
.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
.ingress = 1,
struct rte_flow_action actions[] = {
[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
};
- struct mlx5_flow_mreg_copy_resource *mcp_res;
- uint32_t idx = 0;
- int ret;
/* Fill the register fileds in the flow. */
ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
if (ret < 0)
return NULL;
cp_mreg.src = ret;
- /* Check if already registered. */
- MLX5_ASSERT(priv->mreg_cp_tbl);
- mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
- if (mcp_res) {
- /* For non-default rule. */
- if (mark_id != MLX5_DEFAULT_COPY_ID)
- mcp_res->refcnt++;
- MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
- mcp_res->refcnt == 1);
- return mcp_res;
- }
/* Provide the full width of FLAG specific value. */
if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
*/
mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
actions, false, error);
- if (!mcp_res->rix_flow)
- goto error;
- mcp_res->refcnt++;
- mcp_res->hlist_ent.key = mark_id;
- ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
- &mcp_res->hlist_ent);
- MLX5_ASSERT(!ret);
- if (ret)
- goto error;
- return mcp_res;
-error:
- if (mcp_res->rix_flow)
- flow_list_destroy(dev, NULL, mcp_res->rix_flow);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
- return NULL;
+ if (!mcp_res->rix_flow) {
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
+ return NULL;
+ }
+ return &mcp_res->hlist_ent;
}
/**
- * Release flow in RX_CP_TBL.
+ * Add a flow of copying flow metadata registers in RX_CP_TBL.
+ *
+ * As mark_id is unique, if there's already a registered flow for the mark_id,
+ * return by increasing the reference counter of the resource. Otherwise, create
+ * the resource (mcp_res) and flow.
+ *
+ * Flow looks like,
+ * - If ingress port is ANY and reg_c[1] is mark_id,
+ * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * For default flow (zero mark_id), flow is like,
+ * - If ingress port is ANY,
+ * reg_b := reg_c[0] and jump to RX_ACT_TBL.
*
* @param dev
* Pointer to Ethernet device.
- * @flow
- * Parent flow for wich copying is provided.
+ * @param mark_id
+ * ID of MARK action, zero means default flow for META.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * Associated resource on success, NULL otherwise and rte_errno is set.
*/
-static void
-flow_mreg_del_copy_action(struct rte_eth_dev *dev,
- struct rte_flow *flow)
+static struct mlx5_flow_mreg_copy_resource *
+flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
+ struct rte_flow_error *error)
{
- struct mlx5_flow_mreg_copy_resource *mcp_res;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hlist_entry *entry;
+ struct mlx5_flow_cb_ctx ctx = {
+ .dev = dev,
+ .error = error,
+ };
- if (!flow->rix_mreg_copy)
- return;
- mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
- flow->rix_mreg_copy);
- if (!mcp_res || !priv->mreg_cp_tbl)
- return;
- if (flow->copy_applied) {
- MLX5_ASSERT(mcp_res->appcnt);
- flow->copy_applied = 0;
- --mcp_res->appcnt;
- if (!mcp_res->appcnt) {
- struct rte_flow *mcp_flow = mlx5_ipool_get
- (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
- mcp_res->rix_flow);
-
- if (mcp_flow)
- flow_drv_remove(dev, mcp_flow);
- }
- }
- /*
- * We do not check availability of metadata registers here,
- * because copy resources are not allocated in this case.
- */
- if (--mcp_res->refcnt)
- return;
- MLX5_ASSERT(mcp_res->rix_flow);
- flow_list_destroy(dev, NULL, mcp_res->rix_flow);
- mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
- flow->rix_mreg_copy = 0;
+ /* Check if already registered. */
+ MLX5_ASSERT(priv->mreg_cp_tbl);
+ /*
+ * Lookup-or-create: on a miss the hlist presumably invokes its
+ * create callback (flow_dv_mreg_create_cb) with @ctx; reference
+ * counting is handled inside the hlist — TODO(review) confirm
+ * the callback wiring where mreg_cp_tbl is created.
+ */
+ entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
+ if (!entry)
+ return NULL;
+ return container_of(entry, struct mlx5_flow_mreg_copy_resource,
+ hlist_ent);
}
-/**
- * Start flow in RX_CP_TBL.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @flow
- * Parent flow for wich copying is provided.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-flow_mreg_start_copy_action(struct rte_eth_dev *dev,
- struct rte_flow *flow)
+/**
+ * Hash-list remove callback for the metadata-register copy table
+ * (RX_CP_TBL). Destroys the copy flow and frees the MCP ipool
+ * entry; the owning Ethernet device is carried in list->ctx.
+ */
+void
+flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
{
- struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_flow_mreg_copy_resource *mcp_res =
+ container_of(entry, typeof(*mcp_res), hlist_ent);
+ struct rte_eth_dev *dev = list->ctx;
struct mlx5_priv *priv = dev->data->dev_private;
- int ret;
- if (!flow->rix_mreg_copy || flow->copy_applied)
- return 0;
- mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
- flow->rix_mreg_copy);
- if (!mcp_res)
- return 0;
- if (!mcp_res->appcnt) {
- struct rte_flow *mcp_flow = mlx5_ipool_get
- (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
- mcp_res->rix_flow);
-
- if (mcp_flow) {
- ret = flow_drv_apply(dev, mcp_flow, NULL);
- if (ret)
- return ret;
- }
- }
- ++mcp_res->appcnt;
- flow->copy_applied = 1;
- return 0;
+ MLX5_ASSERT(mcp_res->rix_flow);
+ flow_list_destroy(dev, NULL, mcp_res->rix_flow);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
}
/**
- * Stop flow in RX_CP_TBL.
+ * Release flow in RX_CP_TBL.
*
* @param dev
* Pointer to Ethernet device.
* Parent flow for wich copying is provided.
*/
static void
-flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
- struct rte_flow *flow)
+flow_mreg_del_copy_action(struct rte_eth_dev *dev,
+ struct rte_flow *flow)
{
struct mlx5_flow_mreg_copy_resource *mcp_res;
struct mlx5_priv *priv = dev->data->dev_private;
- if (!flow->rix_mreg_copy || !flow->copy_applied)
+ if (!flow->rix_mreg_copy)
return;
mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
flow->rix_mreg_copy);
- if (!mcp_res)
+ if (!mcp_res || !priv->mreg_cp_tbl)
return;
- MLX5_ASSERT(mcp_res->appcnt);
- --mcp_res->appcnt;
- flow->copy_applied = 0;
- if (!mcp_res->appcnt) {
- struct rte_flow *mcp_flow = mlx5_ipool_get
- (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
- mcp_res->rix_flow);
-
- if (mcp_flow)
- flow_drv_remove(dev, mcp_flow);
- }
+ MLX5_ASSERT(mcp_res->rix_flow);
+ /*
+ * Final destruction (copy flow + MCP ipool entry) is delegated to
+ * the hlist remove callback, flow_dv_mreg_remove_cb.
+ */
+ mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
+ flow->rix_mreg_copy = 0;
}
/**
* Remove the default copy action from RX_CP_TBL.
*
+ * This function is called in the mlx5_dev_start(); thread safety
+ * is not guaranteed.
+ *
* @param dev
* Pointer to Ethernet device.
*/
static void
flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
{
- struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_hlist_entry *entry;
struct mlx5_priv *priv = dev->data->dev_private;
/* Check if default flow is registered. */
if (!priv->mreg_cp_tbl)
return;
- mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
- MLX5_DEFAULT_COPY_ID);
- if (!mcp_res)
+ entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
+ MLX5_DEFAULT_COPY_ID, NULL);
+ if (!entry)
return;
- MLX5_ASSERT(mcp_res->rix_flow);
- flow_list_destroy(dev, NULL, mcp_res->rix_flow);
- mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
+ /* Flow and MCP entry are freed by the hlist remove callback. */
+ mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
}
/**
* Add the default copy action in in RX_CP_TBL.
*
+ * This function is called in the mlx5_dev_start(); thread safety
+ * is not guaranteed.
+ *
* @param dev
* Pointer to Ethernet device.
* @param[out] error
!mlx5_flow_ext_mreg_supported(dev) ||
!priv->sh->dv_regc0_mask)
return 0;
+ /*
+ * Adding the default mreg copy flow may be attempted multiple times,
+ * but it is removed only once at stop. Avoid registering it twice.
+ */
+ if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL))
+ return 0;
mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
if (!mcp_res)
return -rte_errno;
if (!mcp_res)
return -rte_errno;
flow->rix_mreg_copy = mcp_res->idx;
- if (dev->data->dev_started) {
- mcp_res->appcnt++;
- flow->copy_applied = 1;
- }
return 0;
case RTE_FLOW_ACTION_TYPE_MARK:
mark = (const struct rte_flow_action_mark *)
if (!mcp_res)
return -rte_errno;
flow->rix_mreg_copy = mcp_res->idx;
- if (dev->data->dev_started) {
- mcp_res->appcnt++;
- flow->copy_applied = 1;
- }
return 0;
default:
break;
struct rte_flow_action actions_rx[],
struct rte_flow_action actions_tx[],
struct rte_flow_item pattern_tx[],
- uint32_t *flow_id)
+ uint32_t flow_id)
{
- struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_raw_encap *raw_encap;
const struct rte_flow_action_raw_decap *raw_decap;
struct mlx5_rte_flow_action_set_tag *set_tag;
char *addr;
int encap = 0;
- mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
set_tag = (void *)actions_rx;
set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
MLX5_ASSERT(set_tag->id > REG_NON);
- set_tag->data = *flow_id;
+ set_tag->data = flow_id;
tag_action->conf = set_tag;
/* Create Tx item list. */
rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
item->type = (enum rte_flow_item_type)
MLX5_RTE_FLOW_ITEM_TYPE_TAG;
tag_item = (void *)addr;
- tag_item->data = *flow_id;
+ tag_item->data = flow_id;
tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
MLX5_ASSERT(set_tag->id > REG_NON);
item->spec = tag_item;
struct rte_flow_action actions_sfx[],
struct rte_flow_action actions_pre[])
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_action *tag_action = NULL;
struct rte_flow_item *tag_item;
struct mlx5_rte_flow_action_set_tag *set_tag;
const struct rte_flow_action_raw_decap *raw_decap;
struct mlx5_rte_flow_item_tag *tag_spec;
struct mlx5_rte_flow_item_tag *tag_mask;
- uint32_t tag_id;
+ uint32_t tag_id = 0;
bool copy_vlan = false;
/* Prepare the actions for prefix and suffix flow. */
/* Set the tag. */
set_tag = (void *)actions_pre;
set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
- /*
- * Get the id from the qrss_pool to make qrss share the id with meter.
- */
- tag_id = flow_qrss_get_id(dev);
+ mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+ &tag_id);
+ if (tag_id >= (1 << (sizeof(tag_id) * 8 - MLX5_MTR_COLOR_BITS))) {
+ DRV_LOG(ERR, "Port %u meter flow id exceed max limit.",
+ dev->data->port_id);
+ mlx5_ipool_free(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], tag_id);
+ return 0;
+ } else if (!tag_id) {
+ return 0;
+ }
set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
assert(tag_action);
tag_action->conf = set_tag;
const struct rte_flow_action *qrss,
int actions_n, struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rte_flow_action_set_tag *set_tag;
struct rte_flow_action_jump *jump;
const int qrss_idx = qrss - actions;
* representors) domain even if they have coinciding
* IDs.
*/
- flow_id = flow_qrss_get_id(dev);
+ mlx5_ipool_malloc(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
if (!flow_id)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ACTION,
int qrss_action_pos,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rte_flow_action_set_tag *set_tag;
struct mlx5_rte_flow_item_tag *tag_spec;
struct mlx5_rte_flow_item_tag *tag_mask;
if (ret < 0)
return ret;
set_tag->id = ret;
- tag_id = flow_qrss_get_id(dev);
+ mlx5_ipool_malloc(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
set_tag->data = tag_id;
/* Prepare the suffix subflow items. */
tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
* These ones are included into parent flow list and will be destroyed
* by flow_drv_destroy.
*/
- flow_qrss_free_id(dev, qrss_id);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+ qrss_id);
mlx5_free(ext_actions);
return ret;
}
uint32_t i;
uint32_t idx = 0;
int hairpin_flow;
- uint32_t hairpin_id = 0;
struct rte_flow_attr attr_tx = { .priority = 0 };
struct rte_flow_attr attr_factor = {0};
const struct rte_flow_action *actions;
external, hairpin_flow, error);
if (ret < 0)
goto error_before_hairpin_split;
+ flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
+ if (!flow) {
+ rte_errno = ENOMEM;
+ goto error_before_hairpin_split;
+ }
if (hairpin_flow > 0) {
if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
rte_errno = EINVAL;
}
flow_hairpin_split(dev, actions, actions_rx.actions,
actions_hairpin_tx.actions, items_tx.items,
- &hairpin_id);
+ idx);
p_actions_rx = actions_rx.actions;
}
- flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
- if (!flow) {
- rte_errno = ENOMEM;
- goto error_before_flow;
- }
flow->drv_type = flow_get_drv_type(dev, &attr_factor);
- if (hairpin_id != 0)
- flow->hairpin_flow_id = hairpin_id;
MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
flow->drv_type < MLX5_FLOW_TYPE_MAX);
memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
if (ret < 0)
goto error;
}
- if (list)
+ if (list) {
+ rte_spinlock_lock(&priv->flow_list_lock);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
flow, next);
+ rte_spinlock_unlock(&priv->flow_list_lock);
+ }
flow_rxq_flags_set(dev, flow);
rte_free(translated_actions);
/* Nested flow creation index recovery. */
flow_drv_destroy(dev, flow);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
rte_errno = ret; /* Restore rte_errno. */
-error_before_flow:
ret = rte_errno;
- if (hairpin_id)
- mlx5_flow_id_release(priv->sh->flow_id_pool,
- hairpin_id);
rte_errno = ret;
wks->flow_idx = wks->flow_nested_idx;
if (wks->flow_nested_idx)
uint32_t flow_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_fdir_flow *priv_fdir_flow = NULL;
struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
[MLX5_IPOOL_RTE_FLOW], flow_idx);
*/
if (dev->data->dev_started)
flow_rxq_flags_trim(dev, flow);
- if (flow->hairpin_flow_id)
- mlx5_flow_id_release(priv->sh->flow_id_pool,
- flow->hairpin_flow_id);
flow_drv_destroy(dev, flow);
- if (list)
+ if (list) {
+ rte_spinlock_lock(&priv->flow_list_lock);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
flow_idx, flow, next);
- flow_mreg_del_copy_action(dev, flow);
- if (flow->fdir) {
- LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
- if (priv_fdir_flow->rix_flow == flow_idx)
- break;
- }
- if (priv_fdir_flow) {
- LIST_REMOVE(priv_fdir_flow, next);
- mlx5_free(priv_fdir_flow->fdir);
- mlx5_free(priv_fdir_flow);
- }
+ rte_spinlock_unlock(&priv->flow_list_lock);
}
+ flow_mreg_del_copy_action(dev, flow);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
if (flow->tunnel) {
struct mlx5_flow_tunnel *tunnel;
+
+ rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
RTE_VERIFY(tunnel);
+ LIST_REMOVE(tunnel, chain);
+ rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
mlx5_flow_tunnel_free(dev, tunnel);
}
}
}
-/**
- * Remove all flows.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param list
- * Pointer to the Indexed flow list.
- */
-void
-mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = NULL;
- uint32_t idx;
-
- ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx,
- flow, next) {
- flow_drv_remove(dev, flow);
- flow_mreg_stop_copy_action(dev, flow);
- }
- flow_mreg_del_default_copy_action(dev);
- flow_rxq_flags_clear(dev);
-}
-
-/**
- * Add all flows.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param list
- * Pointer to the Indexed flow list.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = NULL;
- struct rte_flow_error error;
- uint32_t idx;
- int ret = 0;
-
- /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
- ret = flow_mreg_add_default_copy_action(dev, &error);
- if (ret < 0)
- return -rte_errno;
- /* Apply Flows created by application. */
- ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx,
- flow, next) {
- ret = flow_mreg_start_copy_action(dev, flow);
- if (ret < 0)
- goto error;
- ret = flow_drv_apply(dev, flow, &error);
- if (ret < 0)
- goto error;
- flow_rxq_flags_set(dev, flow);
- }
- return 0;
-error:
- ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_flow_stop(dev, list);
- rte_errno = ret; /* Restore rte_errno. */
- return -rte_errno;
-}
-
/**
* Stop all default actions for flows.
*
return 0;
}
-/**
- * Convert a flow director filter to a generic flow.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param fdir_filter
- * Flow director filter to add.
- * @param attributes
- * Generic flow parameters structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-flow_fdir_filter_convert(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter,
- struct mlx5_fdir *attributes)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- const struct rte_eth_fdir_input *input = &fdir_filter->input;
- const struct rte_eth_fdir_masks *mask =
- &dev->data->dev_conf.fdir_conf.mask;
-
- /* Validate queue number. */
- if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
- DRV_LOG(ERR, "port %u invalid queue number %d",
- dev->data->port_id, fdir_filter->action.rx_queue);
- rte_errno = EINVAL;
- return -rte_errno;
- }
- attributes->attr.ingress = 1;
- attributes->items[0] = (struct rte_flow_item) {
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- .spec = &attributes->l2,
- .mask = &attributes->l2_mask,
- };
- switch (fdir_filter->action.behavior) {
- case RTE_ETH_FDIR_ACCEPT:
- attributes->actions[0] = (struct rte_flow_action){
- .type = RTE_FLOW_ACTION_TYPE_QUEUE,
- .conf = &attributes->queue,
- };
- break;
- case RTE_ETH_FDIR_REJECT:
- attributes->actions[0] = (struct rte_flow_action){
- .type = RTE_FLOW_ACTION_TYPE_DROP,
- };
- break;
- default:
- DRV_LOG(ERR, "port %u invalid behavior %d",
- dev->data->port_id,
- fdir_filter->action.behavior);
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
- attributes->queue.index = fdir_filter->action.rx_queue;
- /* Handle L3. */
- switch (fdir_filter->input.flow_type) {
- case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
- case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
- attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
- .src_addr = input->flow.ip4_flow.src_ip,
- .dst_addr = input->flow.ip4_flow.dst_ip,
- .time_to_live = input->flow.ip4_flow.ttl,
- .type_of_service = input->flow.ip4_flow.tos,
- };
- attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
- .src_addr = mask->ipv4_mask.src_ip,
- .dst_addr = mask->ipv4_mask.dst_ip,
- .time_to_live = mask->ipv4_mask.ttl,
- .type_of_service = mask->ipv4_mask.tos,
- .next_proto_id = mask->ipv4_mask.proto,
- };
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV4,
- .spec = &attributes->l3,
- .mask = &attributes->l3_mask,
- };
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
- attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
- .hop_limits = input->flow.ipv6_flow.hop_limits,
- .proto = input->flow.ipv6_flow.proto,
- };
-
- memcpy(attributes->l3.ipv6.hdr.src_addr,
- input->flow.ipv6_flow.src_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
- memcpy(attributes->l3.ipv6.hdr.dst_addr,
- input->flow.ipv6_flow.dst_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
- memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
- mask->ipv6_mask.src_ip,
- RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
- memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
- mask->ipv6_mask.dst_ip,
- RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV6,
- .spec = &attributes->l3,
- .mask = &attributes->l3_mask,
- };
- break;
- default:
- DRV_LOG(ERR, "port %u invalid flow type%d",
- dev->data->port_id, fdir_filter->input.flow_type);
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
- /* Handle L4. */
- switch (fdir_filter->input.flow_type) {
- case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
- attributes->l4.udp.hdr = (struct rte_udp_hdr){
- .src_port = input->flow.udp4_flow.src_port,
- .dst_port = input->flow.udp4_flow.dst_port,
- };
- attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
- .src_port = mask->src_port_mask,
- .dst_port = mask->dst_port_mask,
- };
- attributes->items[2] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_UDP,
- .spec = &attributes->l4,
- .mask = &attributes->l4_mask,
- };
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
- .src_port = input->flow.tcp4_flow.src_port,
- .dst_port = input->flow.tcp4_flow.dst_port,
- };
- attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
- .src_port = mask->src_port_mask,
- .dst_port = mask->dst_port_mask,
- };
- attributes->items[2] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_TCP,
- .spec = &attributes->l4,
- .mask = &attributes->l4_mask,
- };
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- attributes->l4.udp.hdr = (struct rte_udp_hdr){
- .src_port = input->flow.udp6_flow.src_port,
- .dst_port = input->flow.udp6_flow.dst_port,
- };
- attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
- .src_port = mask->src_port_mask,
- .dst_port = mask->dst_port_mask,
- };
- attributes->items[2] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_UDP,
- .spec = &attributes->l4,
- .mask = &attributes->l4_mask,
- };
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
- .src_port = input->flow.tcp6_flow.src_port,
- .dst_port = input->flow.tcp6_flow.dst_port,
- };
- attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
- .src_port = mask->src_port_mask,
- .dst_port = mask->dst_port_mask,
- };
- attributes->items[2] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_TCP,
- .spec = &attributes->l4,
- .mask = &attributes->l4_mask,
- };
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
- case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
- break;
- default:
- DRV_LOG(ERR, "port %u invalid flow type%d",
- dev->data->port_id, fdir_filter->input.flow_type);
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
- return 0;
-}
-
-#define FLOW_FDIR_CMP(f1, f2, fld) \
- memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld))
-
-/**
- * Compare two FDIR flows. If items and actions are identical, the two flows are
- * regarded as same.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param f1
- * FDIR flow to compare.
- * @param f2
- * FDIR flow to compare.
- *
- * @return
- * Zero on match, 1 otherwise.
- */
-static int
-flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
-{
- if (FLOW_FDIR_CMP(f1, f2, attr) ||
- FLOW_FDIR_CMP(f1, f2, l2) ||
- FLOW_FDIR_CMP(f1, f2, l2_mask) ||
- FLOW_FDIR_CMP(f1, f2, l3) ||
- FLOW_FDIR_CMP(f1, f2, l3_mask) ||
- FLOW_FDIR_CMP(f1, f2, l4) ||
- FLOW_FDIR_CMP(f1, f2, l4_mask) ||
- FLOW_FDIR_CMP(f1, f2, actions[0].type))
- return 1;
- if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
- FLOW_FDIR_CMP(f1, f2, queue))
- return 1;
- return 0;
-}
-
-/**
- * Search device flow list to find out a matched FDIR flow.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param fdir_flow
- * FDIR flow to lookup.
- *
- * @return
- * Index of flow if found, 0 otherwise.
- */
-static uint32_t
-flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t flow_idx = 0;
- struct mlx5_fdir_flow *priv_fdir_flow = NULL;
-
- MLX5_ASSERT(fdir_flow);
- LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
- if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) {
- DRV_LOG(DEBUG, "port %u found FDIR flow %u",
- dev->data->port_id, flow_idx);
- flow_idx = priv_fdir_flow->rix_flow;
- break;
- }
- }
- return flow_idx;
-}
-
-/**
- * Add new flow director filter and store it in list.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param fdir_filter
- * Flow director filter to add.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-flow_fdir_filter_add(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_fdir *fdir_flow;
- struct rte_flow *flow;
- struct mlx5_fdir_flow *priv_fdir_flow = NULL;
- uint32_t flow_idx;
- int ret;
-
- fdir_flow = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*fdir_flow), 0,
- SOCKET_ID_ANY);
- if (!fdir_flow) {
- rte_errno = ENOMEM;
- return -rte_errno;
- }
- ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
- if (ret)
- goto error;
- flow_idx = flow_fdir_filter_lookup(dev, fdir_flow);
- if (flow_idx) {
- rte_errno = EEXIST;
- goto error;
- }
- priv_fdir_flow = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(struct mlx5_fdir_flow),
- 0, SOCKET_ID_ANY);
- if (!priv_fdir_flow) {
- rte_errno = ENOMEM;
- goto error;
- }
- flow_idx = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
- fdir_flow->items, fdir_flow->actions, true,
- NULL);
- flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
- if (!flow)
- goto error;
- flow->fdir = 1;
- priv_fdir_flow->fdir = fdir_flow;
- priv_fdir_flow->rix_flow = flow_idx;
- LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next);
- DRV_LOG(DEBUG, "port %u created FDIR flow %p",
- dev->data->port_id, (void *)flow);
- return 0;
-error:
- mlx5_free(priv_fdir_flow);
- mlx5_free(fdir_flow);
- return -rte_errno;
-}
-
-/**
- * Delete specific filter.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param fdir_filter
- * Filter to be deleted.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-flow_fdir_filter_delete(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t flow_idx;
- struct mlx5_fdir fdir_flow = {
- .attr.group = 0,
- };
- struct mlx5_fdir_flow *priv_fdir_flow = NULL;
- int ret;
-
- ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
- if (ret)
- return -rte_errno;
- LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
- /* Find the fdir in priv list */
- if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow))
- break;
- }
- if (!priv_fdir_flow)
- return 0;
- LIST_REMOVE(priv_fdir_flow, next);
- flow_idx = priv_fdir_flow->rix_flow;
- flow_list_destroy(dev, &priv->flows, flow_idx);
- mlx5_free(priv_fdir_flow->fdir);
- mlx5_free(priv_fdir_flow);
- DRV_LOG(DEBUG, "port %u deleted FDIR flow %u",
- dev->data->port_id, flow_idx);
- return 0;
-}
-
-/**
- * Update queue for specific filter.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param fdir_filter
- * Filter to be updated.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-flow_fdir_filter_update(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter)
-{
- int ret;
-
- ret = flow_fdir_filter_delete(dev, fdir_filter);
- if (ret)
- return ret;
- return flow_fdir_filter_add(dev, fdir_filter);
-}
-
-/**
- * Flush all filters.
- *
- * @param dev
- * Pointer to Ethernet device.
- */
-static void
-flow_fdir_filter_flush(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_fdir_flow *priv_fdir_flow = NULL;
-
- while (!LIST_EMPTY(&priv->fdir_flows)) {
- priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
- LIST_REMOVE(priv_fdir_flow, next);
- flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow);
- mlx5_free(priv_fdir_flow->fdir);
- mlx5_free(priv_fdir_flow);
- }
-}
-
-/**
- * Get flow director information.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param[out] fdir_info
- * Resulting flow director information.
- */
-static void
-flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
-{
- struct rte_eth_fdir_masks *mask =
- &dev->data->dev_conf.fdir_conf.mask;
-
- fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
- fdir_info->guarant_spc = 0;
- rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
- fdir_info->max_flexpayload = 0;
- fdir_info->flow_types_mask[0] = 0;
- fdir_info->flex_payload_unit = 0;
- fdir_info->max_flex_payload_segment_num = 0;
- fdir_info->flex_payload_limit = 0;
- memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
-}
-
-/**
- * Deal with flow director operations.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param filter_op
- * Operation to perform.
- * @param arg
- * Pointer to operation-specific structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
- void *arg)
-{
- enum rte_fdir_mode fdir_mode =
- dev->data->dev_conf.fdir_conf.mode;
-
- if (filter_op == RTE_ETH_FILTER_NOP)
- return 0;
- if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
- fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
- DRV_LOG(ERR, "port %u flow director mode %d not supported",
- dev->data->port_id, fdir_mode);
- rte_errno = EINVAL;
- return -rte_errno;
- }
- switch (filter_op) {
- case RTE_ETH_FILTER_ADD:
- return flow_fdir_filter_add(dev, arg);
- case RTE_ETH_FILTER_UPDATE:
- return flow_fdir_filter_update(dev, arg);
- case RTE_ETH_FILTER_DELETE:
- return flow_fdir_filter_delete(dev, arg);
- case RTE_ETH_FILTER_FLUSH:
- flow_fdir_filter_flush(dev);
- break;
- case RTE_ETH_FILTER_INFO:
- flow_fdir_info_get(dev, arg);
- break;
- default:
- DRV_LOG(DEBUG, "port %u unknown operation %u",
- dev->data->port_id, filter_op);
- rte_errno = EINVAL;
- return -rte_errno;
- }
- return 0;
-}
-
/**
* Manage filter operations.
*
}
*(const void **)arg = &mlx5_flow_ops;
return 0;
- case RTE_ETH_FILTER_FDIR:
- return flow_fdir_ctrl_func(dev, filter_op, arg);
default:
DRV_LOG(ERR, "port %u filter type (%d) not supported",
dev->data->port_id, filter_type);
}
rte_spinlock_unlock(&age_info->aged_sl);
}
- for (i = 0; i < sh->max_port; i++) {
- age_info = &sh->port[i].age_info;
- if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
- continue;
- if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
- rte_eth_dev_callback_process
- (&rte_eth_devices[sh->port[i].devx_ih_port_id],
- RTE_ETH_EVENT_FLOW_AGED, NULL);
- age_info->flags = 0;
- }
+ mlx5_age_event_prepare(sh);
}
/**
union mlx5_flow_tbl_key table_key = {
{
.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
- .reserved = 0,
+ .dummy = 0,
.domain = !!mbits.transfer,
.direction = 0,
}
};
- he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+ he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
return he ?
container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
}
+static void
+mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+ tunnel_flow_tbl_to_id(tte->flow_table));
+ mlx5_free(tte);
+}
+
+static struct mlx5_hlist_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
+ uint64_t key __rte_unused,
+ void *ctx __rte_unused)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct tunnel_tbl_entry *tte;
+
+ tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+ sizeof(*tte), 0,
+ SOCKET_ID_ANY);
+ if (!tte)
+ goto err;
+ mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+ &tte->flow_table);
+ if (tte->flow_table >= MLX5_MAX_TABLES) {
+ DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
+ tte->flow_table);
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+ tte->flow_table);
+ goto err;
+ } else if (!tte->flow_table) {
+ goto err;
+ }
+ tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
+ return &tte->hash;
+err:
+ if (tte)
+ mlx5_free(tte);
+ return NULL;
+}
+
static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
struct mlx5_hlist *group_hash;
group_hash = tunnel ? tunnel->groups : thub->groups;
- he = mlx5_hlist_lookup(group_hash, key.val);
- if (!he) {
- int ret;
- tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
- sizeof(*tte), 0,
- SOCKET_ID_ANY);
- if (!tte)
- goto err;
- tte->hash.key = key.val;
- ret = mlx5_flow_id_get(thub->table_ids, &tte->flow_table);
- if (ret) {
- mlx5_free(tte);
- goto err;
- }
- tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
- mlx5_hlist_insert(group_hash, &tte->hash);
- } else {
- tte = container_of(he, typeof(*tte), hash);
- }
+ he = mlx5_hlist_register(group_hash, key.val, NULL);
+ if (!he)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "tunnel group index not supported");
+ tte = container_of(he, typeof(*tte), hash);
*table = tte->flow_table;
DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
dev->data->port_id, key.tunnel_id, group, *table);
return 0;
-
-err:
- return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- NULL, "tunnel group index not supported");
}
static int
mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
struct mlx5_flow_tunnel *tunnel)
{
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids;
+ struct mlx5_priv *priv = dev->data->dev_private;
DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
dev->data->port_id, tunnel->tunnel_id);
RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
- LIST_REMOVE(tunnel, chain);
- mlx5_flow_id_release(id_pool, tunnel->tunnel_id);
- mlx5_hlist_destroy(tunnel->groups, NULL, NULL);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
+ tunnel->tunnel_id);
+ mlx5_hlist_destroy(tunnel->groups);
mlx5_free(tunnel);
}
mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
const struct rte_flow_tunnel *app_tunnel)
{
- int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tunnel *tunnel;
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids;
uint32_t id;
- ret = mlx5_flow_id_get(id_pool, &id);
- if (ret)
+	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
+			  &id);
+ if (id >= MLX5_MAX_TUNNELS) {
+		mlx5_ipool_free(priv->sh->ipool
+				[MLX5_IPOOL_TUNNEL_ID], id);
+ DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
+ return NULL;
+ } else if (!id) {
return NULL;
+ }
/**
* mlx5 flow tunnel is an auxlilary data structure
* It's not part of IO. No need to allocate it from
tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
0, SOCKET_ID_ANY);
if (!tunnel) {
- mlx5_flow_id_pool_release(id_pool);
+		mlx5_ipool_free(priv->sh->ipool
+				[MLX5_IPOOL_TUNNEL_ID], id);
return NULL;
}
- tunnel->groups = mlx5_hlist_create("tunnel groups", 1024);
+ tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
+ mlx5_flow_tunnel_grp2tbl_create_cb,
+ NULL,
+ mlx5_flow_tunnel_grp2tbl_remove_cb);
if (!tunnel->groups) {
- mlx5_flow_id_pool_release(id_pool);
+		mlx5_ipool_free(priv->sh->ipool
+				[MLX5_IPOOL_TUNNEL_ID], id);
mlx5_free(tunnel);
return NULL;
}
+ tunnel->groups->ctx = priv->sh;
/* initiate new PMD tunnel */
memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
tunnel->tunnel_id = id;
struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
struct mlx5_flow_tunnel *tun;
+ rte_spinlock_lock(&thub->sl);
LIST_FOREACH(tun, &thub->tunnels, chain) {
if (!memcmp(app_tunnel, &tun->app_tunnel,
sizeof(*app_tunnel))) {
ret = -ENOMEM;
}
}
+ rte_spinlock_unlock(&thub->sl);
if (tun)
__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
return;
if (!LIST_EMPTY(&thub->tunnels))
DRV_LOG(WARNING, "port %u tunnels present\n", port_id);
- mlx5_flow_id_pool_release(thub->tunnel_ids);
- mlx5_flow_id_pool_release(thub->table_ids);
- mlx5_hlist_destroy(thub->groups, NULL, NULL);
+ mlx5_hlist_destroy(thub->groups);
mlx5_free(thub);
}
if (!thub)
return -ENOMEM;
LIST_INIT(&thub->tunnels);
- thub->tunnel_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TUNNELS);
- if (!thub->tunnel_ids) {
- err = -rte_errno;
- goto err;
- }
- thub->table_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TABLES);
- if (!thub->table_ids) {
- err = -rte_errno;
- goto err;
- }
- thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES);
+ rte_spinlock_init(&thub->sl);
+ thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
+ 0, mlx5_flow_tunnel_grp2tbl_create_cb,
+ NULL,
+ mlx5_flow_tunnel_grp2tbl_remove_cb);
if (!thub->groups) {
err = -rte_errno;
goto err;
}
+ thub->groups->ctx = sh;
sh->tunnel_hub = thub;
return 0;
err:
if (thub->groups)
- mlx5_hlist_destroy(thub->groups, NULL, NULL);
- if (thub->table_ids)
- mlx5_flow_id_pool_release(thub->table_ids);
- if (thub->tunnel_ids)
- mlx5_flow_id_pool_release(thub->tunnel_ids);
+ mlx5_hlist_destroy(thub->groups);
if (thub)
mlx5_free(thub);
return err;