* RSS types bit-field associated with this node
* (see ETH_RSS_* definitions).
*/
- uint8_t optional;
- /**< optional expand field. Default 0 to expand, 1 not go deeper. */
+ uint64_t node_flags;
+ /**<
+ * Bit-fields that define how the node is used in the expansion.
+ * (see MLX5_EXPANSION_NODE_* definitions).
+ */
};
+/* Optional expansion node: the expansion algorithm will not go deeper. */
+#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
+
+/* The node is not added implicitly as expansion to the flow pattern.
+ * If the node type does not match the flow pattern item type, the
+ * expansion alg will go deeper to its next items.
+ * In the current implementation, the list of next nodes indexes can
+ * have up to one node with this flag set and it has to be the last
+ * node index (before the list terminator).
+ */
+#define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1)
+
/** Object returned by mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_rss {
uint32_t entries;
case RTE_FLOW_ITEM_TYPE_GRE:
case RTE_FLOW_ITEM_TYPE_GENEVE:
case RTE_FLOW_ITEM_TYPE_MPLS:
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ case RTE_FLOW_ITEM_TYPE_GRE_KEY:
+ case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ case RTE_FLOW_ITEM_TYPE_GTP:
return true;
default:
break;
return ret;
}
+/**
+ * Skip the explicit-only nodes at the head of a next-nodes list.
+ *
+ * Nodes flagged with MLX5_EXPANSION_NODE_EXPLICIT are expanded only when
+ * their item type appears in the flow pattern itself, so the traversal
+ * must not enter them implicitly.  For every explicit node found, the
+ * search continues through that node's own next list.
+ *
+ * @param[in] graph
+ *   Expansion graph the indexes in @p next_node refer to.
+ * @param[in] next_node
+ *   Zero-terminated list of node indexes, may be NULL.
+ *
+ * @return
+ *   Pointer to the first non-explicit node index (possibly inside a
+ *   nested next list), pointer to a list terminator when every node is
+ *   explicit, or NULL when @p next_node is NULL.
+ */
+static const int *
+mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
+		const int *next_node)
+{
+	const struct mlx5_flow_expand_node *node = NULL;
+	const int *next = next_node;
+
+	while (next && *next) {
+		/*
+		 * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT
+		 * flag set, because they were not found in the flow pattern.
+		 */
+		node = &graph[*next];
+		if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT))
+			break;
+		next = node->next;
+	}
+	return next;
+}
+
#define MLX5_RSS_EXP_ELT_N 16
/**
continue;
}
last_item = item;
- for (i = 0; node->next && node->next[i]; ++i) {
+ i = 0;
+ while (node->next && node->next[i]) {
next = &graph[node->next[i]];
if (next->type == item->type)
break;
+ if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
+ node = next;
+ i = 0;
+ } else {
+ ++i;
+ }
}
if (next)
node = next;
}
}
memset(flow_items, 0, sizeof(flow_items));
- next_node = node->next;
+ next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+ node->next);
stack[stack_pos] = next_node;
node = next_node ? &graph[*next_node] : NULL;
while (node) {
addr = (void *)(((uintptr_t)addr) + n);
}
/* Go deeper. */
- if (!node->optional && node->next) {
- next_node = node->next;
+ if (!(node->node_flags & MLX5_EXPANSION_NODE_OPTIONAL) &&
+ node->next) {
+ next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+ node->next);
if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
rte_errno = E2BIG;
return -rte_errno;
stack[stack_pos] = next_node;
} else if (*(next_node + 1)) {
/* Follow up with the next possibility. */
+ next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+ ++next_node);
+ } else if (!stack_pos) {
+ /*
+ * Completing the traverse over the different paths.
+ * The next_node is advanced to the terminator.
+ */
++next_node;
} else {
/* Move to the next path. */
- if (stack_pos)
+ while (stack_pos) {
next_node = stack[--stack_pos];
- next_node++;
+ next_node++;
+ if (*next_node)
+ break;
+ }
+ next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+ next_node);
stack[stack_pos] = next_node;
}
- node = *next_node ? &graph[*next_node] : NULL;
+ node = next_node && *next_node ? &graph[*next_node] : NULL;
};
return lsize;
}
enum mlx5_expansion {
MLX5_EXPANSION_ROOT,
MLX5_EXPANSION_ROOT_OUTER,
- MLX5_EXPANSION_ROOT_ETH_VLAN,
- MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
MLX5_EXPANSION_OUTER_ETH,
- MLX5_EXPANSION_OUTER_ETH_VLAN,
MLX5_EXPANSION_OUTER_VLAN,
MLX5_EXPANSION_OUTER_IPV4,
MLX5_EXPANSION_OUTER_IPV4_UDP,
MLX5_EXPANSION_GRE_KEY,
MLX5_EXPANSION_MPLS,
MLX5_EXPANSION_ETH,
- MLX5_EXPANSION_ETH_VLAN,
MLX5_EXPANSION_VLAN,
MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV4_UDP,
MLX5_EXPANSION_IPV6,
MLX5_EXPANSION_IPV6_UDP,
MLX5_EXPANSION_IPV6_TCP,
+ MLX5_EXPANSION_IPV6_FRAG_EXT,
+ MLX5_EXPANSION_GTP
};
/** Supported expansion of items. */
MLX5_EXPANSION_OUTER_IPV6),
.type = RTE_FLOW_ITEM_TYPE_END,
},
- [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
- .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
- .type = RTE_FLOW_ITEM_TYPE_END,
- },
- [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
- .next = MLX5_FLOW_EXPAND_RSS_NEXT
- (MLX5_EXPANSION_OUTER_ETH_VLAN),
- .type = RTE_FLOW_ITEM_TYPE_END,
- },
[MLX5_EXPANSION_OUTER_ETH] = {
- .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
- MLX5_EXPANSION_OUTER_IPV6),
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- .rss_types = 0,
- },
- [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
.type = RTE_FLOW_ITEM_TYPE_ETH,
.rss_types = 0,
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
MLX5_EXPANSION_OUTER_IPV6),
.type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
},
[MLX5_EXPANSION_OUTER_IPV4] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT
[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_VXLAN_GPE,
- MLX5_EXPANSION_MPLS),
+ MLX5_EXPANSION_MPLS,
+ MLX5_EXPANSION_GTP),
.type = RTE_FLOW_ITEM_TYPE_UDP,
.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
},
[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_VXLAN_GPE,
- MLX5_EXPANSION_MPLS),
+ MLX5_EXPANSION_MPLS,
+ MLX5_EXPANSION_GTP),
.type = RTE_FLOW_ITEM_TYPE_UDP,
.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
},
MLX5_EXPANSION_IPV6,
MLX5_EXPANSION_MPLS),
.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
- .optional = 1,
+ .node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
},
[MLX5_EXPANSION_NVGRE] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
MLX5_EXPANSION_IPV6,
MLX5_EXPANSION_ETH),
.type = RTE_FLOW_ITEM_TYPE_MPLS,
+ .node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
},
[MLX5_EXPANSION_ETH] = {
- .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
- MLX5_EXPANSION_IPV6),
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- },
- [MLX5_EXPANSION_ETH_VLAN] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
.type = RTE_FLOW_ITEM_TYPE_ETH,
},
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
},
[MLX5_EXPANSION_IPV4] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
},
[MLX5_EXPANSION_IPV6] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
- MLX5_EXPANSION_IPV6_TCP),
+ MLX5_EXPANSION_IPV6_TCP,
+ MLX5_EXPANSION_IPV6_FRAG_EXT),
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
ETH_RSS_NONFRAG_IPV6_OTHER,
.type = RTE_FLOW_ITEM_TYPE_TCP,
.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
},
+ [MLX5_EXPANSION_IPV6_FRAG_EXT] = {
+ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+ },
+ [MLX5_EXPANSION_GTP] = {
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_GTP,
+ },
};
static struct rte_flow_action_handle *
data->dynf_meta = 1;
data->flow_meta_mask = rte_flow_dynf_metadata_mask;
data->flow_meta_offset = rte_flow_dynf_metadata_offs;
- data->flow_meta_port_mask = (uint32_t)~0;
- if (priv->config.dv_xmeta_en == MLX5_XMETA_MODE_META16)
- data->flow_meta_port_mask >>= 16;
+ data->flow_meta_port_mask = priv->sh->dv_meta_mask;
}
}
}
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L2 layer should not follow VLAN");
+ if (item_flags & MLX5_FLOW_LAYER_GTP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L2 layer should not follow GTP");
if (!mask)
mask = &rte_flow_item_eth_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
*
* @param[in] dev
* Pointer to the Ethernet device structure.
+ * @param[in] udp_dport
+ * UDP destination port
* @param[in] item
* Item specification.
* @param[in] item_flags
*/
int
mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
+ uint16_t udp_dport,
const struct rte_flow_item *item,
uint64_t item_flags,
const struct rte_flow_attr *attr,
"no outer UDP layer found");
if (!mask)
mask = &rte_flow_item_vxlan_mask;
- /* FDB domain & NIC domain non-zero group */
- if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
- valid_mask = &nic_mask;
- /* Group zero in NIC domain */
- if (!attr->group && !attr->transfer && priv->sh->tunnel_header_0_1)
- valid_mask = &nic_mask;
+
+ if (priv->sh->steering_format_version !=
+ MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
+ !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) {
+ /* FDB domain & NIC domain non-zero group */
+ if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
+ valid_mask = &nic_mask;
+ /* Group zero in NIC domain */
+ if (!attr->group && !attr->transfer &&
+ priv->sh->tunnel_header_0_1)
+ valid_mask = &nic_mask;
+ }
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)valid_mask,
}
static unsigned int
-find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
+find_graph_root(uint32_t rss_level)
{
-	const struct rte_flow_item *item;
-	unsigned int has_vlan = 0;
-
-	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
-			has_vlan = 1;
-			break;
-		}
-	}
-	if (has_vlan)
-		return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
-		       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
+	/*
+	 * Level 0/1 expands from MLX5_EXPANSION_ROOT, level 2 and above
+	 * from the outer-part root.  The VLAN-specific roots are gone:
+	 * the VLAN graph nodes carry MLX5_EXPANSION_NODE_EXPLICIT and so
+	 * are entered only when the pattern itself holds a VLAN item,
+	 * which makes the former pattern scan for VLAN unnecessary.
+	 */
	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
	       MLX5_EXPANSION_ROOT_OUTER;
}
uint32_t flow_idx);
+/*
+ * Match callback for the mark-copy resource list: compare the entry's
+ * mark ID against the mark ID carried in ctx->data.  Returns 0 on match,
+ * non-zero otherwise.
+ */
int
-flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
-		      struct mlx5_hlist_entry *entry,
-		      uint64_t key, void *cb_ctx __rte_unused)
+flow_dv_mreg_match_cb(void *tool_ctx __rte_unused,
+		      struct mlx5_list_entry *entry, void *cb_ctx)
{
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_mreg_copy_resource *mcp_res =
-			container_of(entry, typeof(*mcp_res), hlist_ent);
+		       container_of(entry, typeof(*mcp_res), hlist_ent);
-	return mcp_res->mark_id != key;
+	return mcp_res->mark_id != *(uint32_t *)(ctx->data);
}
-struct mlx5_hlist_entry *
-flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
- void *cb_ctx)
+struct mlx5_list_entry *
+flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct rte_eth_dev *dev = list->ctx;
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_mreg_copy_resource *mcp_res;
struct rte_flow_error *error = ctx->error;
uint32_t idx = 0;
int ret;
- uint32_t mark_id = key;
+ uint32_t mark_id = *(uint32_t *)(ctx->data);
struct rte_flow_attr attr = {
.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
.ingress = 1,
return &mcp_res->hlist_ent;
}
+/*
+ * Clone callback for the mark-copy resource list: allocate a fresh
+ * resource from the MCP indexed pool, copy the original entry into it
+ * and record the new pool index.  Returns the embedded list entry of
+ * the clone, or NULL with rte_errno set to ENOMEM on pool exhaustion.
+ */
+struct mlx5_list_entry *
+flow_dv_mreg_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+		      void *cb_ctx __rte_unused)
+{
+	struct rte_eth_dev *dev = tool_ctx;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_mreg_copy_resource *mcp_res;
+	uint32_t idx = 0;
+
+	mcp_res = mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
+	if (!mcp_res) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/* Copy the whole original, then fix up the clone's own index. */
+	memcpy(mcp_res, oentry, sizeof(*mcp_res));
+	mcp_res->idx = idx;
+	return &mcp_res->hlist_ent;
+}
+
+/*
+ * Free callback for a cloned mark-copy resource: return its index to
+ * the MCP indexed pool, which owns the entry's memory.
+ */
+void
+flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_flow_mreg_copy_resource *mcp_res =
+		       container_of(entry, typeof(*mcp_res), hlist_ent);
+	struct rte_eth_dev *dev = tool_ctx;
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
+}
+
/**
* Add a flow of copying flow metadata registers in RX_CP_TBL.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.dev = dev,
.error = error,
+ .data = &mark_id,
};
/* Check if already registered. */
}
void
-flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
+flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
struct mlx5_flow_mreg_copy_resource *mcp_res =
- container_of(entry, typeof(*mcp_res), hlist_ent);
- struct rte_eth_dev *dev = list->ctx;
+ container_of(entry, typeof(*mcp_res), hlist_ent);
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_priv *priv = dev->data->dev_private;
MLX5_ASSERT(mcp_res->rix_flow);
static void
flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
{
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_cb_ctx ctx;
+ uint32_t mark_id;
/* Check if default flow is registered. */
if (!priv->mreg_cp_tbl)
return;
- entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
- MLX5_DEFAULT_COPY_ID, NULL);
+ mark_id = MLX5_DEFAULT_COPY_ID;
+ ctx.data = &mark_id;
+ entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx);
if (!entry)
return;
mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_flow_cb_ctx ctx;
+ uint32_t mark_id;
/* Check whether extensive metadata feature is engaged. */
if (!priv->config.dv_flow_en ||
* Add default mreg copy flow may be called multiple time, but
* only be called once in stop. Avoid register it twice.
*/
- if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL))
+ mark_id = MLX5_DEFAULT_COPY_ID;
+ ctx.data = &mark_id;
+ if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx))
return 0;
- mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
+ mcp_res = flow_mreg_add_copy_action(dev, mark_id, error);
if (!mcp_res)
return -rte_errno;
return 0;
struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0};
uint32_t i;
- /**
+ /*
* This is a tmp dev_flow,
* no need to register any matcher for it in translate.
*/
for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
struct mlx5_flow dev_flow = {0};
struct mlx5_flow_handle dev_handle = { {0} };
+ uint8_t fate = final_policy->act_cnt[i].fate_action;
- if (final_policy->is_rss) {
- const void *rss_act =
+ if (fate == MLX5_FLOW_FATE_SHARED_RSS) {
+ const struct rte_flow_action_rss *rss_act =
final_policy->act_cnt[i].rss->conf;
struct rte_flow_action rss_actions[2] = {
[0] = {
.type = RTE_FLOW_ACTION_TYPE_RSS,
- .conf = rss_act
+ .conf = rss_act,
},
[1] = {
.type = RTE_FLOW_ACTION_TYPE_END,
- .conf = NULL
+ .conf = NULL,
}
};
rss_desc_v[i].hash_fields ?
rss_desc_v[i].queue_num : 1;
rss_desc_v[i].tunnel =
- !!(dev_flow.handle->layers &
- MLX5_FLOW_LAYER_TUNNEL);
- } else {
+ !!(dev_flow.handle->layers &
+ MLX5_FLOW_LAYER_TUNNEL);
+ /* Use the RSS queues in the containers. */
+ rss_desc_v[i].queue =
+ (uint16_t *)(uintptr_t)rss_act->queue;
+ rss_desc[i] = &rss_desc_v[i];
+ } else if (fate == MLX5_FLOW_FATE_QUEUE) {
/* This is queue action. */
rss_desc_v[i] = wks->rss_desc;
rss_desc_v[i].key_len = 0;
rss_desc_v[i].queue =
&final_policy->act_cnt[i].queue;
rss_desc_v[i].queue_num = 1;
+ rss_desc[i] = &rss_desc_v[i];
+ } else {
+ rss_desc[i] = NULL;
}
- rss_desc[i] = &rss_desc_v[i];
}
sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
flow, policy, rss_desc);
} else {
enum mlx5_meter_domain mtr_domain =
attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
- attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
- MLX5_MTR_DOMAIN_INGRESS;
+ (attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+ MLX5_MTR_DOMAIN_INGRESS);
sub_policy = policy->sub_policys[mtr_domain][0];
}
- if (!sub_policy) {
+ if (!sub_policy)
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to get meter sub-policy.");
- goto exit;
- }
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to get meter sub-policy.");
exit:
return sub_policy;
}
} else {
enum mlx5_meter_domain mtr_domain =
attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
- attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
- MLX5_MTR_DOMAIN_INGRESS;
+ (attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+ MLX5_MTR_DOMAIN_INGRESS);
sub_policy =
&priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy;
actions_pre++;
if (!tag_action)
return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "No tag action space.");
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "No tag action space.");
if (!mtr_flow_id) {
tag_action->type = RTE_FLOW_ACTION_TYPE_VOID;
goto exit;
int *modify_after_mirror)
{
const struct rte_flow_action_sample *sample;
+ const struct rte_flow_action_raw_decap *decap;
int actions_n = 0;
uint32_t ratio = 0;
int sub_type = 0;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
- case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
case RTE_FLOW_ACTION_TYPE_METER:
if (fdb_mirror)
*modify_after_mirror = 1;
break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ decap = actions->conf;
+ while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ ;
+ actions_n++;
+ if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+ const struct rte_flow_action_raw_encap *encap =
+ actions->conf;
+ if (decap->size <=
+ MLX5_ENCAPSULATION_DECISION_SIZE &&
+ encap->size >
+ MLX5_ENCAPSULATION_DECISION_SIZE)
+ /* L3 encap. */
+ break;
+ }
+ if (fdb_mirror)
+ *modify_after_mirror = 1;
+ break;
default:
break;
}
int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
union {
struct mlx5_flow_expand_rss buf;
- uint8_t buffer[2048];
+ uint8_t buffer[4096];
} expand_buffer;
union {
struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
if (rss && rss->types) {
unsigned int graph_root;
- graph_root = find_graph_root(items, rss->level);
+ graph_root = find_graph_root(rss->level);
ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
items, rss->types,
mlx5_support_expansion, graph_root);
struct rte_flow_attr *attr,
bool *is_rss,
uint8_t *domain_bitmap,
- bool *is_def_policy,
+ uint8_t *policy_mode,
struct rte_mtr_error *error)
{
const struct mlx5_flow_driver_ops *fops;
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->validate_mtr_acts(dev, actions, attr,
- is_rss, domain_bitmap, is_def_policy, error);
+ return fops->validate_mtr_acts(dev, actions, attr, is_rss,
+ domain_bitmap, policy_mode, error);
}
/**
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_hlist_entry *he;
+ struct mlx5_list_entry *he;
union tunnel_offload_mark mbits = { .val = mark };
union mlx5_flow_tbl_key table_key = {
{
.is_egress = 0,
}
};
- he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+ struct mlx5_flow_cb_ctx ctx = {
+ .data = &table_key.v64,
+ };
+
+ he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, &ctx);
return he ?
container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
}
static void
-mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry)
+mlx5_flow_tunnel_grp2tbl_remove_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
}
+/*
+ * Match callback for the tunnel group-to-table hash list: returns 0
+ * only when both the tunnel ID and the group of the entry equal the
+ * key carried in ctx->data.
+ */
static int
-mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
-				  struct mlx5_hlist_entry *entry,
-				  uint64_t key, void *cb_ctx __rte_unused)
+mlx5_flow_tunnel_grp2tbl_match_cb(void *tool_ctx __rte_unused,
+				  struct mlx5_list_entry *entry, void *cb_ctx)
{
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	union tunnel_tbl_key tbl = {
-		.val = key,
+		.val = *(uint64_t *)(ctx->data),
	};
	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
	return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
}
-static struct mlx5_hlist_entry *
-mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
- void *ctx __rte_unused)
+static struct mlx5_list_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct tunnel_tbl_entry *tte;
union tunnel_tbl_key tbl = {
- .val = key,
+ .val = *(uint64_t *)(ctx->data),
};
tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
return NULL;
}
+/*
+ * Clone callback for tunnel group-to-table entries: heap-allocate a new
+ * entry and copy the original into it byte for byte.
+ * NOTE(review): returns NULL without setting rte_errno on allocation
+ * failure - confirm callers do not inspect rte_errno on this path.
+ */
+static struct mlx5_list_entry *
+mlx5_flow_tunnel_grp2tbl_clone_cb(void *tool_ctx __rte_unused,
+				  struct mlx5_list_entry *oentry,
+				  void *cb_ctx __rte_unused)
+{
+	struct tunnel_tbl_entry *tte = mlx5_malloc(MLX5_MEM_SYS, sizeof(*tte),
+						   0, SOCKET_ID_ANY);
+
+	if (!tte)
+		return NULL;
+	memcpy(tte, oentry, sizeof(*tte));
+	return &tte->hash;
+}
+
+/*
+ * Free callback for a cloned tunnel group-to-table entry: release the
+ * heap memory obtained through mlx5_malloc() at clone time.
+ */
+static void
+mlx5_flow_tunnel_grp2tbl_clone_free_cb(void *tool_ctx __rte_unused,
+				       struct mlx5_list_entry *entry)
+{
+	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+	mlx5_free(tte);
+}
+
static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
struct rte_flow_error *error)
{
- struct mlx5_hlist_entry *he;
+ struct mlx5_list_entry *he;
struct tunnel_tbl_entry *tte;
union tunnel_tbl_key key = {
.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
};
struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
struct mlx5_hlist *group_hash;
+ struct mlx5_flow_cb_ctx ctx = {
+ .data = &key.val,
+ };
group_hash = tunnel ? tunnel->groups : thub->groups;
- he = mlx5_hlist_register(group_hash, key.val, NULL);
+ he = mlx5_hlist_register(group_hash, key.val, &ctx);
if (!he)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
return NULL;
}
- tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
+ tunnel->groups = mlx5_hlist_create("tunnel groups", 64, false, true,
+ priv->sh,
mlx5_flow_tunnel_grp2tbl_create_cb,
mlx5_flow_tunnel_grp2tbl_match_cb,
- mlx5_flow_tunnel_grp2tbl_remove_cb);
+ mlx5_flow_tunnel_grp2tbl_remove_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_free_cb);
if (!tunnel->groups) {
mlx5_ipool_free(ipool, id);
return NULL;
}
- tunnel->groups->ctx = priv->sh;
/* initiate new PMD tunnel */
memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
tunnel->tunnel_id = id;
return -ENOMEM;
LIST_INIT(&thub->tunnels);
rte_spinlock_init(&thub->sl);
- thub->groups = mlx5_hlist_create("flow groups",
- rte_align32pow2(MLX5_MAX_TABLES), 0,
- 0, mlx5_flow_tunnel_grp2tbl_create_cb,
+ thub->groups = mlx5_hlist_create("flow groups", 64,
+ false, true, sh,
+ mlx5_flow_tunnel_grp2tbl_create_cb,
mlx5_flow_tunnel_grp2tbl_match_cb,
- mlx5_flow_tunnel_grp2tbl_remove_cb);
+ mlx5_flow_tunnel_grp2tbl_remove_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_free_cb);
if (!thub->groups) {
err = -rte_errno;
goto err;
}
- thub->groups->ctx = sh;
sh->tunnel_hub = thub;
return 0;