static void
mlx5_dbg__print_pattern(const struct rte_flow_item *item);
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+ unsigned int item_idx,
+ const struct mlx5_flow_expand_node graph[],
+ const struct mlx5_flow_expand_node *node);
+
static bool
mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
{
const int *stack[MLX5_RSS_EXP_ELT_N];
int stack_pos = 0;
struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
- unsigned int i;
+ unsigned int i, item_idx, last_expand_item_idx = 0;
size_t lsize;
size_t user_pattern_size = 0;
void *addr = NULL;
struct rte_flow_item missed_item;
int missed = 0;
int elt = 0;
- const struct rte_flow_item *last_item = NULL;
+ const struct rte_flow_item *last_expand_item = NULL;
memset(&missed_item, 0, sizeof(missed_item));
lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
buf->entries = 0;
addr = buf->entry[0].pattern;
- for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ for (item = pattern, item_idx = 0;
+ item->type != RTE_FLOW_ITEM_TYPE_END;
+ item++, item_idx++) {
if (!mlx5_flow_is_rss_expandable_item(item)) {
user_pattern_size += sizeof(*item);
continue;
}
- last_item = item;
+ last_expand_item = item;
+ last_expand_item_idx = item_idx;
i = 0;
while (node->next && node->next[i]) {
next = &graph[node->next[i]];
* Check if the last valid item has spec set, need complete pattern,
* and the pattern can be used for expansion.
*/
- missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
+ missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item);
if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
/* Item type END indicates expansion is not required. */
return lsize;
addr = (void *)(((uintptr_t)addr) +
elt * sizeof(*item));
}
+ } else if (last_expand_item != NULL) {
+ node = mlx5_flow_expand_rss_adjust_node(pattern,
+ last_expand_item_idx, graph, node);
}
memset(flow_items, 0, sizeof(flow_items));
next_node = mlx5_flow_expand_rss_skip_explicit(graph,
MLX5_EXPANSION_OUTER_IPV6_UDP,
MLX5_EXPANSION_OUTER_IPV6_TCP,
MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_STD_VXLAN,
+ MLX5_EXPANSION_L3_VXLAN,
MLX5_EXPANSION_VXLAN_GPE,
MLX5_EXPANSION_GRE,
MLX5_EXPANSION_NVGRE,
MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_VXLAN,
},
+ [MLX5_EXPANSION_STD_VXLAN] = {
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ },
+ [MLX5_EXPANSION_L3_VXLAN] = {
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ },
[MLX5_EXPANSION_VXLAN_GPE] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
MLX5_EXPANSION_IPV4,
* Pointer to device flow rule attributes.
* @param[in] subpriority
* The priority based on the items.
+ * @param[in] external
+ * Flow is user flow.
* @return
* The matcher priority of the flow.
*/
uint16_t
mlx5_get_matcher_priority(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- uint32_t subpriority)
+ uint32_t subpriority, bool external)
{
uint16_t priority = (uint16_t)attr->priority;
struct mlx5_priv *priv = dev->data->dev_private;
if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
priority = priv->config.flow_prio - 1;
return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
+ } else if (!external && attr->transfer && attr->group == 0 &&
+ attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
+ return (priv->config.flow_prio - 1) * 3;
}
if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
}
mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
- mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
+ mem_mng->umem = mlx5_os_umem_reg(sh->cdev->ctx, mem, size,
IBV_ACCESS_LOCAL_WRITE);
if (!mem_mng->umem) {
rte_errno = errno;
mkey_attr.addr = (uintptr_t)mem;
mkey_attr.size = size;
mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
- mkey_attr.pd = sh->pdn;
+ mkey_attr.pd = sh->cdev->pdn;
mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
- mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
+ mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr);
if (!mem_mng->dm) {
mlx5_os_umem_dereg(mem_mng->umem);
rte_errno = errno;
}
printf("END\n");
}
+
+/**
+ * Check whether a UDP flow item matches the standard VXLAN destination port.
+ *
+ * @param[in] udp_item
+ *   Pointer to the UDP flow item (the item preceding a VXLAN item).
+ *
+ * @return
+ *   Non-zero when the masked destination port is unspecified (wildcard,
+ *   i.e. spec is NULL or the masked value is 0) or equals
+ *   MLX5_UDP_PORT_VXLAN; zero otherwise.
+ */
+static int
+mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item)
+{
+	const struct rte_flow_item_udp *spec = udp_item->spec;
+	const struct rte_flow_item_udp *mask = udp_item->mask;
+	uint16_t udp_dport = 0;
+
+	if (spec != NULL) {
+		/* A NULL mask means the default UDP item mask applies. */
+		if (!mask)
+			mask = &rte_flow_item_udp_mask;
+		/* Mask in big-endian first, then convert to CPU order. */
+		udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port &
+				mask->hdr.dst_port);
+	}
+	return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN);
+}
+
+/**
+ * Select the RSS expansion graph node for a VXLAN item depending on the
+ * outer UDP destination port.
+ *
+ * @param[in] pattern
+ *   Flow pattern the item belongs to.
+ * @param[in] item_idx
+ *   Index of the item to inspect within @p pattern.
+ * @param[in] graph
+ *   RSS expansion graph.
+ * @param[in] node
+ *   Current expansion graph node.
+ *
+ * @return
+ *   For a VXLAN item: the standard VXLAN expansion node when the preceding
+ *   UDP item carries the default VXLAN port (or a wildcard), otherwise the
+ *   L3 VXLAN node. For any other item type, @p node is returned unchanged.
+ */
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+		unsigned int item_idx,
+		const struct mlx5_flow_expand_node graph[],
+		const struct mlx5_flow_expand_node *node)
+{
+	const struct rte_flow_item *item = pattern + item_idx, *prev_item;
+
+	switch (item->type) {
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+		/* A VXLAN item must be preceded by a UDP item. */
+		MLX5_ASSERT(item_idx > 0);
+		prev_item = pattern + item_idx - 1;
+		MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
+		if (mlx5_flow_is_std_vxlan_port(prev_item))
+			return &graph[MLX5_EXPANSION_STD_VXLAN];
+		return &graph[MLX5_EXPANSION_L3_VXLAN];
+	default:
+		return node;
+	}
+}