[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+ [MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
#endif
[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
uint64_t rss_types;
/**<
* RSS types bit-field associated with this node
- * (see ETH_RSS_* definitions).
+ * (see RTE_ETH_RSS_* definitions).
+ */
+ uint64_t node_flags;
+ /**<
+ * Bit-fields that define how the node is used in the expansion
+ * (see MLX5_EXPANSION_NODE_* definitions).
*/
- uint8_t optional;
- /**< optional expand field. Default 0 to expand, 1 not go deeper. */
};
+/* Optional expansion node: the expansion algorithm will not go deeper. */
+#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
+
+/* The node is not added implicitly as an expansion of the flow pattern.
+ * If the node type does not match the flow pattern item type, the
+ * expansion algorithm goes deeper into its next items.
+ * In the current implementation, the list of next node indices can
+ * contain at most one node with this flag set, and it must be the last
+ * index before the list terminator.
+ */
+#define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1)
+
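+/*
+ * Illustration only: MLX5_EXPANSION_FOO below is a hypothetical graph
+ * entry, not defined by this patch, showing how the flags are declared:
+ *
+ * [MLX5_EXPANSION_FOO] = {
+ *     .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
+ *     .type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
+ *     .node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
+ * },
+ *
+ * With MLX5_EXPANSION_NODE_OPTIONAL the expansion algorithm does not
+ * descend into .next implicitly; with MLX5_EXPANSION_NODE_EXPLICIT the
+ * node is used only when the original flow pattern contains an item of
+ * the matching type.
+ */
+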
/** Object returned by mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_rss {
uint32_t entries;
static void
mlx5_dbg__print_pattern(const struct rte_flow_item *item);
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+ unsigned int item_idx,
+ const struct mlx5_flow_expand_node graph[],
+ const struct mlx5_flow_expand_node *node);
+
static bool
mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
{
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ case RTE_FLOW_ITEM_TYPE_GTP:
return true;
default:
break;
return false;
}
+/**
+ * Network Service Header (NSH) and its next protocol values
+ * are described in RFC-8393.
+ */
+static enum rte_flow_item_type
+mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+ enum rte_flow_item_type type;
+
+ switch (proto_mask & proto_spec) {
+ case RTE_VXLAN_GPE_TYPE_IPV4:
+ type = RTE_FLOW_ITEM_TYPE_IPV4;
+ break;
+ case RTE_VXLAN_GPE_TYPE_IPV6:
+ type = RTE_FLOW_ITEM_TYPE_IPV6;
+ break;
+ case RTE_VXLAN_GPE_TYPE_ETH:
+ type = RTE_FLOW_ITEM_TYPE_ETH;
+ break;
+ default:
+ type = RTE_FLOW_ITEM_TYPE_END;
+ }
+ return type;
+}
+
+static enum rte_flow_item_type
+mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+ enum rte_flow_item_type type;
+
+ switch (proto_mask & proto_spec) {
+ case IPPROTO_UDP:
+ type = RTE_FLOW_ITEM_TYPE_UDP;
+ break;
+ case IPPROTO_TCP:
+ type = RTE_FLOW_ITEM_TYPE_TCP;
+ break;
+ case IPPROTO_IP:
+ type = RTE_FLOW_ITEM_TYPE_IPV4;
+ break;
+ case IPPROTO_IPV6:
+ type = RTE_FLOW_ITEM_TYPE_IPV6;
+ break;
+ default:
+ type = RTE_FLOW_ITEM_TYPE_END;
+ }
+ return type;
+}
+
+static enum rte_flow_item_type
+mlx5_ethertype_to_item_type(rte_be16_t type_spec,
+ rte_be16_t type_mask, bool is_tunnel)
+{
+ enum rte_flow_item_type type;
+
+ switch (rte_be_to_cpu_16(type_spec & type_mask)) {
+ case RTE_ETHER_TYPE_TEB:
+ type = is_tunnel ?
+ RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
+ break;
+ case RTE_ETHER_TYPE_VLAN:
+ type = !is_tunnel ?
+ RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
+ break;
+ case RTE_ETHER_TYPE_IPV4:
+ type = RTE_FLOW_ITEM_TYPE_IPV4;
+ break;
+ case RTE_ETHER_TYPE_IPV6:
+ type = RTE_FLOW_ITEM_TYPE_IPV6;
+ break;
+ default:
+ type = RTE_FLOW_ITEM_TYPE_END;
+ }
+ return type;
+}
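+/*
+ * Example: with a full mask, RTE_ETHER_TYPE_IPV4 maps to
+ * RTE_FLOW_ITEM_TYPE_IPV4, while RTE_ETHER_TYPE_TEB maps to
+ * RTE_FLOW_ITEM_TYPE_ETH only in a tunnel context (is_tunnel == true)
+ * and to RTE_FLOW_ITEM_TYPE_END otherwise.
+ */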
+
static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
- enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
- uint16_t ether_type = 0;
- uint16_t ether_type_m;
- uint8_t ip_next_proto = 0;
- uint8_t ip_next_proto_m;
+#define MLX5_XSET_ITEM_MASK_SPEC(type, fld) \
+ do { \
+ const void *m = item->mask; \
+ const void *s = item->spec; \
+ mask = m ? \
+ ((const struct rte_flow_item_##type *)m)->fld : \
+ rte_flow_item_##type##_mask.fld; \
+ spec = ((const struct rte_flow_item_##type *)s)->fld; \
+ } while (0)
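+/*
+ * For reference, MLX5_XSET_ITEM_MASK_SPEC(eth, type) expands roughly to:
+ *
+ * mask = item->mask ?
+ *        ((const struct rte_flow_item_eth *)item->mask)->type :
+ *        rte_flow_item_eth_mask.type;
+ * spec = ((const struct rte_flow_item_eth *)item->spec)->type;
+ */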
+
+ enum rte_flow_item_type ret;
+ uint16_t spec, mask;
if (item == NULL || item->spec == NULL)
- return ret;
+ return RTE_FLOW_ITEM_TYPE_VOID;
switch (item->type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- if (item->mask)
- ether_type_m = ((const struct rte_flow_item_eth *)
- (item->mask))->type;
- else
- ether_type_m = rte_flow_item_eth_mask.type;
- if (ether_type_m != RTE_BE16(0xFFFF))
- break;
- ether_type = ((const struct rte_flow_item_eth *)
- (item->spec))->type;
- if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
- ret = RTE_FLOW_ITEM_TYPE_VLAN;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(eth, type);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_ethertype_to_item_type(spec, mask, false);
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- if (item->mask)
- ether_type_m = ((const struct rte_flow_item_vlan *)
- (item->mask))->inner_type;
- else
- ether_type_m = rte_flow_item_vlan_mask.inner_type;
- if (ether_type_m != RTE_BE16(0xFFFF))
- break;
- ether_type = ((const struct rte_flow_item_vlan *)
- (item->spec))->inner_type;
- if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
- ret = RTE_FLOW_ITEM_TYPE_VLAN;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_ethertype_to_item_type(spec, mask, false);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- if (item->mask)
- ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
- (item->mask))->hdr.next_proto_id;
- else
- ip_next_proto_m =
- rte_flow_item_ipv4_mask.hdr.next_proto_id;
- if (ip_next_proto_m != 0xFF)
- break;
- ip_next_proto = ((const struct rte_flow_item_ipv4 *)
- (item->spec))->hdr.next_proto_id;
- if (ip_next_proto == IPPROTO_UDP)
- ret = RTE_FLOW_ITEM_TYPE_UDP;
- else if (ip_next_proto == IPPROTO_TCP)
- ret = RTE_FLOW_ITEM_TYPE_TCP;
- else if (ip_next_proto == IPPROTO_IP)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (ip_next_proto == IPPROTO_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_inet_proto_to_item_type(spec, mask);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- if (item->mask)
- ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
- (item->mask))->hdr.proto;
- else
- ip_next_proto_m =
- rte_flow_item_ipv6_mask.hdr.proto;
- if (ip_next_proto_m != 0xFF)
- break;
- ip_next_proto = ((const struct rte_flow_item_ipv6 *)
- (item->spec))->hdr.proto;
- if (ip_next_proto == IPPROTO_UDP)
- ret = RTE_FLOW_ITEM_TYPE_UDP;
- else if (ip_next_proto == IPPROTO_TCP)
- ret = RTE_FLOW_ITEM_TYPE_TCP;
- else if (ip_next_proto == IPPROTO_IP)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (ip_next_proto == IPPROTO_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_inet_proto_to_item_type(spec, mask);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol);
+ ret = mlx5_ethertype_to_item_type(spec, mask, true);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ MLX5_XSET_ITEM_MASK_SPEC(gre, protocol);
+ ret = mlx5_ethertype_to_item_type(spec, mask, true);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol);
+ ret = mlx5_nsh_proto_to_item_type(spec, mask);
break;
default:
ret = RTE_FLOW_ITEM_TYPE_VOID;
break;
}
return ret;
+#undef MLX5_XSET_ITEM_MASK_SPEC
+}
+
+static const int *
+mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
+ const int *next_node)
+{
+ const struct mlx5_flow_expand_node *node = NULL;
+ const int *next = next_node;
+
+ while (next && *next) {
+ /*
+ * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT
+ * flag set, because they were not found in the flow pattern.
+ */
+ node = &graph[*next];
+ if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT))
+ break;
+ next = node->next;
+ }
+ return next;
}
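+/*
+ * For example, with the expansion graph defined below: the next list of
+ * MLX5_EXPANSION_OUTER_ETH is { MLX5_EXPANSION_OUTER_VLAN }, and the
+ * OUTER_VLAN node carries MLX5_EXPANSION_NODE_EXPLICIT, so this helper
+ * skips it and returns the VLAN node's own next list
+ * { MLX5_EXPANSION_OUTER_IPV4, MLX5_EXPANSION_OUTER_IPV6 }.
+ */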
#define MLX5_RSS_EXP_ELT_N 16
* @param[in] pattern
* User flow pattern.
* @param[in] types
- * RSS types to expand (see ETH_RSS_* definitions).
+ * RSS types to expand (see RTE_ETH_RSS_* definitions).
* @param[in] graph
* Input graph to expand @p pattern according to @p types.
* @param[in] graph_root_index
const int *stack[MLX5_RSS_EXP_ELT_N];
int stack_pos = 0;
struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
- unsigned int i;
+ unsigned int i, item_idx, last_expand_item_idx = 0;
size_t lsize;
size_t user_pattern_size = 0;
void *addr = NULL;
struct rte_flow_item missed_item;
int missed = 0;
int elt = 0;
- const struct rte_flow_item *last_item = NULL;
+ const struct rte_flow_item *last_expand_item = NULL;
memset(&missed_item, 0, sizeof(missed_item));
lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
buf->entries = 0;
addr = buf->entry[0].pattern;
- for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ for (item = pattern, item_idx = 0;
+ item->type != RTE_FLOW_ITEM_TYPE_END;
+ item++, item_idx++) {
if (!mlx5_flow_is_rss_expandable_item(item)) {
user_pattern_size += sizeof(*item);
continue;
}
- last_item = item;
- for (i = 0; node->next && node->next[i]; ++i) {
+ last_expand_item = item;
+ last_expand_item_idx = item_idx;
+ i = 0;
+ while (node->next && node->next[i]) {
next = &graph[node->next[i]];
if (next->type == item->type)
break;
+ if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
+ node = next;
+ i = 0;
+ } else {
+ ++i;
+ }
}
if (next)
node = next;
* Check if the last valid item has spec set, need complete pattern,
* and the pattern can be used for expansion.
*/
- missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
+ missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item);
if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
/* Item type END indicates expansion is not required. */
return lsize;
if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
next = NULL;
missed = 1;
- for (i = 0; node->next && node->next[i]; ++i) {
+ i = 0;
+ while (node->next && node->next[i]) {
next = &graph[node->next[i]];
if (next->type == missed_item.type) {
flow_items[0].type = missed_item.type;
flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
break;
}
+ if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
+ node = next;
+ i = 0;
+ } else {
+ ++i;
+ }
next = NULL;
}
}
addr = (void *)(((uintptr_t)addr) +
elt * sizeof(*item));
}
+ } else if (last_expand_item != NULL) {
+ node = mlx5_flow_expand_rss_adjust_node(pattern,
+ last_expand_item_idx, graph, node);
}
memset(flow_items, 0, sizeof(flow_items));
- next_node = node->next;
+ next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+ node->next);
stack[stack_pos] = next_node;
node = next_node ? &graph[*next_node] : NULL;
while (node) {
addr = (void *)(((uintptr_t)addr) + n);
}
/* Go deeper. */
- if (!node->optional && node->next) {
- next_node = node->next;
+ if (!(node->node_flags & MLX5_EXPANSION_NODE_OPTIONAL) &&
+ node->next) {
+ next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+ node->next);
if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
rte_errno = E2BIG;
return -rte_errno;
stack[stack_pos] = next_node;
} else if (*(next_node + 1)) {
/* Follow up with the next possibility. */
+ next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+ ++next_node);
+ } else if (!stack_pos) {
+ /*
+ * The traversal over the different paths is complete.
+ * next_node is advanced to the terminator.
+ */
++next_node;
} else {
/* Move to the next path. */
- if (stack_pos)
+ while (stack_pos) {
next_node = stack[--stack_pos];
- next_node++;
+ next_node++;
+ if (*next_node)
+ break;
+ }
+ next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+ next_node);
stack[stack_pos] = next_node;
}
- node = *next_node ? &graph[*next_node] : NULL;
+ node = next_node && *next_node ? &graph[*next_node] : NULL;
};
return lsize;
}
enum mlx5_expansion {
MLX5_EXPANSION_ROOT,
MLX5_EXPANSION_ROOT_OUTER,
- MLX5_EXPANSION_ROOT_ETH_VLAN,
- MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
MLX5_EXPANSION_OUTER_ETH,
- MLX5_EXPANSION_OUTER_ETH_VLAN,
MLX5_EXPANSION_OUTER_VLAN,
MLX5_EXPANSION_OUTER_IPV4,
MLX5_EXPANSION_OUTER_IPV4_UDP,
MLX5_EXPANSION_OUTER_IPV6_UDP,
MLX5_EXPANSION_OUTER_IPV6_TCP,
MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_STD_VXLAN,
+ MLX5_EXPANSION_L3_VXLAN,
MLX5_EXPANSION_VXLAN_GPE,
MLX5_EXPANSION_GRE,
MLX5_EXPANSION_NVGRE,
MLX5_EXPANSION_GRE_KEY,
MLX5_EXPANSION_MPLS,
MLX5_EXPANSION_ETH,
- MLX5_EXPANSION_ETH_VLAN,
MLX5_EXPANSION_VLAN,
MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV4_UDP,
MLX5_EXPANSION_IPV6_UDP,
MLX5_EXPANSION_IPV6_TCP,
MLX5_EXPANSION_IPV6_FRAG_EXT,
+ MLX5_EXPANSION_GTP,
+ MLX5_EXPANSION_GENEVE,
};
/** Supported expansion of items. */
MLX5_EXPANSION_OUTER_IPV6),
.type = RTE_FLOW_ITEM_TYPE_END,
},
- [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
- .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
- .type = RTE_FLOW_ITEM_TYPE_END,
- },
- [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
- .next = MLX5_FLOW_EXPAND_RSS_NEXT
- (MLX5_EXPANSION_OUTER_ETH_VLAN),
- .type = RTE_FLOW_ITEM_TYPE_END,
- },
[MLX5_EXPANSION_OUTER_ETH] = {
- .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
- MLX5_EXPANSION_OUTER_IPV6),
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- .rss_types = 0,
- },
- [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
.type = RTE_FLOW_ITEM_TYPE_ETH,
.rss_types = 0,
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
MLX5_EXPANSION_OUTER_IPV6),
.type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
},
[MLX5_EXPANSION_OUTER_IPV4] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT
MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_IPV4,
- .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER,
+ .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
},
[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_VXLAN_GPE,
- MLX5_EXPANSION_MPLS),
+ MLX5_EXPANSION_MPLS,
+ MLX5_EXPANSION_GENEVE,
+ MLX5_EXPANSION_GTP),
.type = RTE_FLOW_ITEM_TYPE_UDP,
- .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
},
[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
.type = RTE_FLOW_ITEM_TYPE_TCP,
- .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
},
[MLX5_EXPANSION_OUTER_IPV6] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT
MLX5_EXPANSION_GRE,
MLX5_EXPANSION_NVGRE),
.type = RTE_FLOW_ITEM_TYPE_IPV6,
- .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER,
+ .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
},
[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_VXLAN_GPE,
- MLX5_EXPANSION_MPLS),
+ MLX5_EXPANSION_MPLS,
+ MLX5_EXPANSION_GENEVE,
+ MLX5_EXPANSION_GTP),
.type = RTE_FLOW_ITEM_TYPE_UDP,
- .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
},
[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
.type = RTE_FLOW_ITEM_TYPE_TCP,
- .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
},
[MLX5_EXPANSION_VXLAN] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_VXLAN,
},
+ [MLX5_EXPANSION_STD_VXLAN] = {
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ },
+ [MLX5_EXPANSION_L3_VXLAN] = {
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ },
[MLX5_EXPANSION_VXLAN_GPE] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
MLX5_EXPANSION_IPV4,
.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
},
[MLX5_EXPANSION_GRE] = {
- .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV6,
MLX5_EXPANSION_GRE_KEY,
MLX5_EXPANSION_MPLS),
MLX5_EXPANSION_IPV6,
MLX5_EXPANSION_MPLS),
.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
- .optional = 1,
+ .node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
},
[MLX5_EXPANSION_NVGRE] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
MLX5_EXPANSION_IPV6,
MLX5_EXPANSION_ETH),
.type = RTE_FLOW_ITEM_TYPE_MPLS,
+ .node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
},
[MLX5_EXPANSION_ETH] = {
- .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
- MLX5_EXPANSION_IPV6),
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- },
- [MLX5_EXPANSION_ETH_VLAN] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
.type = RTE_FLOW_ITEM_TYPE_ETH,
},
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
},
[MLX5_EXPANSION_IPV4] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
MLX5_EXPANSION_IPV4_TCP),
.type = RTE_FLOW_ITEM_TYPE_IPV4,
- .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER,
+ .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
},
[MLX5_EXPANSION_IPV4_UDP] = {
.type = RTE_FLOW_ITEM_TYPE_UDP,
- .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
},
[MLX5_EXPANSION_IPV4_TCP] = {
.type = RTE_FLOW_ITEM_TYPE_TCP,
- .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
},
[MLX5_EXPANSION_IPV6] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
MLX5_EXPANSION_IPV6_TCP,
MLX5_EXPANSION_IPV6_FRAG_EXT),
.type = RTE_FLOW_ITEM_TYPE_IPV6,
- .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER,
+ .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
},
[MLX5_EXPANSION_IPV6_UDP] = {
.type = RTE_FLOW_ITEM_TYPE_UDP,
- .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
},
[MLX5_EXPANSION_IPV6_TCP] = {
.type = RTE_FLOW_ITEM_TYPE_TCP,
- .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
},
[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
},
+ [MLX5_EXPANSION_GTP] = {
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_GTP,
+ },
+ [MLX5_EXPANSION_GENEVE] = {
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_GENEVE,
+ },
};
static struct rte_flow_action_handle *
struct rte_mbuf *m,
struct rte_flow_restore_info *info,
struct rte_flow_error *err);
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *err);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
.tunnel_item_release = mlx5_flow_tunnel_item_release,
.get_restore_info = mlx5_flow_tunnel_get_restore_info,
+ .flex_item_create = mlx5_flow_flex_item_create,
+ .flex_item_release = mlx5_flow_flex_item_release,
+ .info_get = mlx5_flow_info_get,
+ .configure = mlx5_flow_port_configure,
};
/* Tunnel information. */
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_sh_config *config = &priv->sh->config;
enum modify_reg start_reg;
bool skip_mtr_reg = false;
case MLX5_MTR_COLOR:
case MLX5_ASO_FLOW_HIT:
case MLX5_ASO_CONNTRACK:
+ case MLX5_SAMPLE_ID:
/* All features use the same REG_C. */
MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
return priv->mtr_color_reg;
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "invalid tag id");
- if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
+ if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "unsupported tag id");
* If the available index REG_C_y >= REG_C_x, skip the
* color register.
*/
- if (skip_mtr_reg && config->flow_mreg_c
+ if (skip_mtr_reg && priv->sh->flow_mreg_c
[id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
if (id >= (uint32_t)(REG_C_7 - start_reg))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "invalid tag id");
- if (config->flow_mreg_c
+ if (priv->sh->flow_mreg_c
[id + 1 + start_reg - REG_C_0] != REG_NON)
- return config->flow_mreg_c
+ return priv->sh->flow_mreg_c
[id + 1 + start_reg - REG_C_0];
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "unsupported tag id");
}
- return config->flow_mreg_c[id + start_reg - REG_C_0];
+ return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
}
MLX5_ASSERT(false);
return rte_flow_error_set(error, EINVAL,
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
/*
* Having available reg_c can be regarded inclusively as supporting
* - reg_c's are preserved across different domain (FDB and NIC) on
* packet loopback by flow lookup miss.
*/
- return config->flow_mreg_c[2] != REG_NON;
+ return priv->sh->flow_mreg_c[2] != REG_NON;
}
/**
struct mlx5_priv *priv = dev->data->dev_private;
if (!attr->group && !attr->transfer)
- return priv->config.flow_prio - 2;
+ return priv->sh->flow_max_priority - 2;
return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
}
* Pointer to device flow rule attributes.
* @param[in] subpriority
* The priority based on the items.
+ * @param[in] external
+ * True if the flow rule is created by the user application,
+ * false for PMD-internal flow rules.
* @return
* The matcher priority of the flow.
*/
uint16_t
mlx5_get_matcher_priority(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- uint32_t subpriority)
+ uint32_t subpriority, bool external)
{
uint16_t priority = (uint16_t)attr->priority;
struct mlx5_priv *priv = dev->data->dev_private;
if (!attr->group && !attr->transfer) {
if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
- priority = priv->config.flow_prio - 1;
+ priority = priv->sh->flow_max_priority - 1;
return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
+ } else if (!external && attr->transfer && attr->group == 0 &&
+ attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
+ return (priv->sh->flow_max_priority - 1) * 3;
}
if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
* @param[in] tunnel
* 1 when the hash field is for a tunnel item.
* @param[in] layer_types
- * ETH_RSS_* types.
+ * RTE_ETH_RSS_* types.
* @param[in] hash_fields
* Item hash fields.
*
}
/**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
* flow.
*
* @param[in] dev
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
return;
for (i = 0; i != ind_tbl->queues_n; ++i) {
int idx = ind_tbl->queues[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
+ MLX5_ASSERT(rxq_ctrl != NULL);
+ if (rxq_ctrl == NULL)
+ continue;
/*
* To support metadata register copy on Tx loopback,
 * this must always be enabled (metadata may arrive
* from other port - not from local flows only.
*/
- if (priv->config.dv_flow_en &&
- priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
- mlx5_flow_ext_mreg_supported(dev)) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n = 1;
- } else if (mark) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n++;
- }
if (tunnel) {
unsigned int j;
}
}
+static void
+flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (priv->mark_enabled)
+ return;
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+ rxq_ctrl->rxq.mark = 1;
+ }
+ priv->mark_enabled = 1;
+}
+
/**
* Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
*
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t handle_idx;
struct mlx5_flow_handle *dev_handle;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ MLX5_ASSERT(wks);
+ if (wks->mark)
+ flow_rxq_mark_flag_set(dev);
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dev_handle, next)
flow_drv_rxq_flags_set(dev, dev_handle);
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
MLX5_ASSERT(dev->data->dev_started);
for (i = 0; i != ind_tbl->queues_n; ++i) {
int idx = ind_tbl->queues[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
-
- if (priv->config.dv_flow_en &&
- priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
- mlx5_flow_ext_mreg_supported(dev)) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n = 1;
- } else if (mark) {
- rxq_ctrl->flow_mark_n--;
- rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
- }
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
+
+ MLX5_ASSERT(rxq_ctrl != NULL);
+ if (rxq_ctrl == NULL)
+ continue;
if (tunnel) {
unsigned int j;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
unsigned int j;
- if (!(*priv->rxqs)[i])
+ if (rxq == NULL || rxq->ctrl == NULL)
continue;
- rxq_ctrl = container_of((*priv->rxqs)[i],
- struct mlx5_rxq_ctrl, rxq);
- rxq_ctrl->flow_mark_n = 0;
- rxq_ctrl->rxq.mark = 0;
+ rxq->ctrl->rxq.mark = 0;
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
- rxq_ctrl->flow_tunnels_n[j] = 0;
- rxq_ctrl->rxq.tunnel = 0;
+ rxq->ctrl->flow_tunnels_n[j] = 0;
+ rxq->ctrl->rxq.tunnel = 0;
}
+ priv->mark_enabled = 0;
}
/**
mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *data;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
- if (!(*priv->rxqs)[i])
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+ struct mlx5_rxq_data *data;
+
+ if (rxq == NULL || rxq->ctrl == NULL)
continue;
- data = (*priv->rxqs)[i];
+ data = &rxq->ctrl->rxq;
if (!rte_flow_dynf_metadata_avail()) {
data->dynf_meta = 0;
data->flow_meta_mask = 0;
data->dynf_meta = 1;
data->flow_meta_mask = rte_flow_dynf_metadata_mask;
data->flow_meta_offset = rte_flow_dynf_metadata_offs;
- data->flow_meta_port_mask = (uint32_t)~0;
- if (priv->config.dv_xmeta_en == MLX5_XMETA_MODE_META16)
- data->flow_meta_port_mask >>= 16;
+ data->flow_meta_port_mask = priv->sh->dv_meta_mask;
}
}
}
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&queue->index,
"queue index out of range");
- if (!(*priv->rxqs)[queue->index])
+ if (mlx5_rxq_get(dev, queue->index) == NULL)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&queue->index,
return 0;
}
+/**
+ * Validate queue numbers for device RSS.
+ *
+ * @param[in] dev
+ * Configured device.
+ * @param[in] queues
+ * Array of queue numbers.
+ * @param[in] queues_n
+ * Size of the @p queues array.
+ * @param[out] error
+ * On error, filled with a textual error description.
+ * @param[out] queue_idx
+ * On error, filled with the offending queue index in the @p queues array.
+ *
+ * @return
+ * 0 on success, a negative errno code on error.
+ */
+static int
+mlx5_validate_rss_queues(struct rte_eth_dev *dev,
+ const uint16_t *queues, uint32_t queues_n,
+ const char **error, uint32_t *queue_idx)
+{
+ const struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
+ uint32_t i;
+
+ for (i = 0; i != queues_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev,
+ queues[i]);
+
+ if (queues[i] >= priv->rxqs_n) {
+ *error = "queue index out of range";
+ *queue_idx = i;
+ return -EINVAL;
+ }
+ if (rxq_ctrl == NULL) {
+ *error = "queue is not configured";
+ *queue_idx = i;
+ return -EINVAL;
+ }
+ if (i == 0)
+ rxq_type = rxq_ctrl->type;
+ if (rxq_type != rxq_ctrl->type) {
+ *error = "combining hairpin and regular RSS queues is not supported";
+ *queue_idx = i;
+ return -ENOTSUP;
+ }
+ }
+ return 0;
+}
+
/*
* Validate the rss action.
*
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *rss = action->conf;
- enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
- unsigned int i;
+ int ret;
+ const char *message;
+ uint32_t queue_idx;
if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&rss->key_len,
"RSS hash key too large");
- if (rss->queue_num > priv->config.ind_table_max_size)
+ if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&rss->queue_num,
&rss->types,
"some RSS protocols are not"
" supported");
- if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
- !(rss->types & ETH_RSS_IP))
+ if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+ !(rss->types & RTE_ETH_RSS_IP))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
"L3 partial RSS requested but L3 RSS"
" type not specified");
- if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
- !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+ if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+ !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
"L4 partial RSS requested but L4 RSS"
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "No queues configured");
- for (i = 0; i != rss->queue_num; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl;
-
- if (rss->queue[i] >= priv->rxqs_n)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue[i], "queue index out of range");
- if (!(*priv->rxqs)[rss->queue[i]])
- return rte_flow_error_set
- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue[i], "queue is not configured");
- rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
- struct mlx5_rxq_ctrl, rxq);
- if (i == 0)
- rxq_type = rxq_ctrl->type;
- if (rxq_type != rxq_ctrl->type)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue[i],
- "combining hairpin and regular RSS queues is not supported");
+ ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num,
+ &message, &queue_idx);
+ if (ret != 0) {
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[queue_idx], message);
}
return 0;
}
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t priority_max = priv->config.flow_prio - 1;
+ uint32_t priority_max = priv->sh->flow_max_priority - 1;
if (attributes->group)
return rte_flow_error_set(error, ENOTSUP,
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
"egress is not supported");
- if (attributes->transfer && !priv->config.dv_esw_en)
+ if (attributes->transfer && !priv->sh->config.dv_esw_en)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
NULL, "transfer is not supported");
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L2 layer should not follow VLAN");
+ if (item_flags & MLX5_FLOW_LAYER_GTP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L2 layer should not follow GTP");
if (!mask)
mask = &rte_flow_item_eth_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
*
* @param[in] dev
* Pointer to the Ethernet device structure.
+ * @param[in] udp_dport
+ * UDP destination port.
* @param[in] item
* Item specification.
* @param[in] item_flags
*/
int
mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
+ uint16_t udp_dport,
const struct rte_flow_item *item,
uint64_t item_flags,
const struct rte_flow_attr *attr,
"no outer UDP layer found");
if (!mask)
mask = &rte_flow_item_vxlan_mask;
- /* FDB domain & NIC domain non-zero group */
- if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
- valid_mask = &nic_mask;
- /* Group zero in NIC domain */
- if (!attr->group && !attr->transfer && priv->sh->tunnel_header_0_1)
- valid_mask = &nic_mask;
+
+ if (priv->sh->steering_format_version !=
+ MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
+ !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) {
+ /* FDB domain & NIC domain non-zero group */
+ if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
+ valid_mask = &nic_mask;
+ /* Group zero in NIC domain */
+ if (!attr->group && !attr->transfer &&
+ priv->sh->tunnel_header_0_1)
+ valid_mask = &nic_mask;
+ }
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)valid_mask,
uint8_t vni[4];
} id = { .vlan_id = 0, };
- if (!priv->config.l3_vxlan_en)
+ if (!priv->sh->config.l3_vxlan_en)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 VXLAN is not enabled by device"
const struct rte_flow_item_geneve *mask = item->mask;
int ret;
uint16_t gbhdr;
- uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+ uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
const struct rte_flow_item_geneve nic_mask = {
.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
.protocol = RTE_BE16(UINT16_MAX),
};
- if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
+ if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 Geneve is not enabled by device"
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
- struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
+ struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
uint8_t data_max_supported =
hca_attr->max_geneve_tlv_option_data_len * 4;
- struct mlx5_dev_config *config = &priv->config;
const struct rte_flow_item_geneve *geneve_spec;
const struct rte_flow_item_geneve *geneve_mask;
const struct rte_flow_item_geneve_opt *spec = item->spec;
if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Geneve TLV opt length exceeeds the limit (31)");
+ "Geneve TLV opt length exceeds the limit (31)");
/* Check if class type and length masks are full. */
if (full_mask.option_class != mask->option_class ||
full_mask.option_type != mask->option_type ||
"Geneve TLV opt class/type/length masks must be full");
/* Check if length is supported */
if ((uint32_t)spec->option_len >
- config->hca_attr.max_geneve_tlv_option_data_len)
+ hca_attr->max_geneve_tlv_option_data_len)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
"Geneve TLV opt length not supported");
- if (config->hca_attr.max_geneve_tlv_options > 1)
+ if (hca_attr->max_geneve_tlv_options > 1)
DRV_LOG(DEBUG,
"max_geneve_tlv_options supports more than 1 option");
/* Check GENEVE item preceding. */
"Data mask is of unsupported size");
}
/* Check GENEVE option is supported in NIC. */
- if (!config->hca_attr.geneve_tlv_opt)
+ if (!hca_attr->geneve_tlv_opt)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
"Geneve TLV opt not supported");
struct mlx5_priv *priv = dev->data->dev_private;
int ret;
- if (!priv->config.mpls_en)
+ if (!priv->sh->dev_cap.mpls_en)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"MPLS not supported or"
if (type != MLX5_FLOW_TYPE_MAX)
return type;
+ /*
+ * Currently when dv_flow_en == 2, only HW steering engine is
+ * supported. New engines can also be chosen here if ready.
+ */
+ if (priv->sh->config.dv_flow_en == 2)
+ return MLX5_FLOW_TYPE_HW;
/* If no OS specific type - continue with DV/VERBS selection */
- if (attr->transfer && priv->config.dv_esw_en)
+ if (attr->transfer && priv->sh->config.dv_esw_en)
type = MLX5_FLOW_TYPE_DV;
if (!attr->transfer)
- type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
- MLX5_FLOW_TYPE_VERBS;
+ type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
+ MLX5_FLOW_TYPE_VERBS;
return type;
}
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *rss = NULL;
+ struct mlx5_meter_policy_action_container *acg;
+ struct mlx5_meter_policy_action_container *acy;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
if (!policy)
return NULL;
}
- if (policy->is_rss)
- rss =
- policy->act_cnt[RTE_COLOR_GREEN].rss->conf;
+ if (policy->is_rss) {
+ acg =
+ &policy->act_cnt[RTE_COLOR_GREEN];
+ acy =
+ &policy->act_cnt[RTE_COLOR_YELLOW];
+ if (acg->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS)
+ rss = acg->rss->conf;
+ else if (acy->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS)
+ rss = acy->rss->conf;
+ }
}
break;
}
uint16_t offset = (age_idx >> 16) & UINT16_MAX;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
- struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
+ struct mlx5_aso_age_pool *pool;
+ rte_rwlock_read_lock(&mng->resize_rwl);
+ pool = mng->pools[pool_idx];
+ rte_rwlock_read_unlock(&mng->resize_rwl);
return &pool->actions[offset - 1];
}
}
static unsigned int
-find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
+find_graph_root(uint32_t rss_level)
{
- const struct rte_flow_item *item;
- unsigned int has_vlan = 0;
-
- for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
- if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- has_vlan = 1;
- break;
- }
- }
- if (has_vlan)
- return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
- MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
return rss_level < 2 ? MLX5_EXPANSION_ROOT :
MLX5_EXPANSION_ROOT_OUTER;
}
* subflow.
*
* @param[in] dev_flow
- * Pointer the created preifx subflow.
+ * Pointer the created prefix subflow.
*
* @return
* The layers get from prefix subflow.
return true;
case RTE_FLOW_ACTION_TYPE_FLAG:
case RTE_FLOW_ACTION_TYPE_MARK:
- if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
+ if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
return true;
else
return false;
[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
};
- /* Fill the register fileds in the flow. */
+ /* Fill the register fields in the flow. */
ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
if (ret < 0)
return NULL;
/*
 * The copy Flows are not included in any list. These
* ones are referenced from other Flows and can not
- * be applied, removed, deleted in ardbitrary order
+ * be applied, removed, or deleted in arbitrary order
* by list traversing.
*/
mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
uint32_t mark_id;
/* Check whether extensive metadata feature is engaged. */
- if (!priv->config.dv_flow_en ||
- priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+ if (!priv->sh->config.dv_flow_en ||
+ priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
!mlx5_flow_ext_mreg_supported(dev) ||
!priv->sh->dv_regc0_mask)
return 0;
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_sh_config *config = &priv->sh->config;
struct mlx5_flow_mreg_copy_resource *mcp_res;
const struct rte_flow_action_mark *mark;
struct rte_flow_error *error)
{
struct mlx5_flow *dev_flow;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
flow_split_info->flow_idx, error);
/*
* If dev_flow is as one of the suffix flow, some actions in suffix
* flow may need some user defined item layer flags, and pass the
- * Metadate rxq mark flag to suffix flow as well.
+ * Metadata rxq mark flag to suffix flow as well.
*/
if (flow_split_info->prefix_layers)
dev_flow->handle->layers = flow_split_info->prefix_layers;
- if (flow_split_info->prefix_mark)
- dev_flow->handle->mark = 1;
+ if (flow_split_info->prefix_mark) {
+ MLX5_ASSERT(wks);
+ wks->mark = 1;
+ }
if (sub_flow)
*sub_flow = dev_flow;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0};
uint32_t i;
- /**
+ /*
* This is a tmp dev_flow,
* no need to register any matcher for it in translate.
*/
for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
struct mlx5_flow dev_flow = {0};
struct mlx5_flow_handle dev_handle = { {0} };
+ uint8_t fate = final_policy->act_cnt[i].fate_action;
- if (final_policy->is_rss) {
- const void *rss_act =
+ if (fate == MLX5_FLOW_FATE_SHARED_RSS) {
+ const struct rte_flow_action_rss *rss_act =
final_policy->act_cnt[i].rss->conf;
struct rte_flow_action rss_actions[2] = {
[0] = {
.type = RTE_FLOW_ACTION_TYPE_RSS,
- .conf = rss_act
+ .conf = rss_act,
},
[1] = {
.type = RTE_FLOW_ACTION_TYPE_END,
- .conf = NULL
+ .conf = NULL,
}
};
rss_desc_v[i].hash_fields ?
rss_desc_v[i].queue_num : 1;
rss_desc_v[i].tunnel =
- !!(dev_flow.handle->layers &
- MLX5_FLOW_LAYER_TUNNEL);
- } else {
+ !!(dev_flow.handle->layers &
+ MLX5_FLOW_LAYER_TUNNEL);
+ /* Use the RSS queues in the containers. */
+ rss_desc_v[i].queue =
+ (uint16_t *)(uintptr_t)rss_act->queue;
+ rss_desc[i] = &rss_desc_v[i];
+ } else if (fate == MLX5_FLOW_FATE_QUEUE) {
/* This is queue action. */
rss_desc_v[i] = wks->rss_desc;
rss_desc_v[i].key_len = 0;
rss_desc_v[i].queue =
&final_policy->act_cnt[i].queue;
rss_desc_v[i].queue_num = 1;
+ rss_desc[i] = &rss_desc_v[i];
+ } else {
+ rss_desc[i] = NULL;
}
- rss_desc[i] = &rss_desc_v[i];
}
sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
flow, policy, rss_desc);
} else {
enum mlx5_meter_domain mtr_domain =
attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
- attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
- MLX5_MTR_DOMAIN_INGRESS;
+ (attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+ MLX5_MTR_DOMAIN_INGRESS);
sub_policy = policy->sub_policys[mtr_domain][0];
}
- if (!sub_policy) {
+ if (!sub_policy)
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to get meter sub-policy.");
- goto exit;
- }
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to get meter sub-policy.");
exit:
return sub_policy;
}
uint32_t tag_id = 0;
struct rte_flow_item *vlan_item_dst = NULL;
const struct rte_flow_item *vlan_item_src = NULL;
+ const struct rte_flow_item *orig_items = items;
struct rte_flow_action *hw_mtr_action;
struct rte_flow_action *action_pre_head = NULL;
int32_t flow_src_port = priv->representor_id;
if (!fm->def_policy) {
sub_policy = get_meter_sub_policy(dev, flow, wks,
- attr, items, error);
+ attr, orig_items,
+ error);
if (!sub_policy)
return -rte_errno;
} else {
enum mlx5_meter_domain mtr_domain =
attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
- attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
- MLX5_MTR_DOMAIN_INGRESS;
+ (attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+ MLX5_MTR_DOMAIN_INGRESS);
sub_policy =
&priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy;
actions_pre++;
if (!tag_action)
return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "No tag action space.");
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "No tag action space.");
if (!mtr_flow_id) {
tag_action->type = RTE_FLOW_ACTION_TYPE_VOID;
goto exit;
* Pointer to the Q/RSS action.
* @param[in] actions_n
* Number of original actions.
+ * @param[in] mtr_sfx
+ * Nonzero if the flow resides in the meter suffix table.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
struct rte_flow_action *split_actions,
const struct rte_flow_action *actions,
const struct rte_flow_action *qrss,
- int actions_n, struct rte_flow_error *error)
+ int actions_n, int mtr_sfx,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rte_flow_action_set_tag *set_tag;
* - Add jump to mreg CP_TBL.
* As a result, there will be one more action.
*/
- ++actions_n;
memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
+ /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */
+ ++actions_n;
set_tag = (void *)(split_actions + actions_n);
/*
- * If tag action is not set to void(it means we are not the meter
- * suffix flow), add the tag action. Since meter suffix flow already
- * has the tag added.
+ * If we are not the meter suffix flow, add the tag action,
+ * since the meter suffix flow already has the tag added.
*/
- if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
+ if (!mtr_sfx) {
/*
* Allocate the new subflow ID. This one is unique within
* device and not shared with representors. Otherwise,
MLX5_RTE_FLOW_ACTION_TYPE_TAG,
.conf = set_tag,
};
+ } else {
+ /*
+ * If we are the meter suffix flow, the tag already exists.
+ * Set the QUEUE/RSS action to void.
+ */
+ split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID;
}
/* JUMP action to jump to mreg copy table (CP_TBL). */
jump = (void *)(set_tag + 1);
* @param[out] error
* Perform verbose error reporting if not NULL.
* @param[in] encap_idx
- * The encap action inndex.
+ * The encap action index.
*
* @return
* 0 on success, negative value otherwise
int *modify_after_mirror)
{
const struct rte_flow_action_sample *sample;
+ const struct rte_flow_action_raw_decap *decap;
int actions_n = 0;
uint32_t ratio = 0;
int sub_type = 0;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
- case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
case RTE_FLOW_ACTION_TYPE_METER:
if (fdb_mirror)
*modify_after_mirror = 1;
break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ decap = actions->conf;
+ while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ ;
+ actions_n++;
+ if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+ const struct rte_flow_action_raw_encap *encap =
+ actions->conf;
+ if (decap->size <=
+ MLX5_ENCAPSULATION_DECISION_SIZE &&
+ encap->size >
+ MLX5_ENCAPSULATION_DECISION_SIZE)
+ /* L3 encap. */
+ break;
+ }
+ if (fdb_mirror)
+ *modify_after_mirror = 1;
+ break;
default:
break;
}
/* Prepare the prefix tag action. */
append_index++;
set_tag = (void *)(actions_pre + actions_n + append_index);
- ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
+ ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error);
if (ret < 0)
return ret;
mlx5_ipool_malloc(priv->sh->ipool
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_sh_config *config = &priv->sh->config;
const struct rte_flow_action *qrss = NULL;
struct rte_flow_action *ext_actions = NULL;
struct mlx5_flow *dev_flow = NULL;
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no memory to split "
"metadata flow");
- /*
- * If we are the suffix flow of meter, tag already exist.
- * Set the tag action to void.
- */
- if (mtr_sfx)
- ext_actions[qrss - actions].type =
- RTE_FLOW_ACTION_TYPE_VOID;
- else
- ext_actions[qrss - actions].type =
- (enum rte_flow_action_type)
- MLX5_RTE_FLOW_ACTION_TYPE_TAG;
/*
* Create the new actions list with removed Q/RSS action
* and appended set tag and jump to register copy table
* in advance, because it is needed for set tag action.
*/
qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
- qrss, actions_n, error);
+ qrss, actions_n,
+ mtr_sfx, error);
if (!mtr_sfx && !qrss_id) {
ret = -rte_errno;
goto exit;
/* Add suffix subflow to execute Q/RSS. */
flow_split_info->prefix_layers = layers;
flow_split_info->prefix_mark = 0;
+ flow_split_info->table_id = 0;
ret = flow_create_split_inner(dev, flow, &dev_flow,
&q_attr, mtr_sfx ? items :
q_items, q_actions,
goto exit;
}
/* Add the prefix subflow. */
- flow_split_info->prefix_mark = 0;
skip_scale_restore = flow_split_info->skip_scale;
flow_split_info->skip_scale |=
1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
MLX5_FLOW_TABLE_LEVEL_METER;
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
- flow_split_info->prefix_mark = dev_flow->handle->mark;
+ flow_split_info->prefix_mark |= wks->mark;
flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
}
/* Add the prefix subflow. */
struct mlx5_flow_dv_sample_resource *sample_res;
struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
struct mlx5_flow_tbl_resource *sfx_tbl;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
#endif
size_t act_size;
size_t item_size;
* When reg_c_preserve is set, metadata registers Cx preserve
* their value even through packet duplication.
*/
- add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
+ add_tag = (!fdb_tx ||
+ priv->sh->cdev->config.hca_attr.reg_c_preserve);
if (add_tag)
sfx_items = (struct rte_flow_item *)((char *)sfx_actions
+ act_size);
}
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
- flow_split_info->prefix_mark = dev_flow->handle->mark;
+ MLX5_ASSERT(wks);
+ flow_split_info->prefix_mark |= wks->mark;
/* Suffix group level already be scaled with factor, set
* MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale
* again in translation.
int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
union {
struct mlx5_flow_expand_rss buf;
- uint8_t buffer[2048];
+ uint8_t buffer[4096];
} expand_buffer;
union {
struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
* mlx5_flow_hashfields_adjust() in advance.
*/
rss_desc->level = rss->level;
- /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
- rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+ /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+ rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
}
flow->dev_handles = 0;
if (rss && rss->types) {
unsigned int graph_root;
- graph_root = find_graph_root(items, rss->level);
+ graph_root = find_graph_root(rss->level);
ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
items, rss->types,
mlx5_support_expansion, graph_root);
}
/**
- * Validate a flow supported by the NIC.
+ * Create a dedicated flow rule on e-switch table 1 that matches the ESW
+ * manager and SQ number and directs all packets to the peer vport.
*
- * @see rte_flow_validate()
- * @see rte_flow_ops
- */
-int
-mlx5_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action original_actions[],
- struct rte_flow_error *error)
-{
- int hairpin_flow;
- struct mlx5_translated_action_handle
- indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
- int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
- const struct rte_flow_action *actions;
- struct rte_flow_action *translated_actions = NULL;
- int ret = flow_action_handles_translate(dev, original_actions,
- indir_actions,
- &indir_actions_n,
- &translated_actions, error);
-
- if (ret)
- return ret;
- actions = translated_actions ? translated_actions : original_actions;
- hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
- ret = flow_drv_validate(dev, attr, items, actions,
- true, hairpin_flow, error);
- rte_free(translated_actions);
- return ret;
-}
-
-/**
- * Create a flow.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param txq
+ * Txq index.
*
- * @see rte_flow_create()
- * @see rte_flow_ops
+ * @return
+ * Flow ID on success, 0 otherwise and rte_errno is set.
*/
-struct rte_flow *
-mlx5_flow_create(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+uint32_t
+mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)
{
+ struct rte_flow_attr attr = {
+ .group = 0,
+ .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
+ .ingress = 1,
+ .egress = 0,
+ .transfer = 1,
+ };
+ struct rte_flow_item_port_id port_spec = {
+ .id = MLX5_PORT_ESW_MGR,
+ };
+ struct mlx5_rte_flow_item_tx_queue txq_spec = {
+ .queue = txq,
+ };
+ struct rte_flow_item pattern[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
+ .spec = &port_spec,
+ },
+ {
+ .type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+ .spec = &txq_spec,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action_jump jump = {
+ .group = 1,
+ };
+ struct rte_flow_action_port_id port = {
+ .id = dev->data->port_id,
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &jump,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow_error error;
+
+ /*
+ * Creates group 0, highest priority jump flow.
+ * Matches txq to bypass kernel packets.
+ */
+ if (flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions,
+ false, &error) == 0)
+ return 0;
+ /* Create group 1, lowest priority redirect flow for txq. */
+ attr.group = 1;
+ actions[0].conf = &port;
+ actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
+ return flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern,
+ actions, false, &error);
+}
+
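+/*
+ * Usage sketch: the loop below is only an illustration (it is not a
+ * caller added by this patch). The helper is expected to be invoked
+ * once per Tx queue; a return value of 0 indicates failure:
+ *
+ * for (i = 0; i < priv->txqs_n; ++i)
+ *     if (mlx5_flow_create_devx_sq_miss_flow(dev, i) == 0)
+ *         DRV_LOG(ERR, "Port %u Tx queue %u: SQ miss flow creation failed.",
+ *                 dev->data->port_id, i);
+ */
+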
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action original_actions[],
+ struct rte_flow_error *error)
+{
+ int hairpin_flow;
+ struct mlx5_translated_action_handle
+ indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
+ int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
+ const struct rte_flow_action *actions;
+ struct rte_flow_action *translated_actions = NULL;
+ int ret = flow_action_handles_translate(dev, original_actions,
+ indir_actions,
+ &indir_actions_n,
+ &translated_actions, error);
+
+ if (ret)
+ return ret;
+ actions = translated_actions ? translated_actions : original_actions;
+ hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
+ ret = flow_drv_validate(dev, attr, items, actions,
+ true, hairpin_flow, error);
+ rte_free(translated_actions);
+ return ret;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx5_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow non-Q creation not supported");
+ return NULL;
+ }
/*
	 * If the device is not started yet, it is not allowed to create a
* flow from application. PMD default flows and traffic control flows
* @param type
* Flow type to be flushed.
* @param active
- * If flushing is called avtively.
+ * If flushing is called actively.
*/
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
return data;
err:
- if (data->rss_desc.queue)
- free(data->rss_desc.queue);
+ free(data->rss_desc.queue);
free(data);
return NULL;
}
if (!priv->reta_idx_n || !priv->rxqs_n) {
return 0;
}
- if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
action_rss.types = 0;
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
struct rte_flow *flow,
struct rte_flow_error *error __rte_unused)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow non-Q destruction not supported");
flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
(uintptr_t)(void *)flow);
return 0;
struct rte_flow_error *error)
{
int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ if (priv->sh->config.dv_flow_en == 2)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow non-Q query not supported");
ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
error);
if (ret < 0)
struct rte_flow_attr *attr,
bool *is_rss,
uint8_t *domain_bitmap,
- bool *is_def_policy,
+ uint8_t *policy_mode,
struct rte_mtr_error *error)
{
const struct mlx5_flow_driver_ops *fops;
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->validate_mtr_acts(dev, actions, attr,
- is_rss, domain_bitmap, is_def_policy, error);
+ return fops->validate_mtr_acts(dev, actions, attr, is_rss,
+ domain_bitmap, policy_mode, error);
}
/**
*/
int
mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
- bool clear, uint64_t *pkts, uint64_t *bytes)
+ bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
{
const struct mlx5_flow_driver_ops *fops;
struct rte_flow_attr attr = { .transfer = 0 };
if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->counter_query(dev, cnt, clear, pkts, bytes);
+ return fops->counter_query(dev, cnt, clear, pkts,
+ bytes, action);
}
DRV_LOG(ERR,
"port %u counter query is not supported.",
return -ENOTSUP;
}
+/**
+ * Get information about HWS pre-configurable resources.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[out] port_info
+ * Pointer to port information.
+ * @param[out] queue_info
+ * Pointer to queue information.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "info get with incorrect steering mode");
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->info_get(dev, port_info, queue_info, error);
+}
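
A hedged usage sketch for this new handler, assuming it is reached through rte_flow_info_get(), whose parameters mirror this function; the port id is illustrative:

/* Hedged example only: query HWS pre-configuration limits. */
static int
example_info_get(uint16_t port_id)
{
	struct rte_flow_port_info port_info;
	struct rte_flow_queue_info queue_info;
	struct rte_flow_error error;

	return rte_flow_info_get(port_id, &port_info, &queue_info, &error);
}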
+
+/**
+ * Configure port HWS resources.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] port_attr
+ * Port configuration attributes.
+ * @param[in] nb_queue
+ * Number of queues.
+ * @param[in] queue_attr
+ * Array that holds attributes for each flow queue.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port configure with incorrect steering mode");
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
+}
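
Likewise, a hedged sketch of the configure step, assuming the standard rte_flow_configure() entry point dispatches here; the counter budget and queue depth are made-up values that a real application would size from the info-get results:

/* Hedged example only: pre-allocate HWS resources with one flow queue. */
static int
example_port_configure(uint16_t port_id)
{
	const struct rte_flow_port_attr port_attr = {
		.nb_counters = 1 << 10,	/* illustrative counter budget */
	};
	const struct rte_flow_queue_attr queue_attr = {
		.size = 64,		/* illustrative queue depth */
	};
	const struct rte_flow_queue_attr *queue_attrs[] = { &queue_attr };
	struct rte_flow_error error;

	return rte_flow_configure(port_id, &port_attr, 1, queue_attrs, &error);
}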
+
/**
* Allocate a new memory for the counter values wrapped by all the needed
* management.
static int
mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
{
- struct mlx5_devx_mkey_attr mkey_attr;
struct mlx5_counter_stats_mem_mng *mem_mng;
volatile struct flow_counter_stats *raw_data;
int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
sizeof(struct mlx5_counter_stats_mem_mng);
size_t pgsize = rte_mem_page_size();
uint8_t *mem;
+ int ret;
int i;
if (pgsize == (size_t)-1) {
}
mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
- mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
- IBV_ACCESS_LOCAL_WRITE);
- if (!mem_mng->umem) {
- rte_errno = errno;
- mlx5_free(mem);
- return -rte_errno;
- }
- memset(&mkey_attr, 0, sizeof(mkey_attr));
- mkey_attr.addr = (uintptr_t)mem;
- mkey_attr.size = size;
- mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
- mkey_attr.pd = sh->pdn;
- mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
- mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
- mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
- if (!mem_mng->dm) {
- mlx5_os_umem_dereg(mem_mng->umem);
+ ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd,
+ sh->cdev->pdn, mem, size,
+ &mem_mng->wm);
+ if (ret) {
rte_errno = errno;
mlx5_free(mem);
return -rte_errno;
ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
MLX5_COUNTERS_PER_POOL,
NULL, NULL,
- pool->raw_hw->mem_mng->dm->id,
+ pool->raw_hw->mem_mng->wm.lkey,
(void *)(uintptr_t)
pool->raw_hw->data,
sh->devx_comp,
mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
enum modify_reg idx;
int n = 0;
/* reg_c[0] and reg_c[1] are reserved. */
- config->flow_mreg_c[n++] = REG_C_0;
- config->flow_mreg_c[n++] = REG_C_1;
+ priv->sh->flow_mreg_c[n++] = REG_C_0;
+ priv->sh->flow_mreg_c[n++] = REG_C_1;
/* Discover availability of other reg_c's. */
for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
struct rte_flow_attr attr = {
struct rte_flow *flow;
struct rte_flow_error error;
- if (!config->dv_flow_en)
+ if (!priv->sh->config.dv_flow_en)
break;
/* Create internal flow, validation skips copy action. */
flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
flow_idx);
if (!flow)
continue;
- config->flow_mreg_c[n++] = idx;
+ priv->sh->flow_mreg_c[n++] = idx;
flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
}
for (; n < MLX5_MREG_C_NUM; ++n)
- config->flow_mreg_c[n] = REG_NON;
+ priv->sh->flow_mreg_c[n] = REG_NON;
+ priv->sh->metadata_regc_check_flag = 1;
return 0;
}
int
save_dump_file(const uint8_t *data, uint32_t size,
- uint32_t type, uint32_t id, void *arg, FILE *file)
+ uint32_t type, uint64_t id, void *arg, FILE *file)
{
char line[BUF_SIZE];
uint32_t out = 0;
switch (type) {
case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR:
actions_num = *(uint32_t *)(arg);
- out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,%d,",
+ out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,",
type, id, actions_num);
break;
case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT:
- out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,",
+ out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",",
type, id);
break;
case DR_DUMP_REC_TYPE_PMD_COUNTER:
count = (struct rte_flow_query_count *)arg;
- fprintf(file, "%d,0x%x,%" PRIu64 ",%" PRIu64 "\n", type,
- id, count->hits, count->bytes);
+ fprintf(file,
+ "%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n",
+ type, id, count->hits, count->bytes);
return 0;
default:
return -1;
uint32_t actions_num;
const uint8_t *data;
size_t size;
- uint32_t id;
+ uint64_t id;
uint32_t type;
+ void *action = NULL;
if (!flow) {
return rte_flow_error_set(error, ENOENT,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "invalid flow handle");
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "invalid flow handle");
}
handle_idx = flow->dev_handles;
+ /* Query the flow counter, if any. */
+ if (flow->counter &&
+ (!mlx5_counter_query(dev, flow->counter, false,
+ &count.hits, &count.bytes, &action)) && action) {
+ id = (uint64_t)(uintptr_t)action;
+ type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+ save_dump_file(NULL, 0, type,
+ id, (void *)&count, file);
+ }
+
while (handle_idx) {
dh = mlx5_ipool_get(priv->sh->ipool
- [MLX5_IPOOL_MLX5_FLOW], handle_idx);
+ [MLX5_IPOOL_MLX5_FLOW], handle_idx);
if (!dh)
continue;
handle_idx = dh->next.next;
- id = (uint32_t)(uintptr_t)dh->drv_flow;
-
- /* query counter */
- type = DR_DUMP_REC_TYPE_PMD_COUNTER;
- if (!mlx5_flow_query_counter(dev, flow, &count, error))
- save_dump_file(NULL, 0, type,
- id, (void *)&count, file);
/* Get modify_hdr and encap_decap buf from ipools. */
encap_decap = NULL;
if (modify_hdr) {
data = (const uint8_t *)modify_hdr->actions;
size = (size_t)(modify_hdr->actions_num) * 8;
+ id = (uint64_t)(uintptr_t)modify_hdr->action;
actions_num = modify_hdr->actions_num;
type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
save_dump_file(data, size, type, id,
- (void *)(&actions_num), file);
+ (void *)(&actions_num), file);
}
if (encap_decap) {
data = encap_decap->buf;
size = encap_decap->size;
+ id = (uint64_t)(uintptr_t)encap_decap->action;
type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
save_dump_file(data, size, type,
id, NULL, file);
}
return 0;
}
+
+/**
+ * Dump all flows' encap_decap/modify_hdr/counter data to a file.
+ *
+ * @param[in] dev
+ * The pointer to Ethernet device.
+ * @param[in] file
+ * A pointer to a file for output.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+static int
+mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
+ FILE *file, struct rte_flow_error *error __rte_unused)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_hlist *h;
+ struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
+ struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+ struct rte_flow_query_count count;
+ uint32_t actions_num;
+ const uint8_t *data;
+ size_t size;
+ uint64_t id;
+ uint32_t type;
+ uint32_t i;
+ uint32_t j;
+ struct mlx5_list_inconst *l_inconst;
+ struct mlx5_list_entry *e;
+ int lcore_index;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ uint32_t max;
+ void *action;
+
+ /* The encap_decap hlist is lcore-shared, so use the global lcore cache. */
+ i = MLX5_LIST_GLOBAL;
+ h = sh->encaps_decaps;
+ if (h) {
+ for (j = 0; j <= h->mask; j++) {
+ l_inconst = &h->buckets[j].l;
+ if (!l_inconst || !l_inconst->cache[i])
+ continue;
+
+ e = LIST_FIRST(&l_inconst->cache[i]->h);
+ while (e) {
+ encap_decap =
+ (struct mlx5_flow_dv_encap_decap_resource *)e;
+ data = encap_decap->buf;
+ size = encap_decap->size;
+ id = (uint64_t)(uintptr_t)encap_decap->action;
+ type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
+ save_dump_file(data, size, type,
+ id, NULL, file);
+ e = LIST_NEXT(e, next);
+ }
+ }
+ }
+
+ /* get modify_hdr */
+ h = sh->modify_cmds;
+ if (h) {
+ lcore_index = rte_lcore_index(rte_lcore_id());
+ if (unlikely(lcore_index == -1)) {
+ lcore_index = MLX5_LIST_NLCORE;
+ rte_spinlock_lock(&h->l_const.lcore_lock);
+ }
+ i = lcore_index;
+
+ for (j = 0; j <= h->mask; j++) {
+ l_inconst = &h->buckets[j].l;
+ if (!l_inconst || !l_inconst->cache[i])
+ continue;
+
+ e = LIST_FIRST(&l_inconst->cache[i]->h);
+ while (e) {
+ modify_hdr =
+ (struct mlx5_flow_dv_modify_hdr_resource *)e;
+ data = (const uint8_t *)modify_hdr->actions;
+ size = (size_t)(modify_hdr->actions_num) * 8;
+ actions_num = modify_hdr->actions_num;
+ id = (uint64_t)(uintptr_t)modify_hdr->action;
+ type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
+ save_dump_file(data, size, type, id,
+ (void *)(&actions_num), file);
+ e = LIST_NEXT(e, next);
+ }
+ }
+
+ if (unlikely(lcore_index == MLX5_LIST_NLCORE))
+ rte_spinlock_unlock(&h->l_const.lcore_lock);
+ }
+
+ /* get counter */
+ MLX5_ASSERT(cmng->n_valid <= cmng->n);
+ max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
+ for (j = 1; j <= max; j++) {
+ action = NULL;
+ if ((!mlx5_counter_query(dev, j, false, &count.hits,
+ &count.bytes, &action)) && action) {
+ id = (uint64_t)(uintptr_t)action;
+ type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+ save_dump_file(NULL, 0, type,
+ id, (void *)&count, file);
+ }
+ }
+ return 0;
+}
#endif
/**
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
* @return
- * 0 on success, a nagative value otherwise.
+ * 0 on success, a negative value otherwise.
*/
int
mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
int ret;
struct mlx5_flow_handle *dh;
struct rte_flow *flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- uint32_t idx;
-#endif
- if (!priv->config.dv_flow_en) {
+ if (!sh->config.dv_flow_en) {
if (fputs("device dv flow disabled\n", file) <= 0)
return -errno;
return -ENOTSUP;
/* dump all */
if (!flow_idx) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- MLX5_IPOOL_FOREACH(priv->flows[MLX5_FLOW_TYPE_GEN], idx, flow)
- mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+ if (mlx5_flow_dev_dump_sh_all(dev, file, error))
+ return -EINVAL;
#endif
return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
sh->rx_domain,
flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
(uintptr_t)(void *)flow_idx);
if (!flow)
- return -ENOENT;
+ return -EINVAL;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
mlx5_flow_dev_dump_ipool(dev, flow, file, error);
return ret;
}
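
For reference, the new dump-all path above is taken when the application passes a NULL flow handle to the generic dump API; a hedged sketch (the port id and stderr target are illustrative):

/* Hedged example only: dump every flow resource of a port to stderr. */
static int
example_flow_dump(uint16_t port_id)
{
	struct rte_flow_error error;

	return rte_flow_dev_dump(port_id, NULL, stderr, &error);
}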
+/**
+ * Validate existing indirect actions against current device configuration
+ * and attach them to device resources.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_attach(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indexed_pool *ipool =
+ priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+ struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+ int ret = 0;
+ uint32_t idx;
+
+ ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+ struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+ const char *message;
+ uint32_t queue_idx;
+
+ ret = mlx5_validate_rss_queues(dev, ind_tbl->queues,
+ ind_tbl->queues_n,
+ &message, &queue_idx);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s",
+ dev->data->port_id, ind_tbl->queues[queue_idx],
+ message);
+ break;
+ }
+ }
+ if (ret != 0)
+ return ret;
+ ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+ struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+ ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u could not attach "
+ "indirection table obj %p",
+ dev->data->port_id, (void *)ind_tbl);
+ goto error;
+ }
+ }
+ return 0;
+error:
+ shared_rss_last = shared_rss;
+ ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+ struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+ if (shared_rss == shared_rss_last)
+ break;
+ if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
+ DRV_LOG(CRIT, "Port %u could not detach "
+ "indirection table obj %p on rollback",
+ dev->data->port_id, (void *)ind_tbl);
+ }
+ return ret;
+}
+
+/**
+ * Detach indirect actions of the device from its resources.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_detach(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indexed_pool *ipool =
+ priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+ struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+ int ret = 0;
+ uint32_t idx;
+
+ ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+ struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+ ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u could not detach "
+ "indirection table obj %p",
+ dev->data->port_id, (void *)ind_tbl);
+ goto error;
+ }
+ }
+ return 0;
+error:
+ shared_rss_last = shared_rss;
+ ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+ struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+ if (shared_rss == shared_rss_last)
+ break;
+ if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
+ DRV_LOG(CRIT, "Port %u could not attach "
+ "indirection table obj %p on rollback",
+ dev->data->port_id, (void *)ind_tbl);
+ }
+ return ret;
+}
+
#ifndef HAVE_MLX5DV_DR
#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
#else
}
/**
- * tunnel offload functionalilty is defined for DV environment only
+ * tunnel offload functionality is defined for DV environment only
*/
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
__extension__
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "invalid port configuration");
- if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
ctx->action_rss.types = 0;
for (i = 0; i != priv->reta_idx_n; ++i)
ctx->queue[i] = (*priv->reta_idx)[i];
return err;
}
-static inline bool
+static inline int
mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
struct rte_flow_tunnel *tunnel,
- const char *err_msg)
+ struct rte_flow_error *error)
{
- err_msg = NULL;
- if (!is_tunnel_offload_active(dev)) {
- err_msg = "tunnel offload was not activated";
- goto out;
- } else if (!tunnel) {
- err_msg = "no application tunnel";
- goto out;
- }
+ struct mlx5_priv *priv = dev->data->dev_private;
+ if (!priv->sh->config.dv_flow_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "flow DV interface is off");
+ if (!is_tunnel_offload_active(dev))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "tunnel offload was not activated");
+ if (!tunnel)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "no application tunnel");
switch (tunnel->type) {
default:
- err_msg = "unsupported tunnel type";
- goto out;
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "unsupported tunnel type");
case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
break;
}
-
-out:
- return !err_msg;
+ return 0;
}
static int
uint32_t *num_of_actions,
struct rte_flow_error *error)
{
- int ret;
struct mlx5_flow_tunnel *tunnel;
- const char *err_msg = NULL;
- bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+ int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
- if (!verdict)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
- err_msg);
+ if (ret)
+ return ret;
ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
if (ret < 0) {
return rte_flow_error_set(error, ret,
uint32_t *num_of_items,
struct rte_flow_error *error)
{
- int ret;
struct mlx5_flow_tunnel *tunnel;
- const char *err_msg = NULL;
- bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+ int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
- if (!verdict)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- err_msg);
+ if (ret)
+ return ret;
ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
if (ret < 0) {
return rte_flow_error_set(error, ret,
{
uint64_t ol_flags = m->ol_flags;
const struct mlx5_flow_tbl_data_entry *tble;
- const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
if (!is_tunnel_offload_active(dev)) {
info->flags = 0;
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+/* Flex flow item API */
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error)
+{
+ static const char err_msg[] = "flex item creation unsupported";
+ struct rte_flow_attr attr = { .transfer = 0 };
+ const struct mlx5_flow_driver_ops *fops =
+ flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+ if (!fops->item_create) {
+ DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, err_msg);
+ return NULL;
+ }
+ return fops->item_create(dev, conf, error);
+}
+
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error)
+{
+ static const char err_msg[] = "flex item release unsupported";
+ struct rte_flow_attr attr = { .transfer = 0 };
+ const struct mlx5_flow_driver_ops *fops =
+ flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+ if (!fops->item_release) {
+ DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, err_msg);
+ return -rte_errno;
+ }
+ return fops->item_release(dev, handle, error);
+}
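
A hedged sketch of the application-facing side of these wrappers, assuming the rte_flow_flex_item_create()/rte_flow_flex_item_release() pair; the zeroed configuration is purely illustrative, a real one describes the custom protocol layout to be parsed:

/* Hedged example only: create and immediately release a flex item. */
static int
example_flex_item(uint16_t port_id)
{
	struct rte_flow_item_flex_conf conf = { 0 };
	struct rte_flow_item_flex_handle *handle;
	struct rte_flow_error error;

	handle = rte_flow_flex_item_create(port_id, &conf, &error);
	if (handle == NULL)
		return -rte_errno;
	return rte_flow_flex_item_release(port_id, handle, &error);
}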
+
static void
mlx5_dbg__print_pattern(const struct rte_flow_item *item)
{
}
printf("END\n");
}
+
+static int
+mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item)
+{
+ const struct rte_flow_item_udp *spec = udp_item->spec;
+ const struct rte_flow_item_udp *mask = udp_item->mask;
+ uint16_t udp_dport = 0;
+
+ if (spec != NULL) {
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port &
+ mask->hdr.dst_port);
+ }
+ return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN);
+}
+
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+ unsigned int item_idx,
+ const struct mlx5_flow_expand_node graph[],
+ const struct mlx5_flow_expand_node *node)
+{
+ const struct rte_flow_item *item = pattern + item_idx, *prev_item;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN &&
+ node != NULL &&
+ node->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ /*
+ * The expansion node is VXLAN and it is also the last
+ * expandable item in the pattern, so expansion of the
+ * inner tunnel must continue.
+ */
+ MLX5_ASSERT(item_idx > 0);
+ prev_item = pattern + item_idx - 1;
+ MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
+ if (mlx5_flow_is_std_vxlan_port(prev_item))
+ return &graph[MLX5_EXPANSION_STD_VXLAN];
+ return &graph[MLX5_EXPANSION_L3_VXLAN];
+ }
+ return node;
+}
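
To make the branch above concrete: a wildcard or standard (MLX5_UDP_PORT_VXLAN, 4789) destination port selects the MLX5_EXPANSION_STD_VXLAN node, while any other explicit port selects MLX5_EXPANSION_L3_VXLAN. The illustrative (non-patch) pattern below would take the L3 branch:

/* Hedged example only: a VXLAN pattern on a non-standard UDP port. */
static const struct rte_flow_item_udp example_udp_spec = {
	.hdr.dst_port = RTE_BE16(4790),	/* not MLX5_UDP_PORT_VXLAN (4789) */
};
static const struct rte_flow_item example_vxlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &example_udp_spec },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};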
+
+/* Map of Verbs to Flow priority with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+ { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
+};
+
+/* Map of Verbs to Flow priority with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+ { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+ { 9, 10, 11 }, { 12, 13, 14 },
+};
+
+/**
+ * Discover the number of available flow priorities.
+ *
+ * @param dev
+ * Ethernet device.
+ *
+ * @return
+ * On success, number of available flow priorities.
+ * On failure, a negative errno-style code and rte_errno is set.
+ */
+int
+mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
+{
+ static const uint16_t vprio[] = {8, 16};
+ const struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type;
+ int ret;
+
+ type = mlx5_flow_os_get_type();
+ if (type == MLX5_FLOW_TYPE_MAX) {
+ type = MLX5_FLOW_TYPE_VERBS;
+ if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en)
+ type = MLX5_FLOW_TYPE_DV;
+ }
+ fops = flow_get_drv_ops(type);
+ if (fops->discover_priorities == NULL) {
+ DRV_LOG(ERR, "Priority discovery not supported");
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));
+ if (ret < 0)
+ return ret;
+ switch (ret) {
+ case 8:
+ ret = RTE_DIM(priority_map_3);
+ break;
+ case 16:
+ ret = RTE_DIM(priority_map_5);
+ break;
+ default:
+ rte_errno = ENOTSUP;
+ DRV_LOG(ERR,
+ "port %u maximum priority: %d expected 8/16",
+ dev->data->port_id, ret);
+ return -rte_errno;
+ }
+ DRV_LOG(INFO, "port %u supported flow priorities:"
+ " 0-%d for ingress or egress root table,"
+ " 0-%d for non-root table or transfer root table.",
+ dev->data->port_id, ret - 2,
+ MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
+ return ret;
+}
+
+/**
+ * Adjust flow priority based on the highest layer and the request priority.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] priority
+ * The rule base priority.
+ * @param[in] subpriority
+ * The priority based on the items.
+ *
+ * @return
+ * The new priority.
+ */
+uint32_t
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+ uint32_t subpriority)
+{
+ uint32_t res = 0;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ switch (priv->sh->flow_max_priority) {
+ case RTE_DIM(priority_map_3):
+ res = priority_map_3[priority][subpriority];
+ break;
+ case RTE_DIM(priority_map_5):
+ res = priority_map_5[priority][subpriority];
+ break;
+ }
+ return res;
+}
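
A hedged worked example of the lookup, reading the values straight off the maps defined above:

/*
 * With 16 Verbs priorities, flow_max_priority == RTE_DIM(priority_map_5)
 * == 5, so a rule with base priority 1 and item subpriority 2 is placed
 * at Verbs priority priority_map_5[1][2] == 5; on an 8-priority device
 * the same rule maps to priority_map_3[1][2] == 4.
 */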