linux/pkt_cls.h \
enum TCA_FLOWER_KEY_VLAN_ETH_TYPE \
$(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_FLAGS \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_FLAGS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_FLAGS_MASK \
+ $(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_TC_ACT_VLAN \
linux/tc_act/tc_vlan.h \
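Note: each probe above runs buildtools/auto-config-h.sh against the installed kernel headers and appends a HAVE_* macro to the generated config header when the probed enum value exists. Assuming headers that already declare the flower TCP-flags attributes, the generated output would look roughly like this (illustrative only; the exact formatting comes from auto-config-h.sh):

/* Illustrative content of the generated autoconf header when the probes
 * succeed; older headers simply leave these macros undefined. */
#define HAVE_TCA_FLOWER_KEY_TCP_FLAGS 1
#define HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK 1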
'TCA_FLOWER_KEY_VLAN_PRIO' ],
[ 'HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE', 'linux/pkt_cls.h',
'TCA_FLOWER_KEY_VLAN_ETH_TYPE' ],
+ [ 'HAVE_TCA_FLOWER_KEY_TCP_FLAGS', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_TCP_FLAGS' ],
+ [ 'HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_TCP_FLAGS_MASK' ],
[ 'HAVE_TC_ACT_VLAN', 'linux/tc_act/tc_vlan.h',
'TCA_VLAN_PUSH_VLAN_PRIORITY' ],
[ 'HAVE_RDMA_NL_NLDEV', 'rdma/rdma_netlink.h',
* Bit-fields that hold the items detected so far.
* @param[in] target_protocol
* The next protocol in the previous item.
+ * @param[in] flow_mask
+ *   Mask of header fields supported by the mlx5 flow path in use (TCF, DV, Verbs, etc.).
* @param[out] error
* Pointer to error structure.
*
mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
uint64_t item_flags,
uint8_t target_protocol,
+ const struct rte_flow_item_tcp *flow_mask,
struct rte_flow_error *error)
{
const struct rte_flow_item_tcp *mask = item->mask;
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int ret;
+ assert(flow_mask);
if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with TCP layer");
	if (!mask)
		mask = &rte_flow_item_tcp_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_tcp_mask,
+ (const uint8_t *)flow_mask,
sizeof(struct rte_flow_item_tcp), error);
if (ret < 0)
return ret;
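The new parameter lets each driver path advertise exactly which TCP header fields it can match: mlx5_flow_item_acceptable() fails for any bit set in the item's mask but clear in flow_mask. A minimal sketch of the idea, assuming a hypothetical path without TCP-flag support (hypothetical_tcp_mask is illustrative, not part of the patch):

/* Illustrative: a driver path unable to match on TCP flags would pass a
 * mask with tcp_flags cleared, so items that set flag bits in their
 * mask fail validation instead of being silently ignored. */
static const struct rte_flow_item_tcp hypothetical_tcp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
		.tcp_flags = 0x00, /* flag matching unsupported */
	},
};

ret = mlx5_flow_validate_item_tcp(items, item_flags, next_protocol,
				  &hypothetical_tcp_mask, error);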
int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
uint64_t item_flags,
uint8_t target_protocol,
+ const struct rte_flow_item_tcp *flow_mask,
struct rte_flow_error *error);
int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
uint64_t item_flags,
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
- ret = mlx5_flow_validate_item_tcp(items, item_flags,
- next_protocol, error);
+ ret = mlx5_flow_validate_item_tcp
+ (items, item_flags,
+ next_protocol,
+ &rte_flow_item_tcp_mask,
+ error);
if (ret < 0)
return ret;
item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
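Design note: the Verbs path above (and the DV path at the end of this patch) passes the generic rte_flow_item_tcp_mask, i.e. full field support, while the TCF path below passes its own flow_tcf_mask_supported.tcp, which this patch extends with tcp_flags = 0xff.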
#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE
#define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25
#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS
+#define TCA_FLOWER_KEY_TCP_FLAGS 71
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK
+#define TCA_FLOWER_KEY_TCP_FLAGS_MASK 72
+#endif
#ifndef IPV6_ADDR_LEN
#define IPV6_ADDR_LEN 16
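The numeric fallbacks (71 and 72) are expected to match the positions of these attributes in the flower enum of linux/pkt_cls.h, so the PMD can still emit them when built against older kernel headers; when the HAVE_* probes succeed, the kernel's own definitions are used and the fallbacks compile out.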
.tcp.hdr = {
.src_port = RTE_BE16(0xffff),
.dst_port = RTE_BE16(0xffff),
+ .tcp_flags = 0xff,
},
.udp.hdr = {
.src_port = RTE_BE16(0xffff),
return -rte_errno;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
- ret = mlx5_flow_validate_item_tcp(items, item_flags,
- next_protocol, error);
+ ret = mlx5_flow_validate_item_tcp
+ (items, item_flags,
+ next_protocol,
+ &flow_tcf_mask_supported.tcp,
+ error);
if (ret < 0)
return ret;
item_flags |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
TCA_FLOWER_KEY_TCP_DST_MASK,
mask.tcp->hdr.dst_port);
}
+ if (mask.tcp->hdr.tcp_flags) {
+ mnl_attr_put_u16
+ (nlh,
+ TCA_FLOWER_KEY_TCP_FLAGS,
+ rte_cpu_to_be_16
+ (spec.tcp->hdr.tcp_flags));
+ mnl_attr_put_u16
+ (nlh,
+ TCA_FLOWER_KEY_TCP_FLAGS_MASK,
+ rte_cpu_to_be_16
+ (mask.tcp->hdr.tcp_flags));
+ }
break;
default:
return rte_flow_error_set(error, ENOTSUP,
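End to end, a pattern item matching SYN-only TCP packets exercises this path: the non-zero tcp_flags in the mask triggers emission of both attributes, widened to big-endian 16-bit values as the flower classifier expects. A hypothetical usage sketch (item definitions only; names are illustrative):

/* Hypothetical rte_flow TCP item matching packets with only SYN set.
 * A non-zero mask.tcp_flags makes the translation above emit
 * TCA_FLOWER_KEY_TCP_FLAGS and TCA_FLOWER_KEY_TCP_FLAGS_MASK. */
static const struct rte_flow_item_tcp tcp_syn_spec = {
	.hdr = { .tcp_flags = 0x02 }, /* SYN */
};
static const struct rte_flow_item_tcp tcp_syn_mask = {
	.hdr = { .tcp_flags = 0xff }, /* compare every flag bit */
};
static const struct rte_flow_item tcp_syn_item = {
	.type = RTE_FLOW_ITEM_TYPE_TCP,
	.spec = &tcp_syn_spec,
	.mask = &tcp_syn_mask,
};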
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
- ret = mlx5_flow_validate_item_tcp(items, item_flags,
- next_protocol, error);
+ ret = mlx5_flow_validate_item_tcp
+ (items, item_flags,
+ next_protocol,
+ &rte_flow_item_tcp_mask,
+ error);
if (ret < 0)
return ret;
item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :