In a recent update, the misc5 matcher was introduced to match the
extra fields of the VXLAN header. However, ConnectX-5 does not
support misc5 when the UDP destination port differs from the
standard VXLAN one (4789).
Fall back to the previous approach and use the legacy misc matcher
when a non-standard UDP port is recognized in a VXLAN flow.
Fixes:
630a587bfb37 ("net/mlx5: support matching on VXLAN reserved field")
Cc: stable@dpdk.org
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
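
A minimal sketch (illustrative only, not part of the patch) of the decision
the driver now makes: misc5 stays in use unless the device uses ConnectX-5
steering and the flow pins the outer UDP destination port to something other
than the standard VXLAN port. The helper name and the macro values are
placeholders; the real definitions live in the mlx5 PMD headers.

    #include <stdbool.h>
    #include <stdint.h>

    /* Placeholder stand-ins for the PMD definitions (illustrative values). */
    #define MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 0x0
    #define MLX5_UDP_PORT_VXLAN 4789

    /*
     * Misc5 remains usable unless the device is ConnectX-5 and the flow
     * explicitly matches a UDP destination port other than the standard
     * VXLAN one; udp_dport == 0 means the port is not constrained.
     */
    static bool
    vxlan_can_use_misc5(uint32_t steering_format_version, uint16_t udp_dport)
    {
        return steering_format_version !=
               MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
               !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN;
    }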
Matching on the last reserved 8 bits is supported only when using the DV flow
engine (``dv_flow_en`` = 1).
+ For ConnectX-5, the UDP destination port must be the standard one (4789).
Group zero's behavior may differ, depending on the FW.
Matching on a value of 0 (value & mask) is not supported.
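
For context (illustrative only, not part of the patch), an application-level
sketch of a flow that matches the VXLAN reserved byte while keeping the
destination port at the standard 4789, so that ConnectX-5 can still use misc5
matching. The function name and the matched values (VNI 42, rsvd1 0x01,
queue 0) are hypothetical:

    #include <rte_byteorder.h>
    #include <rte_errno.h>
    #include <rte_flow.h>

    /* Hypothetical helper: match VNI 42 plus the last reserved byte of the
     * VXLAN header, with the outer UDP destination port fixed to 4789.
     */
    static int
    create_vxlan_rsvd1_flow(uint16_t port_id, struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
        struct rte_flow_item_udp udp_spec = {
            .hdr.dst_port = RTE_BE16(4789), /* standard VXLAN port */
        };
        struct rte_flow_item_udp udp_mask = {
            .hdr.dst_port = RTE_BE16(0xffff),
        };
        struct rte_flow_item_vxlan vxlan_spec = {
            .vni = { 0x00, 0x00, 0x2a }, /* VNI 42 */
            .rsvd1 = 0x01,               /* last reserved 8 bits */
        };
        struct rte_flow_item_vxlan vxlan_mask = {
            .vni = { 0xff, 0xff, 0xff },
            .rsvd1 = 0xff,
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP,
              .spec = &udp_spec, .mask = &udp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
              .spec = &vxlan_spec, .mask = &vxlan_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err) != NULL ?
               0 : -rte_errno;
    }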
*
* @param[in] dev
* Pointer to the Ethernet device structure.
+ * @param[in] udp_dport
+ * UDP destination port.
* @param[in] item
* Item specification.
* @param[in] item_flags
*/
int
mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
+ uint16_t udp_dport,
const struct rte_flow_item *item,
uint64_t item_flags,
const struct rte_flow_attr *attr,
"no outer UDP layer found");
if (!mask)
mask = &rte_flow_item_vxlan_mask;
- /* FDB domain & NIC domain non-zero group */
- if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
- valid_mask = &nic_mask;
- /* Group zero in NIC domain */
- if (!attr->group && !attr->transfer && priv->sh->tunnel_header_0_1)
- valid_mask = &nic_mask;
+
+ if (priv->sh->steering_format_version !=
+ MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
+ !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) {
+ /* FDB domain & NIC domain non-zero group */
+ if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
+ valid_mask = &nic_mask;
+ /* Group zero in NIC domain */
+ if (!attr->group && !attr->transfer &&
+ priv->sh->tunnel_header_0_1)
+ valid_mask = &nic_mask;
+ }
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)valid_mask,
struct rte_eth_dev *dev,
struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
+ uint16_t udp_dport,
const struct rte_flow_item *item,
uint64_t item_flags,
const struct rte_flow_attr *attr,
const struct rte_flow_item *rule_items = items;
const struct rte_flow_item *port_id_item = NULL;
bool def_policy = false;
+ uint16_t udp_dport = 0;
if (items == NULL)
return -1;
ret = mlx5_flow_validate_item_udp(items, item_flags,
next_protocol,
error);
+ const struct rte_flow_item_udp *spec = items->spec;
+ const struct rte_flow_item_udp *mask = items->mask;
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ if (spec != NULL)
+ udp_dport = rte_be_to_cpu_16
+ (spec->hdr.dst_port &
+ mask->hdr.dst_port);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- ret = mlx5_flow_validate_item_vxlan(dev, items,
- item_flags, attr,
- error);
+ ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
+ items, item_flags,
+ attr, error);
if (ret < 0)
return ret;
last_item = MLX5_FLOW_LAYER_VXLAN;
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
}
+ dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
if (!vxlan_v)
return;
if (!vxlan_m) {
else
vxlan_m = &nic_mask;
}
- if ((!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
+ if ((priv->sh->steering_format_version ==
+ MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
+ dport != MLX5_UDP_PORT_VXLAN) ||
+ (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
void *misc_m;
void *misc_v;
uint8_t next_protocol = 0xff;
uint16_t ether_type = 0;
bool is_empty_vlan = false;
+ uint16_t udp_dport = 0;
if (items == NULL)
return -1;
ret = mlx5_flow_validate_item_udp(items, item_flags,
next_protocol,
error);
+ const struct rte_flow_item_udp *spec = items->spec;
+ const struct rte_flow_item_udp *mask = items->mask;
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ if (spec != NULL)
+ udp_dport = rte_be_to_cpu_16
+ (spec->hdr.dst_port &
+ mask->hdr.dst_port);
+
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
MLX5_FLOW_LAYER_OUTER_L4_TCP;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- ret = mlx5_flow_validate_item_vxlan(dev, items,
- item_flags, attr,
- error);
+ ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
+ items, item_flags,
+ attr, error);
if (ret < 0)
return ret;
last_item = MLX5_FLOW_LAYER_VXLAN;