/* The value must be in the range of the mask. */
for (i = 0; i < sizeof(eth_m->dst); ++i)
l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
- rte_be_to_cpu_16(eth_m->type));
- l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
- *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
if (eth_v->type) {
/* When ethertype is present set mask for tagged VLAN. */
MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
/* Set value for tagged VLAN if ethertype is 802.1Q. */
if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
- eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ))
+ eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
1);
+ /* Return here to avoid setting match on ethertype. */
+ return;
+ }
}
+ /*
+ * HW supports match on only one Ethertype, the one following the last
+ * VLAN tag of the packet (see PRM).
+ * Set match on ethertype only if ETH header is not followed by VLAN.
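+ * E.g. for a VLAN-tagged IPv4 packet, HW reports the ethertype that
+ * follows the VLAN tag (0x0800), so a match on 0x8100 programmed here
+ * could never hit; hence the early return above.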
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(eth_m->type));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
+ *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
const struct rte_flow_item *item,
+ const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
ipv4_m->hdr.next_proto_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ /*
+ * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
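+ * The mask bit is set while the value bit stays 0, so only packets
+ * without a CVLAN tag can match.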
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
}
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
const struct rte_flow_item *item,
+ const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
ipv6_m->hdr.proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
ipv6_v->hdr.proto & ipv6_m->hdr.proto);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ /*
+ * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
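+ * The mask bit is set while the value bit stays 0, so only packets
+ * without a CVLAN tag can match.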
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
}
/**
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
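+ /*
+ * item_flags holds the items seen so far, so the translator can tell
+ * whether an inner IPv4 header follows an inner L2 item.
+ */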
flow_dv_translate_item_ipv4(match_mask, match_value,
- items, tunnel,
+ items, item_flags, tunnel,
dev_flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
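+ /*
+ * item_flags holds the items seen so far, so the translator can tell
+ * whether an inner IPv6 header follows an inner L2 item.
+ */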
flow_dv_translate_item_ipv6(match_mask, match_value,
- items, tunnel,
+ items, item_flags, tunnel,
dev_flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :