flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
uint32_t rix_jump);
+/**
+ * Map accumulated tunnel pattern flags to the EtherType of the first
+ * inner header. Used to force an explicit next-protocol match when the
+ * application mask left it unspecified (prevents matcher duplication).
+ *
+ * @param[in] pattern_flags
+ *   Accumulated item flags of the flow pattern being translated.
+ *
+ * @return
+ *   Inner-layer EtherType: TEB for inner L2, IPv4/IPv6 for inner L3,
+ *   MPLS for an MPLS layer, or 0 if no known inner layer is present.
+ */
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+ if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+ return RTE_ETHER_TYPE_TEB;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ return RTE_ETHER_TYPE_IPV4;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ return RTE_ETHER_TYPE_IPV6;
+ else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+ return RTE_ETHER_TYPE_MPLS;
+ return 0;
+}
+
static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_DMAC_15_0};
if (width < 16) {
- mask[idx] = rte_cpu_to_be_16(0xffff >>
+ mask[1] = rte_cpu_to_be_16(0xffff >>
(16 - width));
width = 0;
} else {
- mask[idx] = RTE_BE16(0xffff);
+ mask[1] = RTE_BE16(0xffff);
width -= 16;
}
if (!width)
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DMAC_47_16};
- mask[idx] = rte_cpu_to_be_32((0xffffffff >>
- (32 - width)) << off);
+ mask[0] = rte_cpu_to_be_32((0xffffffff >>
+ (32 - width)) << off);
} else {
if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 4,
+ info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_DMAC_15_0};
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DMAC_47_16};
info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_SMAC_15_0};
if (width < 16) {
- mask[idx] = rte_cpu_to_be_16(0xffff >>
+ mask[1] = rte_cpu_to_be_16(0xffff >>
(16 - width));
width = 0;
} else {
- mask[idx] = RTE_BE16(0xffff);
+ mask[1] = RTE_BE16(0xffff);
width -= 16;
}
if (!width)
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SMAC_47_16};
- mask[idx] = rte_cpu_to_be_32((0xffffffff >>
- (32 - width)) << off);
+ mask[0] = rte_cpu_to_be_32((0xffffffff >>
+ (32 - width)) << off);
} else {
if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 4,
+ info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_SMAC_15_0};
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SMAC_47_16};
info[idx] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_SIPV6_31_0};
if (width < 32) {
- mask[idx] =
+ mask[3] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[3] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
info[idx] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_SIPV6_63_32};
if (width < 32) {
- mask[idx] =
+ mask[2] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[2] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
info[idx] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_SIPV6_95_64};
if (width < 32) {
- mask[idx] =
+ mask[1] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[1] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_127_96};
- mask[idx] = rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 12,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_31_0};
if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 8,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_63_32};
if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 4,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
info[idx] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_DIPV6_31_0};
if (width < 32) {
- mask[idx] =
+ mask[3] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[3] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
info[idx] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_DIPV6_63_32};
if (width < 32) {
- mask[idx] =
+ mask[2] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[2] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
info[idx] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_DIPV6_95_64};
if (width < 32) {
- mask[idx] =
+ mask[1] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[1] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_127_96};
- mask[idx] = rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 12,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_31_0};
if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 8,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_63_32};
if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 4,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
* Pointer to rte_eth_dev structure.
* @param[in] action_flags
* Bit-fields that holds the actions detected until now.
+ * @param[in] item_flags
+ * Holds the items detected.
* @param[in] action
* Pointer to the meter action.
* @param[in] attr
*/
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
- uint64_t action_flags,
+ uint64_t action_flags, uint64_t item_flags,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
const struct rte_flow_item *port_id_item,
NULL,
"Flow and meter policy "
"have different src port.");
+ } else if (mtr_policy->is_rss) {
+ struct mlx5_flow_meter_policy *fp;
+ struct mlx5_meter_policy_action_container *acg;
+ struct mlx5_meter_policy_action_container *acy;
+ const struct rte_flow_action *rss_act;
+ int ret;
+
+ fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
+ mtr_policy);
+ if (fp == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Unable to get the final "
+ "policy in the hierarchy");
+ acg = &fp->act_cnt[RTE_COLOR_GREEN];
+ acy = &fp->act_cnt[RTE_COLOR_YELLOW];
+ MLX5_ASSERT(acg->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS ||
+ acy->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS);
+ if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
+ rss_act = acg->rss;
+ else
+ rss_act = acy->rss;
+ ret = mlx5_flow_validate_action_rss(rss_act,
+ action_flags, dev, attr,
+ item_flags, error);
+ if (ret)
+ return ret;
}
*def_policy = false;
}
case RTE_FLOW_ACTION_TYPE_METER:
ret = mlx5_flow_validate_action_meter(dev,
action_flags,
+ item_flags,
actions, attr,
port_id_item,
&def_policy,
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] inner
- * Item is inner pattern.
+ * @param[in] pattern_flags
+ * Accumulated pattern flags.
*/
static void
flow_dv_translate_item_gre(void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ uint64_t pattern_flags)
{
+ static const struct rte_flow_item_gre empty_gre = {0,};
const struct rte_flow_item_gre *gre_m = item->mask;
const struct rte_flow_item_gre *gre_v = item->spec;
- void *headers_m;
- void *headers_v;
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
struct {
uint16_t value;
};
} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
+ uint16_t protocol_m, protocol_v;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
- if (!gre_v)
- return;
- if (!gre_m)
- gre_m = &rte_flow_item_gre_mask;
- MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
- rte_be_to_cpu_16(gre_m->protocol));
- MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
- rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
+ if (!gre_v) {
+ gre_v = &empty_gre;
+ gre_m = &empty_gre;
+ } else {
+ if (!gre_m)
+ gre_m = &rte_flow_item_gre_mask;
+ }
gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
gre_crks_rsvd0_ver_v.s_present &
gre_crks_rsvd0_ver_m.s_present);
+ protocol_m = rte_be_to_cpu_16(gre_m->protocol);
+ protocol_v = rte_be_to_cpu_16(gre_v->protocol);
+ if (!protocol_m) {
+ /* Force next protocol to prevent matcher duplication. */
+ protocol_m = 0xFFFF;
+ protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+ }
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ protocol_m & protocol_v);
}
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] inner
- * Item is inner pattern.
+ * @param[in] pattern_flags
+ * Accumulated pattern flags.
*/
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ uint64_t pattern_flags)
{
const struct rte_flow_item_nvgre *nvgre_m = item->mask;
const struct rte_flow_item_nvgre *nvgre_v = item->spec;
.mask = &gre_mask,
.last = NULL,
};
- flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
+ flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
if (!nvgre_v)
return;
if (!nvgre_m)
static void
flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item,
+ const uint64_t pattern_flags)
{
+ static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
- void *headers_m;
- void *headers_v;
+ /* The item was validated to be on the outer side */
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m =
MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
void *misc_v =
MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
- char *vni_m;
- char *vni_v;
- uint16_t dport;
- int size;
- int i;
+ char *vni_m =
+ MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
+ char *vni_v =
+ MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
+ int i, size = sizeof(vxlan_m->vni);
uint8_t flags_m = 0xff;
uint8_t flags_v = 0xc;
+ uint8_t m_protocol, v_protocol;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
- dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
- MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_VXLAN_GPE);
+ }
+ if (!vxlan_v) {
+ vxlan_v = &dummy_vxlan_gpe_hdr;
+ vxlan_m = &dummy_vxlan_gpe_hdr;
+ } else {
+ if (!vxlan_m)
+ vxlan_m = &rte_flow_item_vxlan_gpe_mask;
}
- if (!vxlan_v)
- return;
- if (!vxlan_m)
- vxlan_m = &rte_flow_item_vxlan_gpe_mask;
- size = sizeof(vxlan_m->vni);
- vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
- vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
memcpy(vni_m, vxlan_m->vni, size);
for (i = 0; i < size; ++i)
vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
- MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
- vxlan_m->protocol);
- MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
- vxlan_v->protocol);
+ m_protocol = vxlan_m->protocol;
+ v_protocol = vxlan_v->protocol;
+ if (!m_protocol) {
+ m_protocol = 0xff;
+ /* Force next protocol to ensure next headers parsing. */
+ if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+ v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
+ }
+ MLX5_SET(fte_match_set_misc3, misc_m,
+ outer_vxlan_gpe_next_protocol, m_protocol);
+ MLX5_SET(fte_match_set_misc3, misc_v,
+ outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
}
/**
static void
flow_dv_translate_item_geneve(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item,
+ uint64_t pattern_flags)
{
+ static const struct rte_flow_item_geneve empty_geneve = {0,};
const struct rte_flow_item_geneve *geneve_m = item->mask;
const struct rte_flow_item_geneve *geneve_v = item->spec;
- void *headers_m;
- void *headers_v;
+ /* GENEVE flow item validation allows single tunnel item */
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
- uint16_t dport;
uint16_t gbhdr_m;
uint16_t gbhdr_v;
- char *vni_m;
- char *vni_v;
- size_t size, i;
+ char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+ char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+ size_t size = sizeof(geneve_m->vni), i;
+ uint16_t protocol_m, protocol_v;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
- dport = MLX5_UDP_PORT_GENEVE;
if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_GENEVE);
+ }
+ if (!geneve_v) {
+ geneve_v = &empty_geneve;
+ geneve_m = &empty_geneve;
+ } else {
+ if (!geneve_m)
+ geneve_m = &rte_flow_item_geneve_mask;
}
- if (!geneve_v)
- return;
- if (!geneve_m)
- geneve_m = &rte_flow_item_geneve_mask;
- size = sizeof(geneve_m->vni);
- vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
- vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
memcpy(vni_m, geneve_m->vni, size);
for (i = 0; i < size; ++i)
vni_v[i] = vni_m[i] & geneve_v->vni[i];
- MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
- rte_be_to_cpu_16(geneve_m->protocol));
- MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
- rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+ protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
+ protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
+ if (!protocol_m) {
+ /* Force next protocol to prevent matcher duplication. */
+ protocol_m = 0xFFFF;
+ protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+ }
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+ protocol_m & protocol_v);
}
/**
switch (prev_layer) {
case MLX5_FLOW_LAYER_OUTER_L4_UDP:
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
- MLX5_UDP_PORT_MPLS);
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_MPLS);
+ }
break;
case MLX5_FLOW_LAYER_GRE:
/* Fall-through. */
case MLX5_FLOW_LAYER_GRE_KEY:
- MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
- MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
- RTE_ETHER_TYPE_MPLS);
+ if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ RTE_ETHER_TYPE_MPLS);
+ }
break;
default:
break;
void *headers_m, void *headers_v)
{
if (mask->l4_ok) {
- /* application l4_ok filter aggregates all hardware l4 filters
- * therefore hw l4_checksum_ok must be implicitly added here.
+ /* RTE l4_ok filter aggregates hardware l4_ok and
+ * l4_checksum_ok filters.
+ * Positive RTE l4_ok match requires hardware match on both L4
+ * hardware integrity bits.
+ * For negative match, check hardware l4_checksum_ok bit only,
+ * because hardware sets that bit to 0 for all packets
+ * with bad L4.
*/
- struct rte_flow_item_integrity local_item;
-
- local_item.l4_csum_ok = 1;
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
- local_item.l4_csum_ok);
if (value->l4_ok) {
- /* application l4_ok = 1 matches sets both hw flags
- * l4_ok and l4_checksum_ok flags to 1.
- */
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- l4_checksum_ok, local_item.l4_csum_ok);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
- mask->l4_ok);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
- value->l4_ok);
- } else {
- /* application l4_ok = 0 matches on hw flag
- * l4_checksum_ok = 0 only.
- */
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- l4_checksum_ok, 0);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
}
- } else if (mask->l4_csum_ok) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
- mask->l4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
+ !!value->l4_ok);
+ }
+ if (mask->l4_csum_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
value->l4_csum_ok);
}
void *headers_m, void *headers_v, bool is_ipv4)
{
if (mask->l3_ok) {
- /* application l3_ok filter aggregates all hardware l3 filters
- * therefore hw ipv4_checksum_ok must be implicitly added here.
+ /* RTE l3_ok filter aggregates for IPv4 hardware l3_ok and
+ * ipv4_csum_ok filters.
+ * Positive RTE l3_ok match requires hardware match on both L3
+ * hardware integrity bits.
+ * For negative match, check hardware l3_csum_ok bit only,
+ * because hardware sets that bit to 0 for all packets
+ * with bad L3.
*/
- struct rte_flow_item_integrity local_item;
-
- local_item.ipv4_csum_ok = !!is_ipv4;
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
- local_item.ipv4_csum_ok);
- if (value->l3_ok) {
+ if (is_ipv4) {
+ if (value->l3_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m,
+ l3_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ l3_ok, 1);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m,
+ ipv4_checksum_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- ipv4_checksum_ok, local_item.ipv4_csum_ok);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
- mask->l3_ok);
+ ipv4_checksum_ok, !!value->l3_ok);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
value->l3_ok);
- } else {
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- ipv4_checksum_ok, 0);
}
- } else if (mask->ipv4_csum_ok) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
- mask->ipv4_csum_ok);
+ }
+ if (mask->ipv4_csum_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
value->ipv4_csum_ok);
}
.std_tbl_fix = true,
};
const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
+ const struct rte_flow_item *tunnel_item = NULL;
if (!wks)
return rte_flow_error_set(error, ENOMEM,
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
- flow_dv_translate_item_gre(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
flow_dv_translate_item_gre_key(match_mask,
last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
- flow_dv_translate_item_nvgre(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(dev, attr,
last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- flow_dv_translate_item_vxlan_gpe(match_mask,
- match_value, items,
- tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
- flow_dv_translate_item_geneve(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GENEVE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
integrity_items,
item_flags);
}
+ if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
+ flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
+ flow_dv_translate_item_geneve(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (item_flags & MLX5_FLOW_LAYER_GRE) {
+ if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
+ flow_dv_translate_item_gre(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+ flow_dv_translate_item_nvgre(match_mask, match_value,
+ tunnel_item, item_flags);
+ else
+ MLX5_ASSERT(false);
+ }
#ifdef RTE_LIBRTE_MLX5_DEBUG
MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));