flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
uint32_t rix_jump);
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+ if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+ return RTE_ETHER_TYPE_TEB;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ return RTE_ETHER_TYPE_IPV4;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ return RTE_ETHER_TYPE_IPV6;
+ else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+ return RTE_ETHER_TYPE_MPLS;
+ return 0;
+}
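
For illustration, a minimal standalone sketch of the helper's precedence, using placeholder bit values for the MLX5_FLOW_LAYER_* flags (the real definitions live in mlx5_flow.h): an inner L2 layer always wins, so a pattern carrying both inner Ethernet and inner IPv4 resolves to TEB.

#include <stdint.h>
#include <stdio.h>

#define LAYER_INNER_L2      (UINT64_C(1) << 0) /* placeholder bit */
#define LAYER_INNER_L3_IPV4 (UINT64_C(1) << 1) /* placeholder bit */

/* Same precedence order as mlx5_translate_tunnel_etypes() above. */
static uint16_t
translate_etypes(uint64_t flags)
{
	if (flags & LAYER_INNER_L2)
		return 0x6558; /* RTE_ETHER_TYPE_TEB */
	if (flags & LAYER_INNER_L3_IPV4)
		return 0x0800; /* RTE_ETHER_TYPE_IPV4 */
	return 0;
}

int
main(void)
{
	/* Prints 0x6558: inner L2 takes precedence over inner L3. */
	printf("0x%04x\n",
	       translate_etypes(LAYER_INNER_L2 | LAYER_INNER_L3_IPV4));
	return 0;
}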
+
static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_DMAC_15_0};
if (width < 16) {
- mask[idx] = rte_cpu_to_be_16(0xffff >>
+ mask[1] = rte_cpu_to_be_16(0xffff >>
(16 - width));
width = 0;
} else {
- mask[idx] = RTE_BE16(0xffff);
+ mask[1] = RTE_BE16(0xffff);
width -= 16;
}
if (!width)
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DMAC_47_16};
- mask[idx] = rte_cpu_to_be_32((0xffffffff >>
- (32 - width)) << off);
+ mask[0] = rte_cpu_to_be_32((0xffffffff >>
+ (32 - width)) << off);
} else {
if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 4,
+ info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_DMAC_15_0};
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DMAC_47_16};
info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_SMAC_15_0};
if (width < 16) {
- mask[idx] = rte_cpu_to_be_16(0xffff >>
+ mask[1] = rte_cpu_to_be_16(0xffff >>
(16 - width));
width = 0;
} else {
- mask[idx] = RTE_BE16(0xffff);
+ mask[1] = RTE_BE16(0xffff);
width -= 16;
}
if (!width)
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SMAC_47_16};
- mask[idx] = rte_cpu_to_be_32((0xffffffff >>
- (32 - width)) << off);
+ mask[0] = rte_cpu_to_be_32((0xffffffff >>
+ (32 - width)) << off);
} else {
if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 4,
+ info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_SMAC_15_0};
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SMAC_47_16};
info[idx] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_SIPV6_31_0};
if (width < 32) {
- mask[idx] =
+ mask[3] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[3] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
info[idx] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_SIPV6_63_32};
if (width < 32) {
- mask[idx] =
+ mask[2] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[2] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
info[idx] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_SIPV6_95_64};
if (width < 32) {
- mask[idx] =
+ mask[1] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[1] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_127_96};
- mask[idx] = rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 12,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_31_0};
if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 8,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_63_32};
if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 4,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
info[idx] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_DIPV6_31_0};
if (width < 32) {
- mask[idx] =
+ mask[3] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[3] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
info[idx] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_DIPV6_63_32};
if (width < 32) {
- mask[idx] =
+ mask[2] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[2] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
info[idx] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_DIPV6_95_64};
if (width < 32) {
- mask[idx] =
+ mask[1] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[1] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_127_96};
- mask[idx] = rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 12,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_31_0};
if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 8,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_63_32};
if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 4,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
* Pointer to rte_eth_dev structure.
* @param[in] action_flags
* Bit-fields that holds the actions detected until now.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected.
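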
* @param[in] action
* Pointer to the meter action.
* @param[in] attr
*/
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
- uint64_t action_flags,
+ uint64_t action_flags, uint64_t item_flags,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
const struct rte_flow_item *port_id_item,
NULL,
"Flow and meter policy "
"have different src port.");
+ } else if (mtr_policy->is_rss) {
+ struct mlx5_flow_meter_policy *fp;
+ struct mlx5_meter_policy_action_container *acg;
+ struct mlx5_meter_policy_action_container *acy;
+ const struct rte_flow_action *rss_act;
+ int ret;
+
+ fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
+ mtr_policy);
+ if (fp == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Unable to get the final "
+ "policy in the hierarchy");
+ acg = &fp->act_cnt[RTE_COLOR_GREEN];
+ acy = &fp->act_cnt[RTE_COLOR_YELLOW];
+ MLX5_ASSERT(acg->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS ||
+ acy->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS);
+ if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
+ rss_act = acg->rss;
+ else
+ rss_act = acy->rss;
+ ret = mlx5_flow_validate_action_rss(rss_act,
+ action_flags, dev, attr,
+ item_flags, error);
+ if (ret)
+ return ret;
}
*def_policy = false;
}
RTE_FLOW_ERROR_TYPE_ACTION, action,
"wrong action order, jump should "
"be after sample action");
+ if (*action_flags & MLX5_FLOW_ACTION_CT)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "Sample after CT not supported");
act = sample->actions;
for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
return NULL;
}
pool->devx_obj = dcs;
+ rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
pool->index = pools_mng->n_valid;
if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
mlx5_free(pool);
claim_zero(mlx5_devx_cmd_destroy(dcs));
+ rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
return NULL;
}
pools_mng->pools[pool->index] = pool;
pools_mng->n_valid++;
+ rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
pool->mtrs[i].offset = i;
LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
return 0;
}
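
The write lock added here implies a matching read side; a minimal sketch, with a hypothetical container mirroring the fields used above, of how a reader would guard against a concurrent resize of the pools array:

#include <rte_rwlock.h>

/* Hypothetical container mirroring the fields used above. */
struct mtr_pools_mng {
	rte_rwlock_t resize_mtrwl; /* guards pools[] against resize */
	uint32_t n_valid;
	void **pools;
};

/* Readers hold the read lock so a concurrent resize (taking the
 * write lock, as above) cannot swap pools[] out from under them. */
static void *
mtr_pool_lookup(struct mtr_pools_mng *mng, uint32_t idx)
{
	void *pool = NULL;

	rte_rwlock_read_lock(&mng->resize_mtrwl);
	if (idx < mng->n_valid)
		pool = mng->pools[idx];
	rte_rwlock_read_unlock(&mng->resize_mtrwl);
	return pool;
}
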
+static int
+flow_dv_validate_item_flex(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint64_t *last_item,
+ bool is_inner,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_flex *flow_spec = item->spec;
+ const struct rte_flow_item_flex *flow_mask = item->mask;
+ struct mlx5_flex_item *flex;
+
+ if (!flow_spec)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "flex flow item spec cannot be NULL");
+ if (!flow_mask)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "flex flow item mask cannot be NULL");
+ if (item->last)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "flex flow item last not supported");
+ if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid flex flow item handle");
+ flex = (struct mlx5_flex_item *)flow_spec->handle;
+ switch (flex->tunnel_mode) {
+ case FLEX_TUNNEL_MODE_SINGLE:
+ if (item_flags &
+ (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple flex items not supported");
+ break;
+ case FLEX_TUNNEL_MODE_OUTER:
+ if (is_inner)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "inner flex item was not configured");
+ if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple flex items not supported");
+ break;
+ case FLEX_TUNNEL_MODE_INNER:
+ if (!is_inner)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "outer flex item was not configured");
+ if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple flex items not supported");
+ break;
+ case FLEX_TUNNEL_MODE_MULTI:
+ if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
+ (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple flex items not supported");
+ break;
+ case FLEX_TUNNEL_MODE_TUNNEL:
+ if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple flex tunnel items not supported");
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "invalid flex item configuration");
+ }
+ *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
+ MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
+ MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
+ return 0;
+}
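
A sketch of a pattern item that satisfies these checks, assuming a handle obtained earlier from rte_flow_flex_item_create(); spec and mask are both non-NULL and 'last' is left unset (the helper name and full-match mask are illustrative):

#include <rte_flow.h>

static void
fill_flex_item(struct rte_flow_item_flex_handle *handle,
	       const uint8_t *bytes, uint32_t len,
	       struct rte_flow_item_flex *spec,
	       struct rte_flow_item_flex *mask,
	       struct rte_flow_item *item)
{
	spec->handle = handle;
	spec->pattern = bytes;
	spec->length = len;
	*mask = *spec; /* assume a full match on the sampled bytes */
	item->type = RTE_FLOW_ITEM_TYPE_FLEX;
	item->spec = spec;
	item->mask = mask;
	item->last = NULL; /* 'last' is rejected by the validation above */
}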
+
/**
* Internal validation function. For validating both actions and items.
*
tunnel = is_tunnel_offload_active(dev) ?
mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
if (tunnel) {
+ if (!priv->config.dv_flow_en)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "tunnel offload requires DV flow interface");
if (priv->representor)
return rte_flow_error_set
(error, ENOTSUP,
* list it here as a supported type
*/
break;
+ case RTE_FLOW_ITEM_TYPE_FLEX:
+ ret = flow_dv_validate_item_flex(dev, items, item_flags,
+ &last_item,
+ tunnel != 0, error);
+ if (ret < 0)
+ return ret;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
case RTE_FLOW_ACTION_TYPE_METER:
ret = mlx5_flow_validate_action_meter(dev,
action_flags,
+ item_flags,
actions, attr,
port_id_item,
&def_policy,
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] inner
- * Item is inner pattern.
+ * @param[in] pattern_flags
+ * Accumulated pattern flags.
*/
static void
flow_dv_translate_item_gre(void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ uint64_t pattern_flags)
{
+ static const struct rte_flow_item_gre empty_gre = {0,};
const struct rte_flow_item_gre *gre_m = item->mask;
const struct rte_flow_item_gre *gre_v = item->spec;
- void *headers_m;
- void *headers_v;
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
struct {
uint16_t value;
};
} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
+ uint16_t protocol_m, protocol_v;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
- if (!gre_v)
- return;
- if (!gre_m)
- gre_m = &rte_flow_item_gre_mask;
- MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
- rte_be_to_cpu_16(gre_m->protocol));
- MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
- rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
+ if (!gre_v) {
+ gre_v = &empty_gre;
+ gre_m = &empty_gre;
+ } else {
+ if (!gre_m)
+ gre_m = &rte_flow_item_gre_mask;
+ }
gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
gre_crks_rsvd0_ver_v.s_present &
gre_crks_rsvd0_ver_m.s_present);
+ protocol_m = rte_be_to_cpu_16(gre_m->protocol);
+ protocol_v = rte_be_to_cpu_16(gre_v->protocol);
+ if (!protocol_m) {
+ /* Force next protocol to prevent matcher duplication. */
+ protocol_m = 0xFFFF;
+ protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+ }
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ protocol_m & protocol_v);
}
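
The effect of the forced protocol, worked through for a pattern such as eth / ipv4 / gre / ipv6 where the GRE item carries an all-zero protocol mask:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t protocol_m = 0;       /* user supplied no protocol mask */
	uint16_t protocol_v = 0;
	uint16_t inner_etype = 0x86DD; /* IPv6, taken from the pattern flags */

	if (!protocol_m) {
		/* Same widening as flow_dv_translate_item_gre() above. */
		protocol_m = 0xFFFF;
		protocol_v = inner_etype;
	}
	/* Prints mask=0xffff value=0x86dd: the rule now matches as if
	 * the user had requested GRE protocol == IPv6 explicitly. */
	printf("mask=0x%04x value=0x%04x\n",
	       protocol_m, protocol_m & protocol_v);
	return 0;
}
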
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] inner
- * Item is inner pattern.
+ * @param[in] pattern_flags
+ * Accumulated pattern flags.
*/
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ uint64_t pattern_flags)
{
const struct rte_flow_item_nvgre *nvgre_m = item->mask;
const struct rte_flow_item_nvgre *nvgre_v = item->spec;
.mask = &gre_mask,
.last = NULL,
};
- flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
+ flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
if (!nvgre_v)
return;
if (!nvgre_m)
static void
flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item,
+ const uint64_t pattern_flags)
{
+ static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
- void *headers_m;
- void *headers_v;
+ /* The item was validated to be on the outer side */
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m =
MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
void *misc_v =
MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
- char *vni_m;
- char *vni_v;
- uint16_t dport;
- int size;
- int i;
+ char *vni_m =
+ MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
+ char *vni_v =
+ MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
+ int i, size = sizeof(vxlan_m->vni);
uint8_t flags_m = 0xff;
uint8_t flags_v = 0xc;
+ uint8_t m_protocol, v_protocol;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
- dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
- MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_VXLAN_GPE);
+ }
+ if (!vxlan_v) {
+ vxlan_v = &dummy_vxlan_gpe_hdr;
+ vxlan_m = &dummy_vxlan_gpe_hdr;
+ } else {
+ if (!vxlan_m)
+ vxlan_m = &rte_flow_item_vxlan_gpe_mask;
}
- if (!vxlan_v)
- return;
- if (!vxlan_m)
- vxlan_m = &rte_flow_item_vxlan_gpe_mask;
- size = sizeof(vxlan_m->vni);
- vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
- vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
memcpy(vni_m, vxlan_m->vni, size);
for (i = 0; i < size; ++i)
vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
- MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
- vxlan_m->protocol);
- MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
- vxlan_v->protocol);
+ m_protocol = vxlan_m->protocol;
+ v_protocol = vxlan_v->protocol;
+ if (!m_protocol) {
+ m_protocol = 0xff;
+ /* Force next protocol to ensure the next headers are parsed. */
+ if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+ v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
+ }
+ MLX5_SET(fte_match_set_misc3, misc_m,
+ outer_vxlan_gpe_next_protocol, m_protocol);
+ MLX5_SET(fte_match_set_misc3, misc_v,
+ outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
}
/**
static void
flow_dv_translate_item_geneve(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item,
+ uint64_t pattern_flags)
{
+ static const struct rte_flow_item_geneve empty_geneve = {0,};
const struct rte_flow_item_geneve *geneve_m = item->mask;
const struct rte_flow_item_geneve *geneve_v = item->spec;
- void *headers_m;
- void *headers_v;
+ /* GENEVE flow item validation allows a single tunnel item only. */
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
- uint16_t dport;
uint16_t gbhdr_m;
uint16_t gbhdr_v;
- char *vni_m;
- char *vni_v;
- size_t size, i;
+ char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+ char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+ size_t size = sizeof(geneve_m->vni), i;
+ uint16_t protocol_m, protocol_v;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
- dport = MLX5_UDP_PORT_GENEVE;
if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_GENEVE);
+ }
+ if (!geneve_v) {
+ geneve_v = &empty_geneve;
+ geneve_m = &empty_geneve;
+ } else {
+ if (!geneve_m)
+ geneve_m = &rte_flow_item_geneve_mask;
}
- if (!geneve_v)
- return;
- if (!geneve_m)
- geneve_m = &rte_flow_item_geneve_mask;
- size = sizeof(geneve_m->vni);
- vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
- vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
memcpy(vni_m, geneve_m->vni, size);
for (i = 0; i < size; ++i)
vni_v[i] = vni_m[i] & geneve_v->vni[i];
- MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
- rte_be_to_cpu_16(geneve_m->protocol));
- MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
- rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+ protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
+ protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
+ if (!protocol_m) {
+ /* Force next protocol to prevent matcher duplication. */
+ protocol_m = 0xFFFF;
+ protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+ }
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+ protocol_m & protocol_v);
}
/**
switch (prev_layer) {
case MLX5_FLOW_LAYER_OUTER_L4_UDP:
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
- MLX5_UDP_PORT_MPLS);
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_MPLS);
+ }
break;
case MLX5_FLOW_LAYER_GRE:
/* Fall-through. */
case MLX5_FLOW_LAYER_GRE_KEY:
- MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
- MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
- RTE_ETHER_TYPE_MPLS);
+ if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ RTE_ETHER_TYPE_MPLS);
+ }
break;
default:
break;
*/
if (!ecpri_m->hdr.common.u32)
return;
- samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
+ samples = priv->sh->ecpri_parser.ids;
/* Need to take the whole DW as the mask to fill the entry. */
dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
prog_sample_field_value_0);
reg_value, reg_mask);
}
+static void
+flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
+ const struct rte_flow_item *item,
+ struct mlx5_flow *dev_flow, bool is_inner)
+{
+ const struct rte_flow_item_flex *spec =
+ (const struct rte_flow_item_flex *)item->spec;
+ int index = mlx5_flex_acquire_index(dev, spec->handle, false);
+
+ MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
+ if (index < 0)
+ return;
+ if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
+ /* Don't count both inner and outer flex items in one rule. */
+ if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
+ MLX5_ASSERT(false);
+ dev_flow->handle->flex_item |= RTE_BIT32(index);
+ }
+ mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
+}
+
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
#define HEADER_IS_ZERO(match_criteria, headers) \
void *headers_m, void *headers_v)
{
if (mask->l4_ok) {
- /* application l4_ok filter aggregates all hardware l4 filters
- * therefore hw l4_checksum_ok must be implicitly added here.
+ /* The RTE l4_ok filter aggregates the hardware l4_ok and
+ * l4_checksum_ok filters.
+ * A positive l4_ok match requires a match on both hardware
+ * integrity bits.
+ * A negative match checks the hardware l4_checksum_ok bit only,
+ * because hardware clears that bit for any packet with a bad L4.
*/
- struct rte_flow_item_integrity local_item;
-
- local_item.l4_csum_ok = 1;
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
- local_item.l4_csum_ok);
if (value->l4_ok) {
- /* application l4_ok = 1 matches sets both hw flags
- * l4_ok and l4_checksum_ok flags to 1.
- */
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- l4_checksum_ok, local_item.l4_csum_ok);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
- mask->l4_ok);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
- value->l4_ok);
- } else {
- /* application l4_ok = 0 matches on hw flag
- * l4_checksum_ok = 0 only.
- */
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- l4_checksum_ok, 0);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
}
- } else if (mask->l4_csum_ok) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
- mask->l4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
+ !!value->l4_ok);
+ }
+ if (mask->l4_csum_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
value->l4_csum_ok);
}
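
A sketch of the integrity item this translation consumes: matching packets whose outer L4 header and checksum are both valid, which with the new logic sets both hardware bits.

#include <rte_flow.h>

static const struct rte_flow_item_integrity integrity_spec = {
	.level = 0, /* outermost headers */
	.l4_ok = 1,
};
static const struct rte_flow_item_integrity integrity_mask = {
	.l4_ok = 1,
};
static const struct rte_flow_item integrity_item = {
	.type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
	.spec = &integrity_spec,
	.mask = &integrity_mask,
};
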
void *headers_m, void *headers_v, bool is_ipv4)
{
if (mask->l3_ok) {
- /* application l3_ok filter aggregates all hardware l3 filters
- * therefore hw ipv4_checksum_ok must be implicitly added here.
+ /* For IPv4, the RTE l3_ok filter aggregates the hardware l3_ok
+ * and ipv4_checksum_ok filters.
+ * A positive l3_ok match requires a match on both hardware
+ * integrity bits.
+ * A negative match checks the hardware ipv4_checksum_ok bit only,
+ * because hardware clears that bit for any packet with a bad L3.
*/
- struct rte_flow_item_integrity local_item;
-
- local_item.ipv4_csum_ok = !!is_ipv4;
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
- local_item.ipv4_csum_ok);
- if (value->l3_ok) {
+ if (is_ipv4) {
+ if (value->l3_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m,
+ l3_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ l3_ok, 1);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m,
+ ipv4_checksum_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- ipv4_checksum_ok, local_item.ipv4_csum_ok);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
- mask->l3_ok);
+ ipv4_checksum_ok, !!value->l3_ok);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
value->l3_ok);
- } else {
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- ipv4_checksum_ok, 0);
}
- } else if (mask->ipv4_csum_ok) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
- mask->ipv4_csum_ok);
+ }
+ if (mask->ipv4_csum_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
value->ipv4_csum_ok);
}
.std_tbl_fix = true,
};
const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
+ const struct rte_flow_item *tunnel_item = NULL;
if (!wks)
return rte_flow_error_set(error, ENOMEM,
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
- flow_dv_translate_item_gre(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
flow_dv_translate_item_gre_key(match_mask,
last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
- flow_dv_translate_item_nvgre(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(dev, attr,
last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- flow_dv_translate_item_vxlan_gpe(match_mask,
- match_value, items,
- tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
- flow_dv_translate_item_geneve(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GENEVE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
flow_dv_translate_item_aso_ct(dev, match_mask,
match_value, items);
break;
+ case RTE_FLOW_ITEM_TYPE_FLEX:
+ flow_dv_translate_item_flex(dev, match_mask,
+ match_value, items,
+ dev_flow, tunnel != 0);
+ last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+ MLX5_FLOW_ITEM_OUTER_FLEX;
+ break;
default:
break;
}
integrity_items,
item_flags);
}
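+ /*
+ * Translate tunnel items last, when item_flags already describes
+ * the whole pattern, so the inner layers can drive the implicit
+ * next-protocol match in the tunnel translators.
+ */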
+ if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
+ flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
+ flow_dv_translate_item_geneve(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (item_flags & MLX5_FLOW_LAYER_GRE) {
+ if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
+ flow_dv_translate_item_gre(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+ flow_dv_translate_item_nvgre(match_mask, match_value,
+ tunnel_item, item_flags);
+ else
+ MLX5_ASSERT(false);
+ }
#ifdef RTE_LIBRTE_MLX5_DEBUG
MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
if (!dev_handle)
return;
flow->dev_handles = dev_handle->next.next;
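+ /* Release the references this handle holds on flex items. */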
+ while (dev_handle->flex_item) {
+ int index = rte_bsf32(dev_handle->flex_item);
+
+ mlx5_flex_release_index(dev, index);
+ dev_handle->flex_item &= ~RTE_BIT32(index);
+ }
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
if (dev_handle->dvh.rix_sample)
flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
const struct rte_flow_action_rss *r2)
{
- if (!r1 || !r2)
+ if (r1 == NULL || r2 == NULL)
return 0;
- if (r1->func != r2->func || r1->level != r2->level ||
- r1->types != r2->types || r1->key_len != r2->key_len ||
- memcmp(r1->key, r2->key, r1->key_len))
+ if (!(r1->level <= 1 && r2->level <= 1) &&
+ !(r1->level > 1 && r2->level > 1))
return 1;
+ if (r1->types != r2->types &&
+ !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
+ (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
+ return 1;
+ if (r1->key || r2->key) {
+ const void *key1 = r1->key ? r1->key : rss_hash_default_key;
+ const void *key2 = r2->key ? r2->key : rss_hash_default_key;
+
+ if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
+ return 1;
+ }
return 0;
}
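
For illustration, two configurations the comparison above treats as matching (rss_hash_default_key and MLX5_RSS_HASH_KEY_LEN are the driver-internal symbols referenced above): a NULL key falls back to the default key, and types == 0 is considered equivalent to RTE_ETH_RSS_IP.

static const struct rte_flow_action_rss rss_a = {
	.level = 0,
	.types = 0,
	.key = NULL,
};
static const struct rte_flow_action_rss rss_b = {
	.level = 1,
	.types = RTE_ETH_RSS_IP,
	.key_len = MLX5_RSS_HASH_KEY_LEN,
	.key = rss_hash_default_key,
};
/* flow_dv_mtr_policy_rss_compare(&rss_a, &rss_b) == 0 -- a match. */
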
*policy_mode = MLX5_MTR_POLICY_MODE_OG;
} else if (def_green && !def_yellow) {
*policy_mode = MLX5_MTR_POLICY_MODE_OY;
+ } else {
+ *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
}
/* Set to empty string in case of NULL pointer access by user. */
flow_err.message = "";
return 0;
}
+/**
+ * Discover the number of available flow priorities
+ * by trying to create a flow with the highest priority value
+ * for each possible number.
+ *
+ * @param[in] dev
+ * Ethernet device.
+ * @param[in] vprio
+ * List of possible number of available priorities.
+ * @param[in] vprio_n
+ * Size of @p vprio array.
+ * @return
+ * On success, number of available flow priorities.
+ * On failure, a negative errno-style code and rte_errno is set.
+ */
+static int
+flow_dv_discover_priorities(struct rte_eth_dev *dev,
+ const uint16_t *vprio, int vprio_n)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item item = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &eth,
+ .mask = &eth,
+ };
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf),
+ },
+ };
+ union mlx5_flow_tbl_key tbl_key;
+ struct mlx5_flow flow;
+ void *action;
+ struct rte_flow_error error;
+ uint8_t misc_mask;
+ int i, err, ret = -ENOTSUP;
+
+ /*
+ * Prepare a flow with a catch-all pattern and a drop action.
+ * Use drop queue, because shared drop action may be unavailable.
+ */
+ action = priv->drop_queue.hrxq->action;
+ if (action == NULL) {
+ DRV_LOG(ERR, "Priority discovery requires a drop action");
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ memset(&flow, 0, sizeof(flow));
+ flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
+ if (flow.handle == NULL) {
+ DRV_LOG(ERR, "Cannot create flow handle");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ flow.ingress = true;
+ flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ flow.dv.actions[0] = action;
+ flow.dv.actions_n = 1;
+ memset(&eth, 0, sizeof(eth));
+ flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
+ &item, /* inner */ false, /* group */ 0);
+ matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
+ for (i = 0; i < vprio_n; i++) {
+ /* Configure the next proposed maximum priority. */
+ matcher.priority = vprio[i] - 1;
+ memset(&tbl_key, 0, sizeof(tbl_key));
+ err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
+ /* tunnel */ NULL,
+ /* group */ 0,
+ &error);
+ if (err != 0) {
+ /* Matcher registration is pure SW and must always succeed. */
+ DRV_LOG(ERR, "Cannot register matcher");
+ ret = -rte_errno;
+ break;
+ }
+ /* Try to apply the flow to HW. */
+ misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
+ __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
+ err = mlx5_flow_os_create_flow
+ (flow.handle->dvh.matcher->matcher_object,
+ (void *)&flow.dv.value, flow.dv.actions_n,
+ flow.dv.actions, &flow.handle->drv_flow);
+ if (err == 0) {
+ claim_zero(mlx5_flow_os_destroy_flow
+ (flow.handle->drv_flow));
+ flow.handle->drv_flow = NULL;
+ }
+ claim_zero(flow_dv_matcher_release(dev, flow.handle));
+ if (err != 0)
+ break;
+ ret = vprio[i];
+ }
+ mlx5_ipool_free(pool, flow.handle_idx);
+ /* Set rte_errno if no expected priority value matched. */
+ if (ret < 0)
+ rte_errno = -ret;
+ return ret;
+}
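
A sketch of a possible caller; the candidate priority counts below are illustrative (the driver probes a small fixed list):

static int
discover_priorities_example(struct rte_eth_dev *dev)
{
	static const uint16_t vprio[] = { 8, 16 };

	return flow_dv_discover_priorities(dev, vprio, RTE_DIM(vprio));
}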
+
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
.validate = flow_dv_validate,
.prepare = flow_dv_prepare,
.action_update = flow_dv_action_update,
.action_query = flow_dv_action_query,
.sync_domain = flow_dv_sync_domain,
+ .discover_priorities = flow_dv_discover_priorities,
+ .item_create = flow_dv_item_create,
+ .item_release = flow_dv_item_release,
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */