flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
uint32_t rix_jump);
+/*
+ * Translate accumulated pattern flags into the EtherType of the first
+ * inner header carried by a tunnel (GRE/NVGRE/GENEVE), so the tunnel's
+ * "next protocol" field can be forced when the user did not mask it.
+ *
+ * @param pattern_flags
+ *   Accumulated MLX5_FLOW_LAYER_* bits detected in the flow pattern.
+ *
+ * @return
+ *   TEB for an inner Ethernet layer, IPv4/IPv6 for an inner L3 layer,
+ *   MPLS for an MPLS layer, or 0 when none of these flags is present.
+ *   Checks are ordered from innermost L2 outward, so the most specific
+ *   encapsulated layer wins.
+ */
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+ if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+ return RTE_ETHER_TYPE_TEB;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ return RTE_ETHER_TYPE_IPV4;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ return RTE_ETHER_TYPE_IPV6;
+ else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+ return RTE_ETHER_TYPE_MPLS;
+ return 0;
+}
+
static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
(32 - width)) << off);
} else {
if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 4,
+ info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_DMAC_15_0};
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DMAC_47_16};
(32 - width)) << off);
} else {
if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 4,
+ info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_SMAC_15_0};
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SMAC_47_16};
mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 12,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_31_0};
if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 8,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_63_32};
if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 4,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 12,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_31_0};
if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 8,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_63_32};
if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 4,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
* Pointer to rte_eth_dev structure.
* @param[in] action_flags
* Bit-fields that holds the actions detected until now.
+ * @param[in] item_flags
+ * Holds the items detected.
* @param[in] action
* Pointer to the meter action.
* @param[in] attr
*/
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
- uint64_t action_flags,
+ uint64_t action_flags, uint64_t item_flags,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
const struct rte_flow_item *port_id_item,
NULL,
"Flow and meter policy "
"have different src port.");
+ } else if (mtr_policy->is_rss) {
+ struct mlx5_flow_meter_policy *fp;
+ struct mlx5_meter_policy_action_container *acg;
+ struct mlx5_meter_policy_action_container *acy;
+ const struct rte_flow_action *rss_act;
+ int ret;
+
+ fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
+ mtr_policy);
+ if (fp == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Unable to get the final "
+ "policy in the hierarchy");
+ acg = &fp->act_cnt[RTE_COLOR_GREEN];
+ acy = &fp->act_cnt[RTE_COLOR_YELLOW];
+ MLX5_ASSERT(acg->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS ||
+ acy->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS);
+ if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
+ rss_act = acg->rss;
+ else
+ rss_act = acy->rss;
+ ret = mlx5_flow_validate_action_rss(rss_act,
+ action_flags, dev, attr,
+ item_flags, error);
+ if (ret)
+ return ret;
}
*def_policy = false;
}
case RTE_FLOW_ACTION_TYPE_METER:
ret = mlx5_flow_validate_action_meter(dev,
action_flags,
+ item_flags,
actions, attr,
port_id_item,
&def_policy,
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] inner
- * Item is inner pattern.
+ * @param[in] pattern_flags
+ * Accumulated pattern flags.
*/
static void
flow_dv_translate_item_gre(void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ uint64_t pattern_flags)
{
+ static const struct rte_flow_item_gre empty_gre = {0,};
const struct rte_flow_item_gre *gre_m = item->mask;
const struct rte_flow_item_gre *gre_v = item->spec;
- void *headers_m;
- void *headers_v;
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
struct {
uint16_t value;
};
} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
+ uint16_t protocol_m, protocol_v;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
- if (!gre_v)
- return;
- if (!gre_m)
- gre_m = &rte_flow_item_gre_mask;
- MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
- rte_be_to_cpu_16(gre_m->protocol));
- MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
- rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
+ if (!gre_v) {
+ gre_v = &empty_gre;
+ gre_m = &empty_gre;
+ } else {
+ if (!gre_m)
+ gre_m = &rte_flow_item_gre_mask;
+ }
gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
gre_crks_rsvd0_ver_v.s_present &
gre_crks_rsvd0_ver_m.s_present);
+ protocol_m = rte_be_to_cpu_16(gre_m->protocol);
+ protocol_v = rte_be_to_cpu_16(gre_v->protocol);
+ if (!protocol_m) {
+ /* Force next protocol to prevent matcher duplication */
+ protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+ if (protocol_v)
+ protocol_m = 0xFFFF;
+ }
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ protocol_m & protocol_v);
}
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] inner
- * Item is inner pattern.
+ * @param[in] pattern_flags
+ * Accumulated pattern flags.
*/
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ unsigned long pattern_flags)
{
const struct rte_flow_item_nvgre *nvgre_m = item->mask;
const struct rte_flow_item_nvgre *nvgre_v = item->spec;
.mask = &gre_mask,
.last = NULL,
};
- flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
+ flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
if (!nvgre_v)
return;
if (!nvgre_m)
m_protocol = vxlan_m->protocol;
v_protocol = vxlan_v->protocol;
if (!m_protocol) {
- m_protocol = 0xff;
/* Force next protocol to ensure next headers parsing. */
if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
+ if (v_protocol)
+ m_protocol = 0xFF;
}
MLX5_SET(fte_match_set_misc3, misc_m,
outer_vxlan_gpe_next_protocol, m_protocol);
static void
flow_dv_translate_item_geneve(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item,
+ uint64_t pattern_flags)
{
+ static const struct rte_flow_item_geneve empty_geneve = {0,};
const struct rte_flow_item_geneve *geneve_m = item->mask;
const struct rte_flow_item_geneve *geneve_v = item->spec;
- void *headers_m;
- void *headers_v;
+ /* GENEVE flow item validation allows single tunnel item */
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
- uint16_t dport;
uint16_t gbhdr_m;
uint16_t gbhdr_v;
- char *vni_m;
- char *vni_v;
- size_t size, i;
+ char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+ char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+ size_t size = sizeof(geneve_m->vni), i;
+ uint16_t protocol_m, protocol_v;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
- dport = MLX5_UDP_PORT_GENEVE;
if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_GENEVE);
+ }
+ if (!geneve_v) {
+ geneve_v = &empty_geneve;
+ geneve_m = &empty_geneve;
+ } else {
+ if (!geneve_m)
+ geneve_m = &rte_flow_item_geneve_mask;
}
- if (!geneve_v)
- return;
- if (!geneve_m)
- geneve_m = &rte_flow_item_geneve_mask;
- size = sizeof(geneve_m->vni);
- vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
- vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
memcpy(vni_m, geneve_m->vni, size);
for (i = 0; i < size; ++i)
vni_v[i] = vni_m[i] & geneve_v->vni[i];
- MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
- rte_be_to_cpu_16(geneve_m->protocol));
- MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
- rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+ protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
+ protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
+ if (!protocol_m) {
+ /* Force next protocol to prevent matcher duplication */
+ protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+ if (protocol_v)
+ protocol_m = 0xFFFF;
+ }
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+ protocol_m & protocol_v);
}
/**
switch (prev_layer) {
case MLX5_FLOW_LAYER_OUTER_L4_UDP:
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
- MLX5_UDP_PORT_MPLS);
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_MPLS);
+ }
break;
case MLX5_FLOW_LAYER_GRE:
/* Fall-through. */
case MLX5_FLOW_LAYER_GRE_KEY:
- MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
- MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
- RTE_ETHER_TYPE_MPLS);
+ if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ RTE_ETHER_TYPE_MPLS);
+ }
break;
default:
break;
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
- flow_dv_translate_item_gre(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
flow_dv_translate_item_gre_key(match_mask,
last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
- flow_dv_translate_item_nvgre(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(dev, attr,
tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
- flow_dv_translate_item_geneve(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GENEVE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
tunnel_item, item_flags);
+ else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
+ flow_dv_translate_item_geneve(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (item_flags & MLX5_FLOW_LAYER_GRE) {
+ if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
+ flow_dv_translate_item_gre(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+ flow_dv_translate_item_nvgre(match_mask, match_value,
+ tunnel_item, item_flags);
+ else
+ MLX5_ASSERT(false);
+ }
#ifdef RTE_LIBRTE_MLX5_DEBUG
MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
size_t i;
int err;
- if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
+ if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
+ !!dev->data->dev_started)) {
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot setup indirection table");
error_hrxq_new:
err = rte_errno;
__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
- if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
+ if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
shared_rss->ind_tbl = NULL;
rte_errno = err;
return -rte_errno;
NULL,
"shared rss hrxq has references");
queue = shared_rss->ind_tbl->queues;
- remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
+ remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
+ !!dev->data->dev_started);
if (remaining)
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
void *queue = NULL;
uint16_t *queue_old = NULL;
uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
+ bool dev_started = !!dev->data->dev_started;
if (!shared_rss)
return rte_flow_error_set(error, EINVAL,
rte_spinlock_lock(&shared_rss->action_rss_sl);
queue_old = shared_rss->ind_tbl->queues;
ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
- queue, action_conf->queue_num, true);
+ queue, action_conf->queue_num,
+ true /* standalone */,
+ dev_started /* ref_new_qs */,
+ dev_started /* deref_old_qs */);
if (ret) {
mlx5_free(queue);
ret = rte_flow_error_set(error, rte_errno,