case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ case RTE_FLOW_ITEM_TYPE_GTP:
return true;
default:
break;
MLX5_EXPANSION_IPV6_UDP,
MLX5_EXPANSION_IPV6_TCP,
MLX5_EXPANSION_IPV6_FRAG_EXT,
+ MLX5_EXPANSION_GTP
};
/** Supported expansion of items. */
[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_VXLAN_GPE,
- MLX5_EXPANSION_MPLS),
+ MLX5_EXPANSION_MPLS,
+ MLX5_EXPANSION_GTP),
.type = RTE_FLOW_ITEM_TYPE_UDP,
.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
},
[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_VXLAN_GPE,
- MLX5_EXPANSION_MPLS),
+ MLX5_EXPANSION_MPLS,
+ MLX5_EXPANSION_GTP),
.type = RTE_FLOW_ITEM_TYPE_UDP,
.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
},
MLX5_EXPANSION_IPV6,
MLX5_EXPANSION_ETH),
.type = RTE_FLOW_ITEM_TYPE_MPLS,
+ .optional = 1,
},
[MLX5_EXPANSION_ETH] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
},
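+ /* GTP tunnels an IP packet, so expansion continues with the inner IPv4/IPv6. */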
+ [MLX5_EXPANSION_GTP] = {
+ .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_GTP,
+ },
};
static struct rte_flow_action_handle *
data->dynf_meta = 1;
data->flow_meta_mask = rte_flow_dynf_metadata_mask;
data->flow_meta_offset = rte_flow_dynf_metadata_offs;
- data->flow_meta_port_mask = (uint32_t)~0;
- if (priv->config.dv_xmeta_en == MLX5_XMETA_MODE_META16)
- data->flow_meta_port_mask >>= 16;
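+ /* Use the metadata mask already prepared in the shared device context. */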
+ data->flow_meta_port_mask = priv->sh->dv_meta_mask;
}
}
}
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L2 layer should not follow VLAN");
+ if (item_flags & MLX5_FLOW_LAYER_GTP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L2 layer should not follow GTP");
if (!mask)
mask = &rte_flow_item_eth_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0};
uint32_t i;
- /**
+ /*
* This is a tmp dev_flow,
* no need to register any matcher for it in translate.
*/
for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
struct mlx5_flow dev_flow = {0};
struct mlx5_flow_handle dev_handle = { {0} };
+ uint8_t fate = final_policy->act_cnt[i].fate_action;
- if (final_policy->is_rss) {
+ if (fate == MLX5_FLOW_FATE_SHARED_RSS) {
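+ /* This is the shared RSS action. */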
const void *rss_act =
final_policy->act_cnt[i].rss->conf;
struct rte_flow_action rss_actions[2] = {
[0] = {
.type = RTE_FLOW_ACTION_TYPE_RSS,
- .conf = rss_act
+ .conf = rss_act,
},
[1] = {
.type = RTE_FLOW_ACTION_TYPE_END,
- .conf = NULL
+ .conf = NULL,
}
};
rss_desc_v[i].hash_fields ?
rss_desc_v[i].queue_num : 1;
rss_desc_v[i].tunnel =
- !!(dev_flow.handle->layers &
- MLX5_FLOW_LAYER_TUNNEL);
- } else {
+ !!(dev_flow.handle->layers &
+ MLX5_FLOW_LAYER_TUNNEL);
+ rss_desc[i] = &rss_desc_v[i];
+ } else if (fate == MLX5_FLOW_FATE_QUEUE) {
/* This is queue action. */
rss_desc_v[i] = wks->rss_desc;
rss_desc_v[i].key_len = 0;
rss_desc_v[i].queue =
&final_policy->act_cnt[i].queue;
rss_desc_v[i].queue_num = 1;
+ rss_desc[i] = &rss_desc_v[i];
+ } else {
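+ /* Other fate actions do not need an RSS descriptor. */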
+ rss_desc[i] = NULL;
}
- rss_desc[i] = &rss_desc_v[i];
}
sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
flow, policy, rss_desc);
} else {
enum mlx5_meter_domain mtr_domain =
attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
- attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
- MLX5_MTR_DOMAIN_INGRESS;
+ (attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+ MLX5_MTR_DOMAIN_INGRESS);
sub_policy = policy->sub_policys[mtr_domain][0];
}
- if (!sub_policy) {
+ if (!sub_policy)
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to get meter sub-policy.");
- goto exit;
- }
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to get meter sub-policy.");
exit:
return sub_policy;
}
} else {
enum mlx5_meter_domain mtr_domain =
attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
- attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
- MLX5_MTR_DOMAIN_INGRESS;
+ (attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+ MLX5_MTR_DOMAIN_INGRESS);
sub_policy =
&priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy;
actions_pre++;
if (!tag_action)
return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "No tag action space.");
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "No tag action space.");
if (!mtr_flow_id) {
tag_action->type = RTE_FLOW_ACTION_TYPE_VOID;
goto exit;
int *modify_after_mirror)
{
const struct rte_flow_action_sample *sample;
+ const struct rte_flow_action_raw_decap *decap;
int actions_n = 0;
uint32_t ratio = 0;
int sub_type = 0;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
- case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
case RTE_FLOW_ACTION_TYPE_METER:
if (fdb_mirror)
*modify_after_mirror = 1;
break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ decap = actions->conf;
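+ /* Skip VOID actions between the decap and the action that follows it. */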
+ while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ ;
+ actions_n++;
+ if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+ const struct rte_flow_action_raw_encap *encap =
+ actions->conf;
+ if (decap->size <=
+ MLX5_ENCAPSULATION_DECISION_SIZE &&
+ encap->size >
+ MLX5_ENCAPSULATION_DECISION_SIZE)
+ /* L3 encap. */
+ break;
+ }
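+ /* Any other raw decap is a packet modify action. */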
+ if (fdb_mirror)
+ *modify_after_mirror = 1;
+ break;
default:
break;
}
struct rte_flow_attr *attr,
bool *is_rss,
uint8_t *domain_bitmap,
- bool *is_def_policy,
+ uint8_t *policy_mode,
struct rte_mtr_error *error)
{
const struct mlx5_flow_driver_ops *fops;
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->validate_mtr_acts(dev, actions, attr,
- is_rss, domain_bitmap, is_def_policy, error);
+ return fops->validate_mtr_acts(dev, actions, attr, is_rss,
+ domain_bitmap, policy_mode, error);
}
/**