return false;
}
+/**
+ * Network Service Header (NSH) and its next protocol values
+ * are described in RFC 8300.
+ */
+static enum rte_flow_item_type
+mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+ enum rte_flow_item_type type;
+
+ switch (proto_mask & proto_spec) {
+ case RTE_VXLAN_GPE_TYPE_IPV4:
+ type = RTE_FLOW_ITEM_TYPE_IPV4;
+ break;
+ case RTE_VXLAN_GPE_TYPE_IPV6:
+ type = RTE_FLOW_ITEM_TYPE_IPV6;
+ break;
+ case RTE_VXLAN_GPE_TYPE_ETH:
+ type = RTE_FLOW_ITEM_TYPE_ETH;
+ break;
+ default:
+ type = RTE_FLOW_ITEM_TYPE_END;
+ }
+ return type;
+}
+
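+/**
+ * Translate the IP protocol number found in an IPv4/IPv6 header to the
+ * rte_flow item type of the next layer, or RTE_FLOW_ITEM_TYPE_END when
+ * the protocol has no matching item.
+ */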
+static enum rte_flow_item_type
+mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+ enum rte_flow_item_type type;
+
+ switch (proto_mask & proto_spec) {
+ case IPPROTO_UDP:
+ type = RTE_FLOW_ITEM_TYPE_UDP;
+ break;
+ case IPPROTO_TCP:
+ type = RTE_FLOW_ITEM_TYPE_TCP;
+ break;
+ case IPPROTO_IP:
+ type = RTE_FLOW_ITEM_TYPE_IPV4;
+ break;
+ case IPPROTO_IPV6:
+ type = RTE_FLOW_ITEM_TYPE_IPV6;
+ break;
+ default:
+ type = RTE_FLOW_ITEM_TYPE_END;
+ }
+ return type;
+}
+
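+/**
+ * Translate an Ethernet type value to the rte_flow item type of the
+ * next layer. TEB yields an inner Ethernet item only for tunnel
+ * headers (is_tunnel set), VLAN only for plain Ethernet/VLAN headers;
+ * anything else maps to RTE_FLOW_ITEM_TYPE_END.
+ */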
+static enum rte_flow_item_type
+mlx5_ethertype_to_item_type(rte_be16_t type_spec,
+ rte_be16_t type_mask, bool is_tunnel)
+{
+ enum rte_flow_item_type type;
+
+ switch (rte_be_to_cpu_16(type_spec & type_mask)) {
+ case RTE_ETHER_TYPE_TEB:
+ type = is_tunnel ?
+ RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
+ break;
+ case RTE_ETHER_TYPE_VLAN:
+ type = !is_tunnel ?
+ RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
+ break;
+ case RTE_ETHER_TYPE_IPV4:
+ type = RTE_FLOW_ITEM_TYPE_IPV4;
+ break;
+ case RTE_ETHER_TYPE_IPV6:
+ type = RTE_FLOW_ITEM_TYPE_IPV6;
+ break;
+ default:
+ type = RTE_FLOW_ITEM_TYPE_END;
+ }
+ return type;
+}
+
static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
- enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
- uint16_t ether_type = 0;
- uint16_t ether_type_m;
- uint8_t ip_next_proto = 0;
- uint8_t ip_next_proto_m;
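+/*
+ * Fetch the spec and mask of the given field from the item, falling
+ * back to the field's default mask when the item carries no mask.
+ */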
+#define MLX5_XSET_ITEM_MASK_SPEC(type, fld) \
+ do { \
+ const void *m = item->mask; \
+ const void *s = item->spec; \
+ mask = m ? \
+ ((const struct rte_flow_item_##type *)m)->fld : \
+ rte_flow_item_##type##_mask.fld; \
+ spec = ((const struct rte_flow_item_##type *)s)->fld; \
+ } while (0)
+
+ enum rte_flow_item_type ret;
+ uint16_t spec, mask;
if (item == NULL || item->spec == NULL)
- return ret;
+ return RTE_FLOW_ITEM_TYPE_VOID;
switch (item->type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- if (item->mask)
- ether_type_m = ((const struct rte_flow_item_eth *)
- (item->mask))->type;
- else
- ether_type_m = rte_flow_item_eth_mask.type;
- if (ether_type_m != RTE_BE16(0xFFFF))
- break;
- ether_type = ((const struct rte_flow_item_eth *)
- (item->spec))->type;
- if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
- ret = RTE_FLOW_ITEM_TYPE_VLAN;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(eth, type);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_ethertype_to_item_type(spec, mask, false);
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- if (item->mask)
- ether_type_m = ((const struct rte_flow_item_vlan *)
- (item->mask))->inner_type;
- else
- ether_type_m = rte_flow_item_vlan_mask.inner_type;
- if (ether_type_m != RTE_BE16(0xFFFF))
- break;
- ether_type = ((const struct rte_flow_item_vlan *)
- (item->spec))->inner_type;
- if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
- ret = RTE_FLOW_ITEM_TYPE_VLAN;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_ethertype_to_item_type(spec, mask, false);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- if (item->mask)
- ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
- (item->mask))->hdr.next_proto_id;
- else
- ip_next_proto_m =
- rte_flow_item_ipv4_mask.hdr.next_proto_id;
- if (ip_next_proto_m != 0xFF)
- break;
- ip_next_proto = ((const struct rte_flow_item_ipv4 *)
- (item->spec))->hdr.next_proto_id;
- if (ip_next_proto == IPPROTO_UDP)
- ret = RTE_FLOW_ITEM_TYPE_UDP;
- else if (ip_next_proto == IPPROTO_TCP)
- ret = RTE_FLOW_ITEM_TYPE_TCP;
- else if (ip_next_proto == IPPROTO_IP)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (ip_next_proto == IPPROTO_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_inet_proto_to_item_type(spec, mask);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- if (item->mask)
- ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
- (item->mask))->hdr.proto;
- else
- ip_next_proto_m =
- rte_flow_item_ipv6_mask.hdr.proto;
- if (ip_next_proto_m != 0xFF)
- break;
- ip_next_proto = ((const struct rte_flow_item_ipv6 *)
- (item->spec))->hdr.proto;
- if (ip_next_proto == IPPROTO_UDP)
- ret = RTE_FLOW_ITEM_TYPE_UDP;
- else if (ip_next_proto == IPPROTO_TCP)
- ret = RTE_FLOW_ITEM_TYPE_TCP;
- else if (ip_next_proto == IPPROTO_IP)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (ip_next_proto == IPPROTO_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_inet_proto_to_item_type(spec, mask);
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
- ether_type_m = item->mask ?
- ((const struct rte_flow_item_geneve *)
- (item->mask))->protocol :
- rte_flow_item_geneve_mask.protocol;
- ether_type = ((const struct rte_flow_item_geneve *)
- (item->spec))->protocol;
- ether_type_m = rte_be_to_cpu_16(ether_type_m);
- ether_type = rte_be_to_cpu_16(ether_type);
- switch (ether_type_m & ether_type) {
- case RTE_ETHER_TYPE_TEB:
- ret = RTE_FLOW_ITEM_TYPE_ETH;
- break;
- case RTE_ETHER_TYPE_IPV4:
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- break;
- case RTE_ETHER_TYPE_IPV6:
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- break;
- default:
- ret = RTE_FLOW_ITEM_TYPE_END;
- }
+ MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol);
+ ret = mlx5_ethertype_to_item_type(spec, mask, true);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ MLX5_XSET_ITEM_MASK_SPEC(gre, protocol);
+ ret = mlx5_ethertype_to_item_type(spec, mask, true);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol);
+ ret = mlx5_nsh_proto_to_item_type(spec, mask);
break;
default:
ret = RTE_FLOW_ITEM_TYPE_VOID;
break;
}
return ret;
+#undef MLX5_XSET_ITEM_MASK_SPEC
}
static const int *
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
* this must always be enabled (metadata may arrive
* from another port, not from local flows only).
*/
- if (priv->config.dv_flow_en &&
- priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
- mlx5_flow_ext_mreg_supported(dev)) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n = 1;
- } else if (mark) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n++;
- }
if (tunnel) {
unsigned int j;
}
}
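+/**
+ * Enable the packet mark flag on all Rx queues of the port and record
+ * in the port private data that marking is enabled, doing nothing if
+ * it is enabled already.
+ *
+ * @param dev
+ *   Pointer to the Ethernet device structure.
+ */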
+static void
+flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (priv->mark_enabled)
+ return;
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+ rxq_ctrl->rxq.mark = 1;
+ }
+ priv->mark_enabled = 1;
+}
+
/**
* Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
*
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t handle_idx;
struct mlx5_flow_handle *dev_handle;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ MLX5_ASSERT(wks);
+ if (wks->mark)
+ flow_rxq_mark_flag_set(dev);
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dev_handle, next)
flow_drv_rxq_flags_set(dev, dev_handle);
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
MLX5_ASSERT(rxq_ctrl != NULL);
if (rxq_ctrl == NULL)
continue;
- if (priv->config.dv_flow_en &&
- priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
- mlx5_flow_ext_mreg_supported(dev)) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n = 1;
- } else if (mark) {
- rxq_ctrl->flow_mark_n--;
- rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
- }
if (tunnel) {
unsigned int j;
if (rxq == NULL || rxq->ctrl == NULL)
continue;
- rxq->ctrl->flow_mark_n = 0;
rxq->ctrl->rxq.mark = 0;
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
rxq->ctrl->flow_tunnels_n[j] = 0;
rxq->ctrl->rxq.tunnel = 0;
}
+ priv->mark_enabled = 0;
}
/**
struct rte_flow_error *error)
{
struct mlx5_flow *dev_flow;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
flow_split_info->flow_idx, error);
*/
if (flow_split_info->prefix_layers)
dev_flow->handle->layers = flow_split_info->prefix_layers;
- if (flow_split_info->prefix_mark)
- dev_flow->handle->mark = 1;
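+ /* A prefix subflow carries the MARK action; record it in the
+ * per-thread workspace so Rx queue flags cover the whole flow.
+ */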
+ if (flow_split_info->prefix_mark) {
+ MLX5_ASSERT(wks);
+ wks->mark = 1;
+ }
if (sub_flow)
*sub_flow = dev_flow;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
MLX5_FLOW_TABLE_LEVEL_METER;
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
- flow_split_info->prefix_mark |= dev_flow->handle->mark;
+ flow_split_info->prefix_mark |= wks->mark;
flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
}
/* Add the prefix subflow. */
struct mlx5_flow_dv_sample_resource *sample_res;
struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
struct mlx5_flow_tbl_resource *sfx_tbl;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
#endif
size_t act_size;
size_t item_size;
}
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
- flow_split_info->prefix_mark |= dev_flow->handle->mark;
+ MLX5_ASSERT(wks);
+ flow_split_info->prefix_mark |= wks->mark;
/* The suffix group level has already been scaled with the factor; set
* MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scaling
* again in translation.