 	return ct;
}
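+/*
+ * Map pattern flags describing the tunneled payload (inner L2/L3 or MPLS)
+ * to the Ethernet type value the tunnel protocol field is expected to
+ * carry. Returns 0 if no applicable flag is set.
+ */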
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+		return RTE_ETHER_TYPE_TEB;
+	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+		return RTE_ETHER_TYPE_IPV4;
+	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+		return RTE_ETHER_TYPE_IPV6;
+	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+		return RTE_ETHER_TYPE_MPLS;
+	return 0;
+}
+
 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 			     const struct mlx5_flow_tunnel *tunnel,
 			     uint32_t group, uint32_t *table,
@@ ... @@
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
 				  uint32_t rix_jump);
-static inline uint16_t
-mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
-{
-	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
-		return RTE_ETHER_TYPE_TEB;
-	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
-		return RTE_ETHER_TYPE_IPV4;
-	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
-		return RTE_ETHER_TYPE_IPV6;
-	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
-		return RTE_ETHER_TYPE_MPLS;
-	return 0;
-}
-
static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
@@ ... @@
 		.size = size,
 	};
#else
+	static const struct rte_flow_item_gre empty_gre = {0,};
 	const struct rte_flow_item_gre *spec = item->spec;
 	const struct rte_flow_item_gre *mask = item->mask;
 	unsigned int size = sizeof(struct ibv_flow_spec_gre);
@@ ... @@
 		.size = size,
 	};
-	if (!mask)
-		mask = &rte_flow_item_gre_mask;
-	if (spec) {
-		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
-		tunnel.val.protocol = spec->protocol;
-		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
-		tunnel.mask.protocol = mask->protocol;
-		/* Remove unwanted bits from values. */
-		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
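+	/*
+	 * A GRE item without a spec matches any GRE header: use the
+	 * all-zero empty_gre as both value and mask.
+	 */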
+	if (!spec) {
+		spec = &empty_gre;
+		mask = &empty_gre;
+	} else {
+		if (!mask)
+			mask = &rte_flow_item_gre_mask;
+	}
+	tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+	tunnel.val.protocol = spec->protocol;
+	tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+	tunnel.mask.protocol = mask->protocol;
+	/* Remove unwanted bits from values. */
+	tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+	tunnel.val.key &= tunnel.mask.key;
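+	/*
+	 * If the rule does not explicitly match on the GRE protocol, deduce
+	 * the value from the inner layers recorded in item_flags.
+	 */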
+	if (tunnel.mask.protocol) {
 		tunnel.val.protocol &= tunnel.mask.protocol;
-		tunnel.val.key &= tunnel.mask.key;
+	} else {
+		tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
+		if (tunnel.val.protocol) {
+			tunnel.mask.protocol = 0xFFFF;
+			tunnel.val.protocol =
+				rte_cpu_to_be_16(tunnel.val.protocol);
+		}
 	}
#endif
 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
@@ ... @@
 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GRE:
-			flow_verbs_translate_item_gre(dev_flow, items,
-						      item_flags);
 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
 			item_flags |= MLX5_FLOW_LAYER_GRE;
+			tunnel_item = items;
 			break;
@@ ... @@
NULL, "item not supported");
}
}
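+	/*
+	 * GRE translation is deferred to this point so that
+	 * mlx5_translate_tunnel_etypes() sees the inner-layer flags of the
+	 * whole pattern; tunnel_item holds the GRE item saved in the loop
+	 * above (declared at function scope, initialized to NULL).
+	 */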
+	if (item_flags & MLX5_FLOW_LAYER_GRE)
+		flow_verbs_translate_item_gre(dev_flow, tunnel_item,
+					      item_flags);
 	dev_flow->handle->layers = item_flags;
 	/* Other members of attr will be ignored. */
 	dev_flow->verbs.attr.priority =
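For illustration only (not part of the patch): a minimal sketch of the kind of rule this change affects. The helper name, port/queue parameters, and error handling below are assumptions; only the rte_flow structures and rte_flow_create() are standard DPDK API. The GRE item carries no spec, so before this fix the Verbs path emitted a zero GRE protocol mask; with the fix the protocol type is deduced from the inner IPv6 item.

#include <stdint.h>
#include <rte_flow.h>

/* Hypothetical helper: steer GRE-encapsulated IPv6 to a given Rx queue. */
static struct rte_flow *
create_gre_inner_ipv6_rule(uint16_t port_id, uint16_t queue_index,
			   struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_queue queue = { .index = queue_index };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		/* No spec/mask: the GRE protocol type is left unspecified. */
		{ .type = RTE_FLOW_ITEM_TYPE_GRE },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

With a zero protocol mask the GRE protocol field does not participate in matching, so two such rules differing only in the inner header would translate to identical GRE specs; deducing the type from item_flags keeps them distinct.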