/** PMD tunnel related context */
struct mlx5_flow_tunnel_hub {
+ /* Tunnels list
+ * Access to the list MUST be MT protected
+ */
LIST_HEAD(, mlx5_flow_tunnel) tunnels;
- rte_spinlock_t sl; /* Tunnel list spinlock. */
+ /* protect access to the tunnels list */
+ rte_spinlock_t sl;
struct mlx5_hlist *groups; /** non tunnel groups */
};
static inline bool
is_tunnel_offload_active(struct rte_eth_dev *dev)
{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_priv *priv = dev->data->dev_private;
return !!priv->config.dv_miss_info;
+#else
+ RTE_SET_USED(dev);
+ return false;
+#endif
}
static inline bool
/*
 * Translate a flow group number to a flow table id.
 * NOTE(review): @p flags is passed by const pointer (per the patch this
 * fragment carries) rather than by value — callers must pass &grp_info.
 */
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
			     const struct mlx5_flow_tunnel *tunnel,
			     uint32_t group, uint32_t *table,
			     const struct flow_grp_info *flags,
			     struct rte_flow_error *error);
/*
 * Adjust RSS hash fields for a flow.
 * NOTE(review): presumably masks @p hash_fields against @p layer_types
 * based on the RSS descriptor and tunnel flag — confirm in the definition;
 * only the declaration is visible here.
 */
uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
				     int tunnel, uint64_t layer_types,
				     uint64_t hash_fields);