MLX5_FLOW_FATE_PORT_ID,
MLX5_FLOW_FATE_DROP,
MLX5_FLOW_FATE_DEFAULT_MISS,
+ MLX5_FLOW_FATE_SHARED_RSS,
MLX5_FLOW_FATE_MAX,
};
/**< Generic value indicates the fate action. */
uint32_t rix_default_fate;
/**< Indicates default miss fate action. */
+ uint32_t rix_srss;
+ /**< Indicates shared RSS fate action. */
};
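For context, a minimal sketch (not part of the patch) of how a fate-specific pool index pairs with the fate type; it assumes the handle keeps the fate type in a `fate_action` selector next to the union above, and the helper name is hypothetical:

static uint32_t
flow_handle_fate_index(const struct mlx5_flow_handle *handle)
{
	/* Return the pool index matching the fate type. */
	switch (handle->fate_action) {
	case MLX5_FLOW_FATE_DEFAULT_MISS:
		return handle->rix_default_fate;
	case MLX5_FLOW_FATE_SHARED_RSS:
		return handle->rix_srss;
	default:
		return 0; /* Other fates carry no pool index in this union. */
	}
}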
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_handle_dv dvh;
#define MLX5_NUM_MAX_DEV_FLOWS 32
/** Device flow structure. */
+__extension__
struct mlx5_flow {
struct rte_flow *flow; /**< Pointer to the main flow. */
uint32_t flow_idx; /**< The memory pool index to the main flow. */
uint64_t act_flags;
/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
bool external; /**< true if the flow is created external to PMD. */
- uint8_t ingress; /**< 1 if the flow is ingress. */
+ uint8_t ingress:1; /**< 1 if the flow is ingress. */
+ uint8_t skip_scale:1;
+ /**< 1 if scaling the table with a factor should be skipped. */
union {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_dv_workspace dv;
/** PMD tunnel related context */
struct mlx5_flow_tunnel_hub {
+ /* Tunnels list.
+ * Access to the list MUST be MT protected.
+ */
LIST_HEAD(, mlx5_flow_tunnel) tunnels;
- rte_spinlock_t sl; /* Tunnel list spinlock. */
+ /* Protects access to the tunnels list. */
+ rte_spinlock_t sl;
struct mlx5_hlist *groups; /** non tunnel groups */
};
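As the comments above require, every reader and writer of `tunnels` must hold `sl`; a minimal sketch of a locked lookup, assuming mlx5_flow_tunnel carries an `id` field and a `chain` LIST_ENTRY (both illustrative):

#include <sys/queue.h>
#include <rte_spinlock.h>

static struct mlx5_flow_tunnel *
tunnel_hub_find(struct mlx5_flow_tunnel_hub *hub, uint32_t id)
{
	struct mlx5_flow_tunnel *tun;

	rte_spinlock_lock(&hub->sl);
	LIST_FOREACH(tun, &hub->tunnels, chain)
		if (tun->id == id)
			break;
	rte_spinlock_unlock(&hub->sl);
	return tun; /* NULL when nothing matched. */
}

A real lookup would also take a reference on the tunnel under the lock so it cannot be released before the caller is done with it.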
static inline bool
is_tunnel_offload_active(struct rte_eth_dev *dev)
{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_priv *priv = dev->data->dev_private;
return !!priv->config.dv_miss_info;
+#else
+ RTE_SET_USED(dev);
+ return false;
+#endif
}
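The #else branch keeps the helper compilable when DV support is absent, so callers can guard unconditionally; an illustrative call site, assuming `dev` and `error` in scope:

if (!is_tunnel_offload_active(dev))
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				  "tunnel offload is not active");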
static inline bool
/* Flow structure. */
struct rte_flow {
ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
- uint32_t shared_rss; /** < Shared RSS action ID. */
uint32_t dev_handles;
/**< Device flow handles that are part of the flow. */
uint32_t drv_type:2; /**< Driver type. */
/* Thread specific flow workspace intermediate data. */
struct mlx5_flow_workspace {
+ /* If creating another flow in the same thread, push a new workspace onto the stack. */
+ struct mlx5_flow_workspace *prev;
+ struct mlx5_flow_workspace *next;
+ uint32_t inuse; /* Can't create a new flow with the current workspace. */
struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
- struct mlx5_flow_rss_desc rss_desc[2];
- uint32_t rssq_num[2]; /* Allocated queue num in rss_desc. */
- int flow_idx; /* Intermediate device flow index. */
- int flow_nested_idx; /* Intermediate device flow index, nested. */
+ struct mlx5_flow_rss_desc rss_desc;
+ uint32_t rssq_num; /* Allocated queue num in rss_desc. */
+ uint32_t flow_idx; /* Intermediate device flow index. */
+};
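A minimal sketch of the push/pop discipline that the `prev`/`next` links and `inuse` flag imply; the helper names and the caller-held `cur` pointer are illustrative, not part of the patch:

#include <stdlib.h>

/* Push: reuse the current level if free, descend into a cached one,
 * or chain a freshly allocated workspace below the current level.
 */
static struct mlx5_flow_workspace *
flow_workspace_push(struct mlx5_flow_workspace **cur)
{
	struct mlx5_flow_workspace *ws = *cur;

	if (ws && !ws->inuse) {
		/* Current level is free: reuse it in place. */
	} else if (ws && ws->next) {
		/* A previously allocated deeper level is cached. */
		ws = ws->next;
	} else {
		ws = calloc(1, sizeof(*ws));
		if (!ws)
			return NULL;
		ws->prev = *cur;
		if (*cur)
			(*cur)->next = ws;
	}
	ws->inuse = 1;
	*cur = ws;
	return ws;
}

/* Pop: mark the level free; keep it chained so it can be reused. */
static void
flow_workspace_pop(struct mlx5_flow_workspace **cur)
{
	(*cur)->inuse = 0;
	if ((*cur)->prev)
		*cur = (*cur)->prev;
}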
+
+struct mlx5_flow_split_info {
+ bool external;
+ /**< True if the flow is created by a request external to the PMD. */
+ uint8_t skip_scale; /**< Skip scaling the table with a factor. */
+ uint32_t flow_idx; /**< The memory pool index to the flow. */
+ uint32_t prefix_mark; /**< Prefix subflow mark flag. */
+ uint64_t prefix_layers; /**< Prefix subflow layers. */
};
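An illustrative initializer for a split, assuming `flow_idx` holds the parent flow's pool index:

struct mlx5_flow_split_info split_info = {
	.external = true,     /* Created on an application request. */
	.skip_scale = 0,      /* Let the table scale factor apply. */
	.flow_idx = flow_idx, /* Pool index of the flow being split. */
	.prefix_mark = 0,     /* No mark inherited from a prefix subflow. */
	.prefix_layers = 0,   /* No layers detected by a prefix subflow yet. */
};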
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
uint64_t fdb_def_rule:1;
/* force standard group translation */
uint64_t std_tbl_fix:1;
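+ /* skip the flow table scale factor */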
+ uint64_t skip_scale:1;
};
static inline bool
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
- struct flow_grp_info flags,
- struct rte_flow_error *error);
+ const struct flow_grp_info *flags,
+ struct rte_flow_error *error);
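With the constified by-pointer signature, callers fill a flow_grp_info once and pass its address; an illustrative call, assuming `dev`, `tunnel`, `attr`, and `error` in scope:

struct flow_grp_info grp_info = {
	.fdb_def_rule = 1,
	.std_tbl_fix = 1,
	.skip_scale = 0, /* Apply the group-to-table scale factor. */
};
uint32_t table = 0;
int ret;

ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
			       &grp_info, error);
if (ret)
	return ret;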
uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
int tunnel, uint64_t layer_types,
uint64_t hash_fields);