MLX5_RTE_FLOW_ACTION_TYPE_MARK,
MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
- MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS,
MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+ MLX5_RTE_FLOW_ACTION_TYPE_AGE,
+};
+
+/* Bit offset at which the shared action type is encoded inside a
+ * shared action id; presumably the lower bits carry the per-type
+ * object index — confirm against the shared action create handler.
+ */
+#define MLX5_SHARED_ACTION_TYPE_OFFSET 30
+
+/* Shared action types supported by the PMD. */
+enum {
+ MLX5_SHARED_ACTION_TYPE_RSS,
+ MLX5_SHARED_ACTION_TYPE_AGE,
};
/* Matches on selected register. */
MLX5_COPY_MARK,
MLX5_MTR_COLOR,
MLX5_MTR_SFX,
+ MLX5_ASO_FLOW_HIT, /* NOTE(review): register presumably consumed by the ASO age/flow-hit feature — confirm. */
};
/* Default queue number. */
#define MLX5_GENEVE_OPT_LEN_0 14
#define MLX5_GENEVE_OPT_LEN_1 63
-#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
- sizeof(struct rte_flow_item_ipv4))
+/* Size of an Ethernet header plus an IPv4 header: uses the on-wire
+ * protocol header structs rather than the larger rte_flow_item_*
+ * match structs removed above — confirm intended usage at callers.
+ */
+#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \
+ sizeof(struct rte_ipv4_hdr))
/* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */
#define MLX5_IPV4_FRAG_OFFSET_MASK \
MLX5_FLOW_FATE_PORT_ID,
MLX5_FLOW_FATE_DROP,
MLX5_FLOW_FATE_DEFAULT_MISS,
+ MLX5_FLOW_FATE_SHARED_RSS, /* Fate resolved through a shared RSS action. */
MLX5_FLOW_FATE_MAX,
};
#define MLX5_ENCAP_MAX_LEN 132
-/* Encap/decap resource key of the hash organization. */
-union mlx5_flow_encap_decap_key {
- struct {
- uint32_t ft_type:8; /**< Flow table type, Rx or Tx. */
- uint32_t refmt_type:8; /**< Header reformat type. */
- uint32_t buf_size:8; /**< Encap buf size. */
- uint32_t table_level:8; /**< Root table or not. */
- uint32_t cksum; /**< Encap buf check sum. */
- };
- uint64_t v64; /**< full 64bits value of key */
-};
-
/* Encap/decap resource structure. */
struct mlx5_flow_dv_encap_decap_resource {
struct mlx5_hlist_entry entry;
/* Sample action resource structure. */
struct mlx5_flow_dv_sample_resource {
- ILIST_ENTRY(uint32_t)next; /**< Pointer to next element. */
- uint32_t refcnt; /**< Reference counter. */
- void *verbs_action; /**< Verbs sample action object. */
+ struct mlx5_cache_entry entry; /**< Cache entry. */
+ union {
+ void *verbs_action; /**< Verbs sample action object. */
+ void **sub_actions; /**< Sample sub-action array. */
+ };
+ struct rte_eth_dev *dev; /**< Device registers the action. */
+ uint32_t idx; /**< Sample object index. */
uint8_t ft_type; /** Flow Table Type */
uint32_t ft_id; /** Flow Table Level */
uint32_t ratio; /** Sample Ratio */
/* Destination array action resource structure. */
struct mlx5_flow_dv_dest_array_resource {
- ILIST_ENTRY(uint32_t)next; /**< Pointer to next element. */
- uint32_t refcnt; /**< Reference counter. */
+ struct mlx5_cache_entry entry; /**< Cache entry. */
+ uint32_t idx; /**< Destination array action object index. */
uint8_t ft_type; /** Flow Table Type */
uint8_t num_of_dest; /**< Number of destination actions. */
+ struct rte_eth_dev *dev; /**< Device registers the action. */
void *action; /**< Pointer to the rdma core action. */
struct mlx5_flow_sub_actions_idx sample_idx[MLX5_MAX_DEST_NUM];
/**< Action index resources. */
/**< Generic value indicates the fate action. */
uint32_t rix_default_fate;
/**< Indicates default miss fate action. */
+ uint32_t rix_srss;
+ /**< Indicates shared RSS fate action. */
};
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_handle_dv dvh;
#define MLX5_NUM_MAX_DEV_FLOWS 32
/** Device flow structure. */
+/* __extension__ suppresses pedantic diagnostics for the anonymous
+ * union / bit-field layout below (GNU C extension).
+ */
+__extension__
struct mlx5_flow {
struct rte_flow *flow; /**< Pointer to the main flow. */
uint32_t flow_idx; /**< The memory pool index to the main flow. */
uint64_t act_flags;
/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
bool external; /**< true if the flow is created external to PMD. */
- uint8_t ingress; /**< 1 if the flow is ingress. */
+ uint8_t ingress:1; /**< 1 if the flow is ingress. */
+ uint8_t skip_scale:1;
+ /**< 1 if scaling the flow table with the factor must be skipped. */
union {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_dv_workspace dv;
uint32_t ref_cnt; /**< Use count. */
};
-/* Fdir flow structure */
-struct mlx5_fdir_flow {
- LIST_ENTRY(mlx5_fdir_flow) next; /* Pointer to the next element. */
- struct mlx5_fdir *fdir; /* Pointer to fdir. */
- uint32_t rix_flow; /* Index to flow. */
-};
-
#define MLX5_MAX_TUNNELS 256
#define MLX5_TNL_MISS_RULE_PRIORITY 3
#define MLX5_TNL_MISS_FDB_JUMP_GRP 0x1234faac
/** PMD tunnel related context */
struct mlx5_flow_tunnel_hub {
+ /* Tunnels list.
+ * Access to the list MUST be MT protected (see sl below).
+ */
LIST_HEAD(, mlx5_flow_tunnel) tunnels;
+ /* Spinlock serializing concurrent access to the tunnels list. */
+ rte_spinlock_t sl;
struct mlx5_hlist *groups; /** non tunnel groups */
};
+/* True when tunnel offload (dv_miss_info) is enabled on the port;
+ * always false when built without DV flow support.
+ */
static inline bool
is_tunnel_offload_active(struct rte_eth_dev *dev)
{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_priv *priv = dev->data->dev_private;
return !!priv->config.dv_miss_info;
+#else
+ RTE_SET_USED(dev);
+ return false;
+#endif
}
static inline bool
/* Flow structure. */
struct rte_flow {
ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
- struct mlx5_shared_action_rss *shared_rss; /** < Shred RSS action. */
uint32_t dev_handles;
/**< Device flow handles that are part of the flow. */
uint32_t drv_type:2; /**< Driver type. */
- uint32_t fdir:1; /**< Identifier of associated FDIR if any. */
uint32_t tunnel:1;
uint32_t meter:16; /**< Holds flow meter id. */
uint32_t rix_mreg_copy;
/**< Index to metadata register copy table resource. */
uint32_t counter; /**< Holds flow counter. */
uint32_t tunnel_id; /**< Tunnel id */
+ uint32_t age; /**< Holds ASO age bit index. */
} __rte_packed;
/*
MLX5_RSS_HASH_NONE,
};
-#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)
-
/* Shared RSS action structure */
struct mlx5_shared_action_rss {
+ ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
+ uint32_t refcnt; /**< Atomically accessed refcnt. */
struct rte_flow_action_rss origin; /**< Original rte RSS action. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
uint16_t *queue; /**< Queue indices to use. */
};
struct rte_flow_shared_action {
- LIST_ENTRY(rte_flow_shared_action) next;
- /**< Pointer to the next element. */
- uint32_t refcnt; /**< Atomically accessed refcnt. */
- uint64_t type;
- /**< Shared action type (see MLX5_FLOW_ACTION_SHARED_*). */
- union {
- struct mlx5_shared_action_rss rss;
- /**< Shared RSS action. */
- };
+ uint32_t id;
+ /**< Opaque handle; presumably encodes the action type (shifted by
+ * MLX5_SHARED_ACTION_TYPE_OFFSET) plus the object index — confirm
+ * against the shared action create/destroy implementation.
+ */
};
/* Thread specific flow workspace intermediate data. */
struct mlx5_flow_workspace {
+ /* Workspaces form a per-thread stack: creating another flow from
+ * the same thread pushes a new workspace on top.
+ */
+ struct mlx5_flow_workspace *prev;
+ struct mlx5_flow_workspace *next;
+ uint32_t inuse; /* Non-zero while busy; a nested flow must push a new workspace. */
struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
- struct mlx5_flow_rss_desc rss_desc[2];
- uint32_t rssq_num[2]; /* Allocated queue num in rss_desc. */
- int flow_idx; /* Intermediate device flow index. */
- int flow_nested_idx; /* Intermediate device flow index, nested. */
+ struct mlx5_flow_rss_desc rss_desc;
+ uint32_t rssq_num; /* Allocated queue num in rss_desc. */
+ uint32_t flow_idx; /* Intermediate device flow index. */
+};
+
+/* Parameters handed down when a flow is split into subflows;
+ * NOTE(review): purpose inferred from field names — confirm at the
+ * flow split helpers.
+ */
+struct mlx5_flow_split_info {
+ bool external;
+ /**< True if flow is created by request external to PMD. */
+ uint8_t skip_scale; /**< 1 to skip scaling the flow table with the factor. */
+ uint32_t flow_idx; /**< Memory pool index of the flow. */
+ uint32_t prefix_mark; /**< Prefix subflow mark flag. */
+ uint64_t prefix_layers; /**< Prefix subflow layers. */
};
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
struct rte_flow_shared_action *action,
const void *action_conf,
struct rte_flow_error *error);
+/* Driver callback querying a shared action's state into the
+ * caller-provided @p data buffer; returns 0 on success or a negative
+ * errno with @p error populated.
+ */
+typedef int (*mlx5_flow_action_query_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action *action,
+ void *data,
+ struct rte_flow_error *error);
typedef int (*mlx5_flow_sync_domain_t)
(struct rte_eth_dev *dev,
uint32_t domains,
uint32_t flags);
+
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
mlx5_flow_prepare_t prepare;
mlx5_flow_action_create_t action_create;
mlx5_flow_action_destroy_t action_destroy;
mlx5_flow_action_update_t action_update;
+ mlx5_flow_action_query_t action_query;
mlx5_flow_sync_domain_t sync_domain;
};
uint64_t fdb_def_rule:1;
/* force standard group translation */
uint64_t std_tbl_fix:1;
+ /* skip scaling the group into a table with the factor */
+ uint64_t skip_scale:1;
};
static inline bool
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
- struct flow_grp_info flags,
- struct rte_flow_error *error);
+ const struct flow_grp_info *flags,
+ struct rte_flow_error *error);
uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
int tunnel, uint64_t layer_types,
uint64_t hash_fields);
int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
struct rte_mtr_error *error);
int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
-struct rte_flow_shared_action *mlx5_flow_get_shared_rss(struct rte_flow *flow);
int mlx5_shared_action_flush(struct rte_eth_dev *dev);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
void flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
struct mlx5_cache_entry *entry);
+int flow_dv_sample_match_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+struct mlx5_cache_entry *flow_dv_sample_create_cb
+ (struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+void flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry);
+
+int flow_dv_dest_array_match_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+struct mlx5_cache_entry *flow_dv_dest_array_create_cb
+ (struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+void flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry);
+struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
+ uint32_t age_idx);
#endif /* RTE_PMD_MLX5_FLOW_H_ */