#include <stdint.h>
#include <string.h>
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
-#include <rte_atomic.h>
#include <rte_alarm.h>
#include <rte_mtr.h>
+#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include "mlx5.h"
MLX5_RTE_FLOW_ITEM_TYPE_TAG,
MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
MLX5_RTE_FLOW_ITEM_TYPE_VLAN,
+ MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL,
};
/* Private (internal) rte flow actions. */
MLX5_RTE_FLOW_ACTION_TYPE_TAG,
MLX5_RTE_FLOW_ACTION_TYPE_MARK,
MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
+ MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS,
+ MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
};
/* Matches on selected register. */
MLX5_MTR_SFX,
};
-/* Pattern outer Layer bits. */
+/* Default queue number. */
+#define MLX5_RSSQ_DEFAULT_NUM 16
+
#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
/* Pattern tunnel Layer bits (continued). */
#define MLX5_FLOW_LAYER_GTP (1u << 28)
+/* Pattern eCPRI Layer bit. */
+#define MLX5_FLOW_LAYER_ECPRI (UINT64_C(1) << 29)
+
+/* IPv6 Fragment Extension Header bit. */
+#define MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT (1u << 30)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT (1u << 31)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
#define MLX5_FLOW_ACTION_METER (1ull << 31)
#define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32)
#define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
+#define MLX5_FLOW_ACTION_AGE (1ull << 34)
+#define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35)
+#define MLX5_FLOW_ACTION_SAMPLE (1ull << 36)
+#define MLX5_FLOW_ACTION_TUNNEL_SET (1ull << 37)
+#define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38)
#define MLX5_FLOW_FATE_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
- MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP)
+ MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \
+ MLX5_FLOW_ACTION_DEFAULT_MISS)
#define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
sizeof(struct rte_flow_item_ipv4))
+/* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */
+#define MLX5_IPV4_FRAG_OFFSET_MASK \
+ (RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)
+
+/* Some item fields can accept a range of values (given via spec and last). */
+#define MLX5_ITEM_RANGE_NOT_ACCEPTED false
+#define MLX5_ITEM_RANGE_ACCEPTED true
+
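+/*
+ * Illustration (not part of this header): an application can match a range
+ * of IPv4 fragment offsets by filling both spec and last of the pattern
+ * item; the names below come from the public rte_flow API:
+ *
+ *	struct rte_flow_item_ipv4 spec = {
+ *		.hdr.fragment_offset = RTE_BE16(1),
+ *	};
+ *	struct rte_flow_item_ipv4 last = {
+ *		.hdr.fragment_offset = RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK),
+ *	};
+ *	struct rte_flow_item_ipv4 mask = {
+ *		.hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK),
+ *	};
+ *	struct rte_flow_item item = {
+ *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
+ *		.spec = &spec, .last = &last, .mask = &mask,
+ *	};
+ *
+ * Items validated with MLX5_ITEM_RANGE_NOT_ACCEPTED reject a last value
+ * that differs from spec.
+ */
+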
+/* Numbers of software modify-header actions consumed per field. */
+#define MLX5_ACT_NUM_MDF_IPV4 1
+#define MLX5_ACT_NUM_MDF_IPV6 4
+#define MLX5_ACT_NUM_MDF_MAC 2
+#define MLX5_ACT_NUM_MDF_VID 1
+#define MLX5_ACT_NUM_MDF_PORT 2
+#define MLX5_ACT_NUM_MDF_TTL 1
+#define MLX5_ACT_NUM_DEC_TTL MLX5_ACT_NUM_MDF_TTL
+#define MLX5_ACT_NUM_MDF_TCPSEQ 1
+#define MLX5_ACT_NUM_MDF_TCPACK 1
+#define MLX5_ACT_NUM_SET_REG 1
+#define MLX5_ACT_NUM_SET_TAG 1
+#define MLX5_ACT_NUM_CPY_MREG MLX5_ACT_NUM_SET_TAG
+#define MLX5_ACT_NUM_SET_MARK MLX5_ACT_NUM_SET_TAG
+#define MLX5_ACT_NUM_SET_META MLX5_ACT_NUM_SET_TAG
+#define MLX5_ACT_NUM_SET_DSCP 1
+
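+/*
+ * Informative example: rewriting an IPv6 source address consumes
+ * MLX5_ACT_NUM_MDF_IPV6 (4) commands, since the 128-bit address is written
+ * in 32-bit chunks; adding a TTL decrement (MLX5_ACT_NUM_DEC_TTL, 1) and a
+ * DSCP set (MLX5_ACT_NUM_SET_DSCP, 1) totals 6 commands, well within the
+ * root table budget of MLX5_ROOT_TBL_MODIFY_NUM (16) defined below.
+ */
+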
enum mlx5_flow_drv_type {
MLX5_FLOW_TYPE_MIN,
MLX5_FLOW_TYPE_DV,
MLX5_FLOW_FATE_JUMP,
MLX5_FLOW_FATE_PORT_ID,
MLX5_FLOW_FATE_DROP,
+ MLX5_FLOW_FATE_DEFAULT_MISS,
MLX5_FLOW_FATE_MAX,
};
/* Matcher structure. */
struct mlx5_flow_dv_matcher {
- LIST_ENTRY(mlx5_flow_dv_matcher) next;
- /**< Pointer to the next element. */
+	struct mlx5_cache_entry entry; /**< Cache entry. */
struct mlx5_flow_tbl_resource *tbl;
/**< Pointer to the table(group) the matcher associated with. */
- rte_atomic32_t refcnt; /**< Reference counter. */
void *matcher_object; /**< Pointer to DV matcher */
uint16_t crc; /**< CRC of key. */
uint16_t priority; /**< Priority of matcher. */
#define MLX5_ENCAP_MAX_LEN 132
+/* Encap/decap resource key of the hash organization. */
+union mlx5_flow_encap_decap_key {
+ struct {
+ uint32_t ft_type:8; /**< Flow table type, Rx or Tx. */
+ uint32_t refmt_type:8; /**< Header reformat type. */
+ uint32_t buf_size:8; /**< Encap buf size. */
+ uint32_t table_level:8; /**< Root table or not. */
+		uint32_t cksum; /**< Encap buf checksum. */
+	};
+	uint64_t v64; /**< Full 64-bit value of the key. */
+};
+
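+/*
+ * Key composition sketch (illustrative only; "resource" stands for a filled
+ * mlx5_flow_dv_encap_decap_resource, while "is_root_table" and the hash
+ * list handle are assumptions of this example):
+ *
+ *	union mlx5_flow_encap_decap_key key = {
+ *		.ft_type = resource->ft_type,
+ *		.refmt_type = resource->reformat_type,
+ *		.buf_size = resource->size,
+ *		.table_level = !!is_root_table,
+ *		.cksum = rte_raw_cksum(resource->buf, resource->size),
+ *	};
+ *	entry = mlx5_hlist_register(encaps_decaps, key.v64, &ctx);
+ *
+ * Checksum collisions on v64 are resolved by the exact-match callback,
+ * flow_dv_encap_decap_match_cb(), declared at the bottom of this header.
+ */
+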
/* Encap/decap resource structure. */
struct mlx5_flow_dv_encap_decap_resource {
-	ILIST_ENTRY(uint32_t)next;
-	/* Pointer to next element. */
+	struct mlx5_hlist_entry entry; /**< Hash list entry. */
- rte_atomic32_t refcnt; /**< Reference counter. */
- void *verbs_action;
- /**< Verbs encap/decap action object. */
+ uint32_t refcnt; /**< Reference counter. */
+ void *action;
+ /**< Encap/decap action object. */
uint8_t buf[MLX5_ENCAP_MAX_LEN];
size_t size;
uint8_t reformat_type;
uint8_t ft_type;
uint64_t flags; /**< Flags for RDMA API. */
+ uint32_t idx; /**< Index for the index memory pool. */
};
/* Tag resource structure. */
struct mlx5_hlist_entry entry;
/**< hash list entry for tag resource, tag value as the key. */
void *action;
- /**< Verbs tag action object. */
- rte_atomic32_t refcnt; /**< Reference counter. */
+ /**< Tag action object. */
+ uint32_t refcnt; /**< Reference counter. */
uint32_t idx; /**< Index for the index memory pool. */
};
/*
* Number of modification commands.
- * If extensive metadata registers are supported, the maximal actions amount is
- * 16 and 8 otherwise on root table. The validation could also be done in the
- * lower driver layer.
- * On non-root table, there is no limitation, but 32 is enough right now.
+ * The maximal number of actions supported by the FW is a constant: 16 in
+ * the latest releases and 8 in some older ones.
+ * Since there is no interface to query this capacity, the maximal value is
+ * used to let the PMD create the flow. Validation is done in the lower
+ * driver layer or in the FW, and a failure is returned if the number of
+ * actions exceeds the maximum supported on the root table.
+ * On non-root tables, there is no limitation, but 32 is enough right now.
*/
#define MLX5_MAX_MODIFY_NUM 32
#define MLX5_ROOT_TBL_MODIFY_NUM 16
-#define MLX5_ROOT_TBL_MODIFY_NUM_NO_MREG 8
/* Modify resource structure */
struct mlx5_flow_dv_modify_hdr_resource {
- LIST_ENTRY(mlx5_flow_dv_modify_hdr_resource) next;
- /* Pointer to next element. */
- rte_atomic32_t refcnt; /**< Reference counter. */
- struct ibv_flow_action *verbs_action;
- /**< Verbs modify header action object. */
+	struct mlx5_hlist_entry entry; /**< Hash list entry. */
+ void *action; /**< Modify header action object. */
+ /* Key area for hash list matching: */
uint8_t ft_type; /**< Flow table type, Rx or Tx. */
uint32_t actions_num; /**< Number of modification actions. */
uint64_t flags; /**< Flags for RDMA API. */
/**< Modification actions. */
};
+/* Modify resource key of the hash organization. */
+union mlx5_flow_modify_hdr_key {
+ struct {
+ uint32_t ft_type:8; /**< Flow table type, Rx or Tx. */
+ uint32_t actions_num:5; /**< Number of modification actions. */
+ uint32_t group:19; /**< Flow group id. */
+		uint32_t cksum; /**< Actions checksum. */
+	};
+	uint64_t v64; /**< Full 64-bit value of the key. */
+};
+
/* Jump action resource structure. */
struct mlx5_flow_dv_jump_tbl_resource {
- rte_atomic32_t refcnt; /**< Reference counter. */
- uint8_t ft_type; /**< Flow table type, Rx or Tx. */
void *action; /**< Pointer to the rdma core action. */
};
/* Port ID resource structure. */
struct mlx5_flow_dv_port_id_action_resource {
- ILIST_ENTRY(uint32_t)next;
- /* Pointer to next element. */
- rte_atomic32_t refcnt; /**< Reference counter. */
- void *action;
- /**< Verbs tag action object. */
+	struct mlx5_cache_entry entry; /**< Cache entry. */
+ void *action; /**< Action object. */
uint32_t port_id; /**< Port ID value. */
+ uint32_t idx; /**< Indexed pool memory index. */
};
/* Push VLAN action resource structure */
struct mlx5_flow_dv_push_vlan_action_resource {
- ILIST_ENTRY(uint32_t)next;
- /* Pointer to next element. */
- rte_atomic32_t refcnt; /**< Reference counter. */
- void *action; /**< Direct verbs action object. */
+ struct mlx5_cache_entry entry; /* Cache entry. */
+ void *action; /**< Action object. */
uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
rte_be32_t vlan_tag; /**< VLAN tag value. */
+ uint32_t idx; /**< Indexed pool memory index. */
};
/* Metadata register copy table entry. */
struct mlx5_hlist_entry hlist_ent;
LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
/* List entry for device flows. */
- uint32_t refcnt; /* Reference counter. */
- uint32_t appcnt; /* Apply/Remove counter. */
- struct rte_flow *flow; /* Built flow for copy. */
+	uint32_t idx; /* Indexed pool memory index. */
+	uint32_t rix_flow; /* Index to the built flow for copy. */
+};
+
+/* Table tunnel parameter. */
+struct mlx5_flow_tbl_tunnel_prm {
+ const struct mlx5_flow_tunnel *tunnel;
+ uint32_t group_id;
+ bool external;
};
/* Table data structure of the hash organization. */
/**< hash list entry, 64-bits key inside. */
struct mlx5_flow_tbl_resource tbl;
/**< flow table resource. */
- LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
+ struct mlx5_cache_list matchers;
/**< matchers' header associated with the flow table. */
struct mlx5_flow_dv_jump_tbl_resource jump;
/**< jump resource, at most one for each table created. */
uint32_t idx; /**< index for the indexed mempool. */
+	const struct mlx5_flow_tunnel *tunnel; /**< Tunnel offload context. */
+	uint32_t group_id; /**< Group the table belongs to. */
+	bool external; /**< True if created by an external rule. */
+	bool tunnel_offload; /**< Tunnel offload table or not. */
+ bool is_egress; /**< Egress table. */
+};
+
+/* Sub rdma-core actions list. */
+struct mlx5_flow_sub_actions_list {
+ uint32_t actions_num; /**< Number of sample actions. */
+ uint64_t action_flags;
+ void *dr_queue_action;
+ void *dr_tag_action;
+ void *dr_cnt_action;
+ void *dr_port_id_action;
+ void *dr_encap_action;
+};
+
+/* Sample sub-actions resource list. */
+struct mlx5_flow_sub_actions_idx {
+ uint32_t rix_hrxq; /**< Hash Rx queue object index. */
+ uint32_t rix_tag; /**< Index to the tag action. */
+	uint32_t cnt; /**< Index to the flow counter. */
+ uint32_t rix_port_id_action; /**< Index to port ID action resource. */
+ uint32_t rix_encap_decap; /**< Index to encap/decap resource. */
+};
+
+/* Sample action resource structure. */
+struct mlx5_flow_dv_sample_resource {
+ struct mlx5_cache_entry entry; /**< Cache entry. */
+ union {
+ void *verbs_action; /**< Verbs sample action object. */
+ void **sub_actions; /**< Sample sub-action array. */
+ };
+	uint32_t idx; /**< Sample object index. */
+	uint8_t ft_type; /**< Flow table type. */
+	uint32_t ft_id; /**< Flow table level. */
+	uint32_t ratio; /**< Sample ratio. */
+	uint64_t set_action; /**< Restore reg_c0 value. */
+	void *normal_path_tbl; /**< Normal path flow table pointer. */
+	void *default_miss; /**< Default miss dr_action. */
+ struct mlx5_flow_sub_actions_idx sample_idx;
+ /**< Action index resources. */
+ struct mlx5_flow_sub_actions_list sample_act;
+ /**< Action resources. */
+};
+
+#define MLX5_MAX_DEST_NUM 2
+
+/* Destination array action resource structure. */
+struct mlx5_flow_dv_dest_array_resource {
+ struct mlx5_cache_entry entry; /**< Cache entry. */
+	uint32_t idx; /**< Destination array action object index. */
+	uint8_t ft_type; /**< Flow table type. */
+ uint8_t num_of_dest; /**< Number of destination actions. */
+ void *action; /**< Pointer to the rdma core action. */
+ struct mlx5_flow_sub_actions_idx sample_idx[MLX5_MAX_DEST_NUM];
+ /**< Action index resources. */
+ struct mlx5_flow_sub_actions_list sample_act[MLX5_MAX_DEST_NUM];
+ /**< Action resources. */
};
/* Verbs specification header. */
uint16_t size;
};
-struct mlx5_flow_rss {
- uint32_t level;
- uint32_t queue_num; /**< Number of entries in @p queue. */
- uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
- uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
- uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
-};
+/* PMD flow priority for tunnel. */
+#define MLX5_TUNNEL_PRIO_GET(rss_desc) \
+ ((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)
+
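+/*
+ * Usage sketch (illustrative): the macro result can serve as the
+ * subpriority fed to mlx5_flow_adjust_priority(), declared later in this
+ * header:
+ *
+ *	subprio = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ *	priority = mlx5_flow_adjust_priority(dev, attr->priority, subprio);
+ */
+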
/** Device flow handle structure for DV mode only. */
struct mlx5_flow_handle_dv {
/**< Index to push VLAN action resource in cache. */
uint32_t rix_tag;
/**< Index to the tag action. */
+ uint32_t rix_sample;
+ /**< Index to sample action resource in cache. */
+ uint32_t rix_dest_array;
+ /**< Index to destination array resource in cache. */
} __rte_packed;
/** Device flow handle structure: used both for creating & destroying. */
/**< Index to next device flow handle. */
uint64_t layers;
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
- void *ib_flow; /**< Verbs flow pointer. */
+ void *drv_flow; /**< pointer to driver flow object. */
uint32_t split_flow_id:28; /**< Sub flow unique match flow id. */
	uint32_t mark:1; /**< Metadata rxq mark flag. */
uint32_t fate_action:3; /**< Fate action type. */
/**< Index to port ID action resource. */
uint32_t rix_fate;
/**< Generic value indicates the fate action. */
+ uint32_t rix_default_fate;
+ /**< Indicates default miss fate action. */
};
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_handle_dv dvh;
/**< Pointer to the jump action resource. */
struct mlx5_flow_dv_match_params value;
/**< Holds the value that the packet is compared to. */
+ struct mlx5_flow_dv_sample_resource *sample_res;
+ /**< Pointer to the sample action resource. */
+ struct mlx5_flow_dv_dest_array_resource *dest_array_res;
+ /**< Pointer to the destination array resource. */
};
/*
/** Device flow structure. */
struct mlx5_flow {
struct rte_flow *flow; /**< Pointer to the main flow. */
- uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+ uint32_t flow_idx; /**< The memory pool index to the main flow. */
+ uint64_t hash_fields; /**< Hash Rx queue hash fields. */
uint64_t act_flags;
/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
bool external; /**< true if the flow is created external to PMD. */
};
struct mlx5_flow_handle *handle;
uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
+	const struct mlx5_flow_tunnel *tunnel; /**< Related tunnel, if any. */
};
/* Flow meter state. */
uint32_t idx; /* Index to meter object. */
uint32_t meter_id;
/**< Meter id. */
- struct rte_mtr_params params;
- /**< Meter rule parameters. */
struct mlx5_flow_meter_profile *profile;
/**< Meter profile parameters. */
- struct rte_flow_attr attr;
- /**< Flow attributes. */
+
+ rte_spinlock_t sl; /**< Meter action spinlock. */
+
+ /** Policer actions (per meter output color). */
+ enum rte_mtr_policer_action action[RTE_COLORS];
+
+ /** Set of stats counters to be enabled.
+ * @see enum rte_mtr_stats_type
+ */
+ uint64_t stats_mask;
+
+	uint32_t ingress:1; /**< Rule applies to ingress traffic. */
+	uint32_t egress:1; /**< Rule applies to egress traffic. */
+ /**
+ * Instead of simply matching the properties of traffic as it would
+ * appear on a given DPDK port ID, enabling this attribute transfers
+ * a flow rule to the lowest possible level of any device endpoints
+ * found in the pattern.
+ *
+ * When supported, this effectively enables an application to
+ * re-route traffic not necessarily intended for it (e.g. coming
+ * from or addressed to different physical ports, VFs or
+ * applications) at the device level.
+ *
+ * It complements the behavior of some pattern items such as
+ * RTE_FLOW_ITEM_TYPE_PHY_PORT and is meaningless without them.
+ *
+ * When transferring flow rules, ingress and egress attributes keep
+ * their original meaning, as if processing traffic emitted or
+ * received by the application.
+ */
+ uint32_t transfer:1;
struct mlx5_meter_domains_infos *mfts;
/**< Flow table created for this meter. */
struct mlx5_flow_policer_stats policer_stats;
uint32_t ref_cnt; /**< Use count. */
};
+/* Fdir flow structure. */
+struct mlx5_fdir_flow {
+ LIST_ENTRY(mlx5_fdir_flow) next; /* Pointer to the next element. */
+ struct mlx5_fdir *fdir; /* Pointer to fdir. */
+ uint32_t rix_flow; /* Index to flow. */
+};
+
+#define MLX5_MAX_TUNNELS 256
+#define MLX5_TNL_MISS_RULE_PRIORITY 3
+#define MLX5_TNL_MISS_FDB_JUMP_GRP 0x1234faac
+
+/*
+ * When tunnel offload is active, all JUMP group ids are converted
+ * using the same method. That conversion is applied both to tunnel and
+ * regular rule types.
+ * Group ids used in tunnel rules are relative to their tunnel (!).
+ * An application can create any number of steer rules using the same
+ * tunnel, with a different group id in each rule.
+ * Each tunnel stores its groups internally in PMD tunnel object.
+ * Groups used in regular rules do not belong to any tunnel and are stored
+ * in tunnel hub.
+ */
+
+struct mlx5_flow_tunnel {
+	LIST_ENTRY(mlx5_flow_tunnel) chain; /**< Tunnel list entry. */
+	struct rte_flow_tunnel app_tunnel; /**< Application tunnel copy. */
+	uint32_t tunnel_id; /**< Unique tunnel ID. */
+	uint32_t refctn; /**< Reference counter. */
+	struct rte_flow_action action; /**< Cached tunnel set action. */
+	struct rte_flow_item item; /**< Cached tunnel match item. */
+	struct mlx5_hlist *groups; /**< Tunnel groups. */
+};
+
+/** PMD tunnel-related context. */
+struct mlx5_flow_tunnel_hub {
+	LIST_HEAD(, mlx5_flow_tunnel) tunnels; /**< List of tunnels. */
+	rte_spinlock_t sl; /* Tunnel list spinlock. */
+	struct mlx5_hlist *groups; /**< Non-tunnel groups. */
+};
+
+/* Convert a jump group to a flow table ID in tunnel rules. */
+struct tunnel_tbl_entry {
+	struct mlx5_hlist_entry hash;
+	uint32_t flow_table; /**< Flow table ID. */
+};
+
+static inline uint32_t
+tunnel_id_to_flow_tbl(uint32_t id)
+{
+ return id | (1u << 16);
+}
+
+static inline uint32_t
+tunnel_flow_tbl_to_id(uint32_t flow_tbl)
+{
+ return flow_tbl & ~(1u << 16);
+}
+
+union tunnel_tbl_key {
+ uint64_t val;
+ struct {
+ uint32_t tunnel_id;
+ uint32_t group;
+ };
+};
+
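+/*
+ * Sketch of the tunnel group conversion (illustrative only): a 64-bit key
+ * combining the tunnel id and the application group id selects a
+ * tunnel_tbl_entry in the tunnel (or tunnel hub) groups hash list; bit 16
+ * keeps the resulting table id distinguishable from standard table ids:
+ *
+ *	union tunnel_tbl_key key = {
+ *		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
+ *		.group = group,
+ *	};
+ *	... look up key.val in the groups hash list to get tte ...
+ *	*table = tte->flow_table;
+ *
+ * tunnel_id_to_flow_tbl() sets bit 16 when the table id is allocated, and
+ * tunnel_flow_tbl_to_id() strips it to recover the allocated id.
+ */
+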
+static inline struct mlx5_flow_tunnel_hub *
+mlx5_tunnel_hub(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ return priv->sh->tunnel_hub;
+}
+
+static inline bool
+is_tunnel_offload_active(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ return !!priv->config.dv_miss_info;
+}
+
+static inline bool
+is_flow_tunnel_match_rule(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused const struct rte_flow_attr *attr,
+ __rte_unused const struct rte_flow_item items[],
+ __rte_unused const struct rte_flow_action actions[])
+{
+ return (items[0].type == (typeof(items[0].type))
+ MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL);
+}
+
+static inline bool
+is_flow_tunnel_steer_rule(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused const struct rte_flow_attr *attr,
+ __rte_unused const struct rte_flow_item items[],
+ __rte_unused const struct rte_flow_action actions[])
+{
+ return (actions[0].type == (typeof(actions[0].type))
+ MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET);
+}
+
+static inline const struct mlx5_flow_tunnel *
+flow_actions_to_tunnel(const struct rte_flow_action actions[])
+{
+ return actions[0].conf;
+}
+
+static inline const struct mlx5_flow_tunnel *
+flow_items_to_tunnel(const struct rte_flow_item items[])
+{
+ return items[0].spec;
+}
+
/* Flow structure. */
struct rte_flow {
- TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- enum mlx5_flow_drv_type drv_type; /**< Driver type. */
- struct mlx5_flow_rss rss; /**< RSS context. */
- uint32_t counter; /**< Holds flow counter. */
- struct mlx5_flow_mreg_copy_resource *mreg_copy;
- /**< pointer to metadata register copy table resource. */
- uint16_t meter; /**< Holds flow meter id. */
+ ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
+	struct mlx5_shared_action_rss *shared_rss; /**< Shared RSS action. */
uint32_t dev_handles;
/**< Device flow handles that are part of the flow. */
- struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
- uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
- uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */
+ uint32_t drv_type:2; /**< Driver type. */
+	uint32_t fdir:1; /**< Set if the flow has an associated FDIR entry. */
+	uint32_t tunnel:1; /**< Set for tunnel offload flows. */
+ uint32_t meter:16; /**< Holds flow meter id. */
+ uint32_t rix_mreg_copy;
+ /**< Index to metadata register copy table resource. */
+ uint32_t counter; /**< Holds flow counter. */
+	uint32_t tunnel_id; /**< Tunnel id. */
+} __rte_packed;
+
+/*
+ * Define list of valid combinations of RX Hash fields
+ * (see enum ibv_rx_hash_fields).
+ */
+#define MLX5_RSS_HASH_IPV4 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
+#define MLX5_RSS_HASH_IPV4_TCP \
+	(MLX5_RSS_HASH_IPV4 | \
+	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
+#define MLX5_RSS_HASH_IPV4_UDP \
+	(MLX5_RSS_HASH_IPV4 | \
+	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
+#define MLX5_RSS_HASH_IPV6 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
+#define MLX5_RSS_HASH_IPV6_TCP \
+	(MLX5_RSS_HASH_IPV6 | \
+	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
+#define MLX5_RSS_HASH_IPV6_UDP \
+	(MLX5_RSS_HASH_IPV6 | \
+	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
+#define MLX5_RSS_HASH_NONE 0ULL
+
+/* Array of valid combinations of Rx hash fields for RSS. */
+static const uint64_t mlx5_rss_hash_fields[] = {
+ MLX5_RSS_HASH_IPV4,
+ MLX5_RSS_HASH_IPV4_TCP,
+ MLX5_RSS_HASH_IPV4_UDP,
+ MLX5_RSS_HASH_IPV6,
+ MLX5_RSS_HASH_IPV6_TCP,
+ MLX5_RSS_HASH_IPV6_UDP,
+ MLX5_RSS_HASH_NONE,
+};
+
+#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)
+
+/* Shared RSS action structure. */
+struct mlx5_shared_action_rss {
+ struct rte_flow_action_rss origin; /**< Original rte RSS action. */
+ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+ uint16_t *queue; /**< Queue indices to use. */
+ uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
+ /**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */
+ uint32_t hrxq_tunnel[MLX5_RSS_HASH_FIELDS_LEN];
+ /**< Hash RX queue indexes for tunneled RSS */
+};
+
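+/*
+ * The hrxq[] and hrxq_tunnel[] arrays above are indexed by the position of
+ * the hash-fields combination in mlx5_rss_hash_fields[], e.g. (sketch):
+ *
+ *	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++)
+ *		if (mlx5_rss_hash_fields[i] == hash_fields)
+ *			return tunnel ? shared->hrxq_tunnel[i] :
+ *					shared->hrxq[i];
+ */
+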
+struct rte_flow_shared_action {
+ LIST_ENTRY(rte_flow_shared_action) next;
+ /**< Pointer to the next element. */
+ uint32_t refcnt; /**< Atomically accessed refcnt. */
+ uint64_t type;
+ /**< Shared action type (see MLX5_FLOW_ACTION_SHARED_*). */
+ union {
+ struct mlx5_shared_action_rss rss;
+ /**< Shared RSS action. */
+ };
+};
+
+/* Thread specific flow workspace intermediate data. */
+struct mlx5_flow_workspace {
+ struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
+ struct mlx5_flow_rss_desc rss_desc[2];
+ uint32_t rssq_num[2]; /* Allocated queue num in rss_desc. */
+ int flow_idx; /* Intermediate device flow index. */
+ int flow_nested_idx; /* Intermediate device flow index, nested. */
};
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
bool external,
+ int hairpin,
struct rte_flow_error *error);
typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
uint32_t cnt,
bool clear, uint64_t *pkts,
uint64_t *bytes);
+typedef int (*mlx5_flow_get_aged_flows_t)
+ (struct rte_eth_dev *dev,
+ void **context,
+ uint32_t nb_contexts,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_action_validate_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error);
+typedef struct rte_flow_shared_action *(*mlx5_flow_action_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_action_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_action_update_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ const void *action_conf,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_sync_domain_t)
+ (struct rte_eth_dev *dev,
+ uint32_t domains,
+ uint32_t flags);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
mlx5_flow_prepare_t prepare;
mlx5_flow_counter_alloc_t counter_alloc;
mlx5_flow_counter_free_t counter_free;
mlx5_flow_counter_query_t counter_query;
+ mlx5_flow_get_aged_flows_t get_aged_flows;
+ mlx5_flow_action_validate_t action_validate;
+ mlx5_flow_action_create_t action_create;
+ mlx5_flow_action_destroy_t action_destroy;
+ mlx5_flow_action_update_t action_update;
+ mlx5_flow_sync_domain_t sync_domain;
};
-
-#define MLX5_CNT_CONTAINER(sh, batch, thread) (&(sh)->cmng.ccont \
- [(((sh)->cmng.mhi[batch] >> (thread)) & 0x1) * 2 + (batch)])
-#define MLX5_CNT_CONTAINER_UNUSED(sh, batch, thread) (&(sh)->cmng.ccont \
- [(~((sh)->cmng.mhi[batch] >> (thread)) & 0x1) * 2 + (batch)])
-
/* mlx5_flow.c */
-struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(uint32_t max_id);
-void mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool);
-uint32_t mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id);
-uint32_t mlx5_flow_id_release(struct mlx5_flow_id_pool *pool,
- uint32_t id);
-int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes,
- bool external, uint32_t group, bool fdb_def_rule,
- uint32_t *table, struct rte_flow_error *error);
-uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, int tunnel,
- uint64_t layer_types,
+struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
+__extension__
+struct flow_grp_info {
+ uint64_t external:1;
+ uint64_t transfer:1;
+ uint64_t fdb_def_rule:1;
+	/* Force standard group translation. */
+ uint64_t std_tbl_fix:1;
+};
+
+static inline bool
+tunnel_use_standard_attr_group_translate
+ (struct rte_eth_dev *dev,
+ const struct mlx5_flow_tunnel *tunnel,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[])
+{
+ bool verdict;
+
+	if (!is_tunnel_offload_active(dev))
+		/* No tunnel offload API. */
+		verdict = true;
+	else if (tunnel) {
+		/*
+		 * OvS uses a jump to group 0 in its tunnel steer rules.
+		 * If a tunnel steer rule starts from group 0
+		 * (attr.group == 0), that group must be translated with the
+		 * standard method. attr.group == 0 in a tunnel match rule
+		 * is translated with the tunnel method.
+		 */
+		verdict = !attr->group &&
+			  is_flow_tunnel_steer_rule(dev, attr, items, actions);
+	} else {
+		/*
+		 * Non-tunnel group translation uses the standard method
+		 * for the root group only: attr.group == 0.
+		 */
+		verdict = !attr->group;
+	}
+
+ return verdict;
+}
+
+int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
+ const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group, uint32_t *table,
+ struct flow_grp_info flags,
+ struct rte_flow_error *error);
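+/*
+ * Typical use (a sketch; the flow_grp_info fields are filled by the caller
+ * and priv->fdb_def_rule is an assumption of this example):
+ *
+ *	struct flow_grp_info grp_info = {
+ *		.external = !!external,
+ *		.transfer = !!attr->transfer,
+ *		.fdb_def_rule = !!priv->fdb_def_rule,
+ *		.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ *				(dev, tunnel, attr, items, actions),
+ *	};
+ *	ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
+ *				       grp_info, error);
+ */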
+uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
+ int tunnel, uint64_t layer_types,
uint64_t hash_fields);
+int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
uint32_t subpriority);
int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
const struct rte_flow_action *mlx5_flow_find_action
(const struct rte_flow_action *actions,
enum rte_flow_action_type action);
+int mlx5_validate_action_rss(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error);
int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
struct rte_flow_error *error);
const struct rte_flow_attr *attr,
uint64_t item_flags,
struct rte_flow_error *error);
+int mlx5_flow_validate_action_default_miss(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
int mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
const struct rte_flow_attr *attributes,
struct rte_flow_error *error);
const uint8_t *mask,
const uint8_t *nic_mask,
unsigned int size,
+ bool range_accepted,
struct rte_flow_error *error);
int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
- uint64_t item_flags,
+ uint64_t item_flags, bool ext_vlan_sup,
struct rte_flow_error *error);
int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
uint64_t item_flags,
uint64_t last_item,
uint16_t ether_type,
const struct rte_flow_item_ipv4 *acc_mask,
+ bool range_accepted,
struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
uint64_t item_flags,
uint64_t item_flags,
struct rte_eth_dev *dev,
struct rte_flow_error *error);
+int mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
+ const struct rte_flow_item_ecpri *acc_mask,
+ struct rte_flow_error *error);
struct mlx5_meter_domains_infos *mlx5_flow_create_mtr_tbls
(struct rte_eth_dev *dev,
const struct mlx5_flow_meter *fm);
const struct rte_flow_attr *attr);
int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
struct rte_mtr_error *error);
+int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
+struct rte_flow_shared_action *mlx5_flow_get_shared_rss(struct rte_flow *flow);
+int mlx5_shared_action_flush(struct rte_eth_dev *dev);
+void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
+int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
+
+/* Hash list callbacks for flow tables: */
+struct mlx5_hlist_entry *flow_dv_tbl_create_cb(struct mlx5_hlist *list,
+ uint64_t key, void *entry_ctx);
+void flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry);
+struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
+ uint32_t table_id, uint8_t egress, uint8_t transfer,
+ bool external, const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group_id, uint8_t dummy, struct rte_flow_error *error);
+
+struct mlx5_hlist_entry *flow_dv_tag_create_cb(struct mlx5_hlist *list,
+ uint64_t key, void *cb_ctx);
+void flow_dv_tag_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry);
+
+int flow_dv_modify_match_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry,
+ uint64_t key, void *cb_ctx);
+struct mlx5_hlist_entry *flow_dv_modify_create_cb(struct mlx5_hlist *list,
+ uint64_t key, void *ctx);
+void flow_dv_modify_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry);
+
+struct mlx5_hlist_entry *flow_dv_mreg_create_cb(struct mlx5_hlist *list,
+ uint64_t key, void *ctx);
+void flow_dv_mreg_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry);
+
+int flow_dv_encap_decap_match_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry,
+ uint64_t key, void *cb_ctx);
+struct mlx5_hlist_entry *flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
+ uint64_t key, void *cb_ctx);
+void flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry);
+
+int flow_dv_matcher_match_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *ctx);
+struct mlx5_cache_entry *flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *ctx);
+void flow_dv_matcher_remove_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry);
+
+int flow_dv_port_id_match_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+struct mlx5_cache_entry *flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+void flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry);
+
+int flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+struct mlx5_cache_entry *flow_dv_push_vlan_create_cb
+ (struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+void flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry);
+
+int flow_dv_sample_match_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+struct mlx5_cache_entry *flow_dv_sample_create_cb
+ (struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+void flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry);
+
+int flow_dv_dest_array_match_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+struct mlx5_cache_entry *flow_dv_dest_array_create_cb
+ (struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *cb_ctx);
+void flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry);
#endif /* RTE_PMD_MLX5_FLOW_H_ */