MLX5_COPY_MARK,
MLX5_MTR_COLOR,
MLX5_MTR_SFX,
+ MLX5_ASO_FLOW_HIT,
};
/* Default queue number. */
#define MLX5_GENEVE_OPT_LEN_0 14
#define MLX5_GENEVE_OPT_LEN_1 63
-#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
- sizeof(struct rte_flow_item_ipv4))
+#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \
+ sizeof(struct rte_ipv4_hdr))
/* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */
#define MLX5_IPV4_FRAG_OFFSET_MASK \
#define MLX5_ENCAP_MAX_LEN 132
-/* Encap/decap resource key of the hash organization. */
-union mlx5_flow_encap_decap_key {
- struct {
- uint32_t ft_type:8; /**< Flow table type, Rx or Tx. */
- uint32_t refmt_type:8; /**< Header reformat type. */
- uint32_t buf_size:8; /**< Encap buf size. */
- uint32_t table_level:8; /**< Root table or not. */
- uint32_t cksum; /**< Encap buf check sum. */
- };
- uint64_t v64; /**< full 64bits value of key */
-};
-
/* Encap/decap resource structure. */
struct mlx5_flow_dv_encap_decap_resource {
struct mlx5_hlist_entry entry;
/**< Tag action object. */
uint32_t refcnt; /**< Reference counter. */
uint32_t idx; /**< Index for the index memory pool. */
+ uint32_t tag_id; /**< Tag ID. */
};
/*
/* List entry for device flows. */
uint32_t idx;
uint32_t rix_flow; /* Built flow for copy. */
+ uint32_t mark_id;
};
/* Table tunnel parameter. */
/**< tunnel offload */
const struct mlx5_flow_tunnel *tunnel;
uint32_t group_id;
- bool external;
- bool tunnel_offload; /* Tunnel offlod table or not. */
- bool is_egress; /**< Egress table. */
+ uint32_t external:1;
+ uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
+ uint32_t is_egress:1; /**< Egress table. */
+ uint32_t is_transfer:1; /**< Transfer table. */
+ uint32_t dummy:1; /**< DR table. */
+ uint32_t reserve:27; /**< Reserved for future use. */
+ uint32_t table_id; /**< Table ID. */
};
/* Sub rdma-core actions list. */
void *verbs_action; /**< Verbs sample action object. */
void **sub_actions; /**< Sample sub-action array. */
};
+ struct rte_eth_dev *dev; /**< Device registers the action. */
uint32_t idx; /** Sample object index. */
uint8_t ft_type; /** Flow Table Type */
uint32_t ft_id; /** Flow Table Level */
uint32_t idx; /** Destination array action object index. */
uint8_t ft_type; /** Flow Table Type */
uint8_t num_of_dest; /**< Number of destination actions. */
+ struct rte_eth_dev *dev; /**< Device registers the action. */
void *action; /**< Pointer to the rdma core action. */
struct mlx5_flow_sub_actions_idx sample_idx[MLX5_MAX_DEST_NUM];
/**< Action index resources. */
/**< Action resources. */
};
-/* Verbs specification header. */
-struct ibv_spec_header {
- enum ibv_flow_spec_type type;
- uint16_t size;
-};
-
/* PMD flow priority for tunnel */
#define MLX5_TUNNEL_PRIO_GET(rss_desc) \
((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)
uint32_t rix_srss;
/**< Indicates shared RSS fate action. */
};
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_flow_handle_dv dvh;
#endif
} __rte_packed;
* structure in Verbs. No DV flows attributes will be accessed.
* Macro offsetof() could also be used here.
*/
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
#define MLX5_FLOW_HANDLE_VERBS_SIZE \
(sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
#else
/**< Pointer to the destination array resource. */
};
+#ifdef HAVE_INFINIBAND_VERBS_H
/*
* Maximal Verbs flow specifications & actions size.
* Some elements are mutually exclusive, but enough space should be allocated.
uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
/**< Specifications & actions buffer of verbs flow. */
};
+#endif /* HAVE_INFINIBAND_VERBS_H */
/** Maximal number of device sub-flows supported. */
#define MLX5_NUM_MAX_DEV_FLOWS 32
uint8_t skip_scale:1;
/**< 1 if skip the scale the table with factor. */
union {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_flow_dv_workspace dv;
#endif
+#ifdef HAVE_INFINIBAND_VERBS_H
struct mlx5_flow_verbs_workspace verbs;
+#endif
};
struct mlx5_flow_handle *handle;
uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
/** PMD tunnel related context */
struct mlx5_flow_tunnel_hub {
+ /* Tunnels list
+ * Access to the list MUST be MT protected
+ */
LIST_HEAD(, mlx5_flow_tunnel) tunnels;
- rte_spinlock_t sl; /* Tunnel list spinlock. */
+ /* protect access to the tunnels list */
+ rte_spinlock_t sl;
struct mlx5_hlist *groups; /** non tunnel groups */
};
struct tunnel_tbl_entry {
struct mlx5_hlist_entry hash;
uint32_t flow_table;
+ uint32_t tunnel_id;
+ uint32_t group;
};
static inline uint32_t
uint32_t refcnt; /**< Atomically accessed refcnt. */
struct rte_flow_action_rss origin; /**< Original rte RSS action. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
- uint16_t *queue; /**< Queue indices to use. */
+ struct mlx5_ind_table_obj *ind_tbl;
+ /**< Hash RX queues (hrxq, hrxq_tunnel fields) indirection table. */
uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
/**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */
uint32_t hrxq_tunnel[MLX5_RSS_HASH_FIELDS_LEN];
/**< Hash RX queue indexes for tunneled RSS */
+ rte_spinlock_t action_rss_sl; /**< Shared RSS action spinlock. */
};
struct rte_flow_shared_action {
struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
struct mlx5_flow_rss_desc rss_desc;
uint32_t rssq_num; /* Allocated queue num in rss_desc. */
- int flow_idx; /* Intermediate device flow index. */
+ uint32_t flow_idx; /* Intermediate device flow index. */
};
struct mlx5_flow_split_info {
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
- struct flow_grp_info flags,
- struct rte_flow_error *error);
+ const struct flow_grp_info *flags,
+ struct rte_flow_error *error);
uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
int tunnel, uint64_t layer_types,
uint64_t hash_fields);
/* Hash list callbacks for flow tables: */
struct mlx5_hlist_entry *flow_dv_tbl_create_cb(struct mlx5_hlist *list,
uint64_t key, void *entry_ctx);
+int flow_dv_tbl_match_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry, uint64_t key,
+ void *cb_ctx);
void flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
struct mlx5_hlist_entry *entry);
struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
struct mlx5_hlist_entry *flow_dv_tag_create_cb(struct mlx5_hlist *list,
uint64_t key, void *cb_ctx);
+int flow_dv_tag_match_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry, uint64_t key,
+ void *cb_ctx);
void flow_dv_tag_remove_cb(struct mlx5_hlist *list,
struct mlx5_hlist_entry *entry);
struct mlx5_hlist_entry *flow_dv_mreg_create_cb(struct mlx5_hlist *list,
uint64_t key, void *ctx);
+int flow_dv_mreg_match_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry, uint64_t key,
+ void *cb_ctx);
void flow_dv_mreg_remove_cb(struct mlx5_hlist *list,
struct mlx5_hlist_entry *entry);