MLX5_INDIRECT_ACTION_TYPE_RSS,
MLX5_INDIRECT_ACTION_TYPE_AGE,
MLX5_INDIRECT_ACTION_TYPE_COUNT,
+ MLX5_INDIRECT_ACTION_TYPE_CT,
};
+/*
+ * The 32-bit indirect CT action handle is split into fields:
+ * bits 30-31: action type, bits 22-29: owner port, bits 0-21: index.
+ * Hence at most 256 ports and 4M (2^22) CT actions per port are supported.
+ */
+#define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x100
+
+#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 22
+#define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1)
+
+/* 30-31: type, 22-29: owner port, 0-21: index. */
+/* NOTE(review): (index) is not masked to 22 bits here; callers must
+ * guarantee it fits, otherwise the owner/type fields get corrupted. */
+#define MLX5_INDIRECT_ACT_CT_GEN_IDX(owner, index) \
+ ((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | \
+ (((owner) & MLX5_INDIRECT_ACT_CT_OWNER_MASK) << \
+ MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) | (index))
+
+/* Extract the owner port id (bits 22-29) from a CT action handle. */
+#define MLX5_INDIRECT_ACT_CT_GET_OWNER(index) \
+ (((index) >> MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) & \
+ MLX5_INDIRECT_ACT_CT_OWNER_MASK)
+
+/* Extract the per-port action index (bits 0-21) from a CT action handle. */
+#define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \
+ ((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1))
+
/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
enum modify_reg id;
/* INTEGRITY item bit */
#define MLX5_FLOW_ITEM_INTEGRITY (UINT64_C(1) << 34)
+/* Conntrack item. */
+#define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 35)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
#define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38)
#define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)
#define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
+#define MLX5_FLOW_ACTION_CT (1ull << 41)
#define MLX5_FLOW_FATE_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
/* Maximum number of fields to modify in MODIFY_FIELD */
#define MLX5_ACT_MAX_MOD_FIELDS 5
+/* Syndrome bits definition for connection tracking.
+ * Bits 6-7 encode the packet disposition (valid/invalid/trap);
+ * bits 0-1 are individual flag bits.
+ */
+#define MLX5_CT_SYNDROME_VALID (0x0 << 6)
+#define MLX5_CT_SYNDROME_INVALID (0x1 << 6)
+#define MLX5_CT_SYNDROME_TRAP (0x2 << 6)
+#define MLX5_CT_SYNDROME_STATE_CHANGE (0x1 << 1)
+#define MLX5_CT_SYNDROME_BAD_PACKET (0x1 << 0)
+
enum mlx5_flow_drv_type {
MLX5_FLOW_TYPE_MIN,
MLX5_FLOW_TYPE_DV,
/** Maximal number of device sub-flows supported. */
#define MLX5_NUM_MAX_DEV_FLOWS 32
+/**
+ * Tunnel offload rule types.
+ */
+enum mlx5_tof_rule_type {
+	MLX5_TUNNEL_OFFLOAD_NONE = 0, /* Not a tunnel offload rule. */
+	MLX5_TUNNEL_OFFLOAD_SET_RULE, /* Tunnel set (steer) rule. */
+	MLX5_TUNNEL_OFFLOAD_MATCH_RULE, /* Tunnel match rule. */
+	MLX5_TUNNEL_OFFLOAD_MISS_RULE, /* Tunnel miss rule. */
+};
+
/** Device flow structure. */
__extension__
struct mlx5_flow {
struct mlx5_flow_handle *handle;
uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
const struct mlx5_flow_tunnel *tunnel;
+ enum mlx5_tof_rule_type tof_type;
};
/* Flow meter state. */
#define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
#define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u
+#define MLX5_CT_POLL_WQE_CQE_TIMES MLX5_MTR_POLL_WQE_CQE_TIMES
+
#define MLX5_MAN_WIDTH 8
/* Legacy Meter parameter structure. */
struct mlx5_legacy_flow_meter {
}
static inline bool
-is_tunnel_offload_active(struct rte_eth_dev *dev)
+is_tunnel_offload_active(const struct rte_eth_dev *dev)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_priv *priv = dev->data->dev_private;
return !!priv->config.dv_miss_info;
#else
RTE_SET_USED(dev);
}
static inline bool
-is_flow_tunnel_match_rule(__rte_unused struct rte_eth_dev *dev,
- __rte_unused const struct rte_flow_attr *attr,
- __rte_unused const struct rte_flow_item items[],
- __rte_unused const struct rte_flow_action actions[])
+/* True when the flow was pre-classified as a tunnel offload match rule. */
+is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
{
- return (items[0].type == (typeof(items[0].type))
- MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL);
+ return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
}
static inline bool
-is_flow_tunnel_steer_rule(__rte_unused struct rte_eth_dev *dev,
- __rte_unused const struct rte_flow_attr *attr,
- __rte_unused const struct rte_flow_item items[],
- __rte_unused const struct rte_flow_action actions[])
+/* True when the flow was pre-classified as a tunnel offload set rule. */
+is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
{
- return (actions[0].type == (typeof(actions[0].type))
- MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET);
+ return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
}
static inline const struct mlx5_flow_tunnel *
uint32_t drv_type:2; /**< Driver type. */
uint32_t tunnel:1;
uint32_t meter:24; /**< Holds flow meter id. */
+ uint32_t indirect_type:2; /**< Indirect action type. */
uint32_t rix_mreg_copy;
/**< Index to metadata register copy table resource. */
uint32_t counter; /**< Holds flow counter. */
uint32_t tunnel_id; /**< Tunnel id */
- uint32_t age; /**< Holds ASO age bit index. */
+ union {
+ uint32_t age; /**< Holds ASO age bit index. */
+ uint32_t ct; /**< Holds ASO CT index. */
+ };
uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. > */
} __rte_packed;
(struct rte_eth_dev *dev,
struct mlx5_flow_meter_policy *mtr_policy,
struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
+typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
typedef uint32_t (*mlx5_flow_mtr_alloc_t)
(struct rte_eth_dev *dev);
typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
mlx5_flow_create_def_policy_t create_def_policy;
mlx5_flow_destroy_def_policy_t destroy_def_policy;
mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
+ mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
mlx5_flow_counter_alloc_t counter_alloc;
mlx5_flow_counter_free_t counter_free;
mlx5_flow_counter_query_t counter_query;
static inline bool
tunnel_use_standard_attr_group_translate
- (struct rte_eth_dev *dev,
- const struct mlx5_flow_tunnel *tunnel,
+ (const struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[])
+ const struct mlx5_flow_tunnel *tunnel,
+ enum mlx5_tof_rule_type tof_rule_type)
{
bool verdict;
* method
*/
verdict = !attr->group &&
- is_flow_tunnel_steer_rule(dev, attr, items, actions);
+ is_flow_tunnel_steer_rule(tof_rule_type);
} else {
/*
* non-tunnel group translation uses standard method for
return (test.value == 0);
}
+/*
+ * Get ASO CT action by device and index.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] idx
+ *   Index to the ASO CT action, 1-based (decremented before use).
+ *
+ * @return
+ *   The specified ASO CT action pointer.
+ */
+static inline struct mlx5_aso_ct_action *
+flow_aso_ct_get_by_dev_idx(struct rte_eth_dev *dev, uint32_t idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+	struct mlx5_aso_ct_pool *pool;
+
+	/* Convert the 1-based external index to the 0-based internal one. */
+	idx--;
+	/* NOTE(review): mng->n is read without holding resize_rwl -- confirm
+	 * a concurrent resize can only grow, never shrink, the pool array. */
+	MLX5_ASSERT((idx / MLX5_ASO_CT_ACTIONS_PER_POOL) < mng->n);
+	/* Bit operation AND could be used. */
+	rte_rwlock_read_lock(&mng->resize_rwl);
+	pool = mng->pools[idx / MLX5_ASO_CT_ACTIONS_PER_POOL];
+	rte_rwlock_read_unlock(&mng->resize_rwl);
+	return &pool->actions[idx % MLX5_ASO_CT_ACTIONS_PER_POOL];
+}
+
+/*
+ * Get ASO CT action by owner & index.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] own_idx
+ *   Combined handle: owner port id and per-port ASO CT action index
+ *   (see MLX5_INDIRECT_ACT_CT_GEN_IDX).
+ *
+ * @return
+ *   The specified ASO CT action pointer, or NULL when the cross-port
+ *   lookup is not allowed (device not started / peer mismatch).
+ */
+static inline struct mlx5_aso_ct_action *
+flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_ct_action *ct;
+	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
+	uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
+
+	if (owner == PORT_ID(priv)) {
+		/* The action belongs to this port: direct lookup. */
+		ct = flow_aso_ct_get_by_dev_idx(dev, idx);
+	} else {
+		/* Created by another port: look it up on the owner device. */
+		struct rte_eth_dev *owndev = &rte_eth_devices[owner];
+
+		/* NOTE(review): this assert runs after owner was already used
+		 * to index rte_eth_devices[] above; consider checking first
+		 * (owner is at most 255 by the 8-bit mask, so it is safe only
+		 * while RTE_MAX_ETHPORTS >= 256 is not assumed). */
+		MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
+		if (dev->data->dev_started != 1)
+			return NULL;
+		ct = flow_aso_ct_get_by_dev_idx(owndev, idx);
+		/* Usable only if the owner registered this port as its peer. */
+		if (ct->peer != PORT_ID(priv))
+			return NULL;
+	}
+	return ct;
+}
+
+
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
(struct rte_eth_dev *dev,
struct mlx5_flow_meter_policy *mtr_policy,
struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
+void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
int mlx5_action_handle_flush(struct rte_eth_dev *dev);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
struct mlx5_flow_handle *dev_handle);
+const struct mlx5_flow_tunnel *
+mlx5_get_tof(const struct rte_flow_item *items,
+ const struct rte_flow_action *actions,
+ enum mlx5_tof_rule_type *rule_type);
+
+
#endif /* RTE_PMD_MLX5_FLOW_H_ */