MLX5_MTR_ID,
MLX5_ASO_FLOW_HIT,
MLX5_ASO_CONNTRACK,
+ MLX5_SAMPLE_ID,
};
#define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32)
#define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)
-/* INTEGRITY item bit */
-#define MLX5_FLOW_ITEM_INTEGRITY (UINT64_C(1) << 34)
+/* INTEGRITY item bits */
+#define MLX5_FLOW_ITEM_OUTER_INTEGRITY (UINT64_C(1) << 34)
+#define MLX5_FLOW_ITEM_INNER_INTEGRITY (UINT64_C(1) << 35)
+#define MLX5_FLOW_ITEM_INTEGRITY \
+ (MLX5_FLOW_ITEM_OUTER_INTEGRITY | MLX5_FLOW_ITEM_INNER_INTEGRITY)
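/*
 * Illustrative sketch, not part of the patch: with the INTEGRITY bit split,
 * checking code can tell which encapsulation level carried the item while
 * the combined MLX5_FLOW_ITEM_INTEGRITY mask still matches either level.
 * The helper name is hypothetical.
 */
static inline int
flow_integrity_is_inner_only(uint64_t item_flags)
{
	return (item_flags & MLX5_FLOW_ITEM_INTEGRITY) ==
	       MLX5_FLOW_ITEM_INNER_INTEGRITY;
}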
/* Conntrack item. */
-#define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 35)
+#define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36)
+
+/* Flex item bits. */
+#define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37)
+#define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)
/* Tunnel Masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
- MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)
+ MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+ MLX5_FLOW_ITEM_FLEX_TUNNEL)
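/*
 * Illustrative sketch (hypothetical helper): folding MLX5_FLOW_ITEM_FLEX_TUNNEL
 * into the tunnel mask means a flex item configured as a tunnel header is
 * picked up by generic tunnel checks with no additional code:
 */
static inline int
flow_pattern_has_tunnel(uint64_t item_flags)
{
	return (item_flags & MLX5_FLOW_LAYER_TUNNEL) != 0;
}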
/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
/* Valid layer type for IPV4 RSS. */
#define MLX5_IPV4_LAYER_TYPES \
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_OTHER)
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
/* IBV hash source bits for IPV4. */
#define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
/* Valid layer type for IPV6 RSS. */
#define MLX5_IPV6_LAYER_TYPES \
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
+ (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
/* IBV hash source bits for IPV6. */
#define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
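/*
 * Illustrative sketch using only the macros above: reduce a requested
 * RTE_ETH_RSS_* type mask to the IBV hash source bits per L3 family.
 */
static inline uint64_t
flow_rss_l3_ibv_hash(uint64_t rss_types)
{
	uint64_t hash_fields = 0;

	if (rss_types & MLX5_IPV4_LAYER_TYPES)
		hash_fields |= MLX5_IPV4_IBV_RX_HASH;
	if (rss_types & MLX5_IPV6_LAYER_TYPES)
		hash_fields |= MLX5_IPV6_IBV_RX_HASH;
	return hash_fields;
}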
MLX5_FLOW_TYPE_MIN,
MLX5_FLOW_TYPE_DV,
MLX5_FLOW_TYPE_VERBS,
+ MLX5_FLOW_TYPE_HW,
MLX5_FLOW_TYPE_MAX,
};
const struct mlx5_flow_tunnel *tunnel;
uint32_t group_id;
uint32_t external:1;
- uint32_t tunnel_offload:1; /* Tunnel offlod table or not. */
+ uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
uint32_t is_egress:1; /**< Egress table. */
uint32_t is_transfer:1; /**< Transfer table. */
uint32_t dummy:1; /**< DR table. */
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
void *drv_flow; /**< pointer to driver flow object. */
uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
- uint32_t is_meter_flow_id:1; /**< Indate if flow_id is for meter. */
- uint32_t mark:1; /**< Metadate rxq mark flag. */
+ uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
uint32_t fate_action:3; /**< Fate action type. */
+ uint32_t flex_item; /**< Referenced flex item bitmask. */
union {
uint32_t rix_hrxq; /**< Hash Rx queue object index. */
uint32_t rix_jump; /**< Index to the jump action resource. */
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
const struct mlx5_priv *priv = dev->data->dev_private;
- return !!priv->config.dv_miss_info;
+ return !!priv->sh->config.dv_miss_info;
#else
RTE_SET_USED(dev);
return false;
uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. */
} __rte_packed;
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+
+/* Flow pattern template struct. */
+struct rte_flow_pattern_template {
+ LIST_ENTRY(rte_flow_pattern_template) next;
+ /* Template attributes. */
+ struct rte_flow_pattern_template_attr attr;
+ struct mlx5dr_match_template *mt; /* mlx5 match template. */
+ uint32_t refcnt; /* Reference counter. */
+};
+
+#endif
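/*
 * Illustrative sketch (hypothetical helper, not in the patch): refcnt lets
 * destroy refuse to free a template that a template table still references,
 * assuming the creator holds the initial reference and <errno.h> is visible.
 */
static inline int
flow_hw_pattern_template_can_destroy(struct rte_flow_pattern_template *pt)
{
	return __atomic_load_n(&pt->refcnt, __ATOMIC_RELAXED) > 1 ? -EBUSY : 0;
}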
+
/*
* Define list of valid combinations of RX Hash fields
* (see enum ibv_rx_hash_fields).
/* The final policy when meter policy is hierarchy. */
uint32_t skip_matcher_reg:1;
/* Indicates if need to skip matcher register in translate. */
+ uint32_t mark:1; /* Indicates if flow contains mark action. */
};
struct mlx5_flow_split_info {
- bool external;
+ uint32_t external:1;
/**< True if flow is created by request external to PMD. */
- uint8_t skip_scale; /**< Skip the scale the table with factor. */
+ uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */
+ uint32_t skip_scale:8; /**< Skip scaling the table with a factor. */
uint32_t flow_idx; /**< This memory pool index to the flow. */
- uint32_t prefix_mark; /**< Prefix subflow mark flag. */
- uint64_t prefix_layers; /**< Prefix subflow layers. */
uint32_t table_id; /**< Flow table identifier. */
+ uint64_t prefix_layers; /**< Prefix subflow layers. */
};
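/*
 * Illustrative initializer (arbitrary values): the boolean flags are now
 * bit-fields sharing one 32-bit word and the 64-bit prefix_layers moved
 * last, which should shrink the structure from 32 to 24 bytes on a typical
 * LP64 ABI.
 */
static const struct mlx5_flow_split_info split_info_example = {
	.external = 1,
	.prefix_mark = 0,
	.skip_scale = 0,
	.flow_idx = 0,
	.table_id = 0,
	.prefix_layers = MLX5_FLOW_LAYER_GENEVE,
};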
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
uint32_t cnt,
bool clear, uint64_t *pkts,
- uint64_t *bytes);
+ uint64_t *bytes, void **action);
typedef int (*mlx5_flow_get_aged_flows_t)
(struct rte_eth_dev *dev,
void **context,
(struct rte_eth_dev *dev);
typedef void (*mlx5_flow_destroy_def_policy_t)
(struct rte_eth_dev *dev);
+typedef int (*mlx5_flow_discover_priorities_t)
+ (struct rte_eth_dev *dev,
+ const uint16_t *vprio, int vprio_n);
+typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_release_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_update_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_info_get_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_port_configure_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *err);
+typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_pattern_template_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_pattern_template *template,
+ struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
mlx5_flow_action_update_t action_update;
mlx5_flow_action_query_t action_query;
mlx5_flow_sync_domain_t sync_domain;
+ mlx5_flow_discover_priorities_t discover_priorities;
+ mlx5_flow_item_create_t item_create;
+ mlx5_flow_item_release_t item_release;
+ mlx5_flow_item_update_t item_update;
+ mlx5_flow_info_get_t info_get;
+ mlx5_flow_port_configure_t configure;
+ mlx5_flow_pattern_template_create_t pattern_template_create;
+ mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
};
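/*
 * Illustrative sketch: the new callbacks are dispatched like the existing
 * ones, through the per-engine ops table. flow_get_drv_ops() follows the
 * naming used in mlx5_flow.c; treat this wrapper itself as hypothetical.
 */
static int
flow_port_configure_sketch(struct rte_eth_dev *dev,
			   const struct rte_flow_port_attr *port_attr,
			   uint16_t nb_queue,
			   const struct rte_flow_queue_attr *queue_attr[],
			   struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops =
		flow_get_drv_ops(MLX5_FLOW_TYPE_HW);

	return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
}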
/* mlx5_flow.c */
/* Decrease to original index. */
idx--;
MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
+ rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
+ rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);
return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
}
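/*
 * Illustrative writer side (hypothetical signature, assumed to live in the
 * pool resize path): the read lock taken above keeps pools[] stable while a
 * concurrent resize swaps in the enlarged array under the write lock.
 */
static void
flow_aso_mtr_pools_resize_sketch(struct mlx5_aso_mtr_pools_mng *pools_mng,
				 struct mlx5_aso_mtr_pool **new_pools,
				 uint32_t new_n)
{
	rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
	pools_mng->pools = new_pools;
	pools_mng->n = new_n;
	rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
}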
return ct;
}
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+ if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+ return RTE_ETHER_TYPE_TEB;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ return RTE_ETHER_TYPE_IPV4;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ return RTE_ETHER_TYPE_IPV6;
+ else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+ return RTE_ETHER_TYPE_MPLS;
+ return 0;
+}
+
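/*
 * Illustrative usage (hypothetical wrapper): an inner Ethernet layer maps to
 * Transparent Ethernet Bridging (0x6558), matching the first branch above.
 */
static inline int
flow_tunnel_carries_ethernet(uint64_t pattern_flags)
{
	return mlx5_translate_tunnel_etypes(pattern_flags) ==
	       RTE_ETHER_TYPE_TEB;
}
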
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
uint32_t mlx5_get_lowest_priority(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr);
uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- uint32_t subpriority);
+ const struct rte_flow_attr *attr,
+ uint32_t subpriority, bool external);
int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
enum mlx5_feature_name feature,
uint32_t id,
struct mlx5_flow_meter_policy *mtr_policy);
int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
int mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev);
+int mlx5_action_handle_attach(struct rte_eth_dev *dev);
+int mlx5_action_handle_detach(struct rte_eth_dev *dev);
int mlx5_action_handle_flush(struct rte_eth_dev *dev);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
mlx5_get_tof(const struct rte_flow_item *items,
const struct rte_flow_action *actions,
enum mlx5_tof_rule_type *rule_type);
-
-
+void
+flow_hw_resource_release(struct rte_eth_dev *dev);
#endif /* RTE_PMD_MLX5_FLOW_H_ */