X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.h;h=7fec79afb339e0904aba2b7c475ff40460be6685;hb=8e83ba285abe4341b7666927d3fc265b35446c06;hp=8fbc37feb7e519eedc8bc2630fc0f35560251192;hpb=ec4e11d41d129ebc7c395b567827492e56fb08b7;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8fbc37feb7..7fec79afb3 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -109,6 +109,7 @@ enum mlx5_feature_name {
 	MLX5_MTR_ID,
 	MLX5_ASO_FLOW_HIT,
 	MLX5_ASO_CONNTRACK,
+	MLX5_SAMPLE_ID,
 };
 
 /* Default queue number. */
@@ -179,6 +180,11 @@ enum mlx5_feature_name {
 /* Conntrack item. */
 #define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36)
 
+/* Flex item */
+#define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37)
+#define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -193,7 +199,8 @@ enum mlx5_feature_name {
 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
 	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
 	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
-	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)
+	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+	 MLX5_FLOW_ITEM_FLEX_TUNNEL)
 
 /* Inner Masks. */
 #define MLX5_FLOW_LAYER_INNER_L3 \
@@ -591,7 +598,7 @@ struct mlx5_flow_tbl_data_entry {
 	const struct mlx5_flow_tunnel *tunnel;
 	uint32_t group_id;
 	uint32_t external:1;
-	uint32_t tunnel_offload:1; /* Tunnel offlod table or not. */
+	uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
 	uint32_t is_egress:1; /**< Egress table. */
 	uint32_t is_transfer:1; /**< Transfer table. */
 	uint32_t dummy:1; /**< DR table. */
@@ -689,9 +696,9 @@ struct mlx5_flow_handle {
 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
 	void *drv_flow; /**< pointer to driver flow object. */
 	uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
-	uint32_t is_meter_flow_id:1; /**< Indate if flow_id is for meter. */
-	uint32_t mark:1; /**< Metadate rxq mark flag. */
+	uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
 	uint32_t fate_action:3; /**< Fate action type. */
+	uint32_t flex_item; /**< referenced Flex Item bitmask. */
 	union {
 		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
 		uint32_t rix_jump; /**< Index to the jump action resource. */
@@ -1100,16 +1107,17 @@ struct mlx5_flow_workspace {
 	/* The final policy when meter policy is hierarchy. */
 	uint32_t skip_matcher_reg:1;
 	/* Indicates if need to skip matcher register in translate. */
+	uint32_t mark:1; /* Indicates if flow contains mark action. */
 };
 
 struct mlx5_flow_split_info {
-	bool external;
+	uint32_t external:1;
 	/**< True if flow is created by request external to PMD. */
-	uint8_t skip_scale; /**< Skip the scale the table with factor. */
+	uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */
+	uint32_t skip_scale:8; /**< Skip the scale the table with factor. */
 	uint32_t flow_idx; /**< This memory pool index to the flow. */
-	uint32_t prefix_mark; /**< Prefix subflow mark flag. */
-	uint64_t prefix_layers; /**< Prefix subflow layers. */
 	uint32_t table_id; /**< Flow table identifier. */
+	uint64_t prefix_layers; /**< Prefix subflow layers. */
 };
 
 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
@@ -1235,6 +1243,19 @@ typedef void (*mlx5_flow_destroy_def_policy_t)
 typedef int (*mlx5_flow_discover_priorities_t)
 			(struct rte_eth_dev *dev,
 			 const uint16_t *vprio, int vprio_n);
+typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_release_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_handle *handle,
+			 struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_update_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_handle *handle,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct rte_flow_error *error);
 
 struct mlx5_flow_driver_ops {
 	mlx5_flow_validate_t validate;
@@ -1270,6 +1291,9 @@ struct mlx5_flow_driver_ops {
 	mlx5_flow_action_query_t action_query;
 	mlx5_flow_sync_domain_t sync_domain;
 	mlx5_flow_discover_priorities_t discover_priorities;
+	mlx5_flow_item_create_t item_create;
+	mlx5_flow_item_release_t item_release;
+	mlx5_flow_item_update_t item_update;
 };
 
 /* mlx5_flow.c */
@@ -1426,6 +1450,20 @@ flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
 	return ct;
 }
 
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+		return RTE_ETHER_TYPE_TEB;
+	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+		return RTE_ETHER_TYPE_IPV4;
+	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+		return RTE_ETHER_TYPE_IPV6;
+	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+		return RTE_ETHER_TYPE_MPLS;
+	return 0;
+}
+
 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 			     const struct mlx5_flow_tunnel *tunnel,
 			     uint32_t group, uint32_t *table,
@@ -1728,6 +1766,4 @@ const struct mlx5_flow_tunnel *
 mlx5_get_tof(const struct rte_flow_item *items,
 	     const struct rte_flow_action *actions,
 	     enum mlx5_tof_rule_type *rule_type);
-
-
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
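
Note (reading aid, not part of the patch): the hunk at new line 1450 adds the mlx5_translate_tunnel_etypes() helper, which maps the inner-layer bits of a pattern-flags word to the EtherType of the encapsulated payload. The standalone sketch below mirrors only that selection logic so it can be compiled and run without DPDK; the DEMO_LAYER_* bit positions are illustrative placeholders, not the real MLX5_FLOW_LAYER_* values, while the EtherType constants carry the standard values that the RTE_ETHER_TYPE_* macros resolve to.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder pattern-flag bits; the real MLX5_FLOW_LAYER_* bits differ. */
#define DEMO_LAYER_INNER_L2      (UINT64_C(1) << 0)
#define DEMO_LAYER_INNER_L3_IPV4 (UINT64_C(1) << 1)
#define DEMO_LAYER_INNER_L3_IPV6 (UINT64_C(1) << 2)
#define DEMO_LAYER_MPLS          (UINT64_C(1) << 3)

/* Standard EtherTypes, matching what RTE_ETHER_TYPE_* resolve to. */
#define DEMO_ETHER_TYPE_TEB  0x6558 /* Transparent Ethernet Bridging */
#define DEMO_ETHER_TYPE_IPV4 0x0800
#define DEMO_ETHER_TYPE_IPV6 0x86DD
#define DEMO_ETHER_TYPE_MPLS 0x8847

/* Same precedence as the new helper: inner L2 first, then L3, then MPLS. */
static inline uint16_t
demo_translate_tunnel_etypes(uint64_t pattern_flags)
{
	if (pattern_flags & DEMO_LAYER_INNER_L2)
		return DEMO_ETHER_TYPE_TEB;
	else if (pattern_flags & DEMO_LAYER_INNER_L3_IPV4)
		return DEMO_ETHER_TYPE_IPV4;
	else if (pattern_flags & DEMO_LAYER_INNER_L3_IPV6)
		return DEMO_ETHER_TYPE_IPV6;
	else if (pattern_flags & DEMO_LAYER_MPLS)
		return DEMO_ETHER_TYPE_MPLS;
	return 0; /* pattern has no recognized inner layer */
}

int
main(void)
{
	/* An inner IPv4 pattern resolves to the IPv4 EtherType. */
	printf("0x%04" PRIX16 "\n",
	       demo_translate_tunnel_etypes(DEMO_LAYER_INNER_L3_IPV4));
	/* Inner L2 wins over any inner L3 flag that is also set. */
	printf("0x%04" PRIX16 "\n",
	       demo_translate_tunnel_etypes(DEMO_LAYER_INNER_L2 |
					    DEMO_LAYER_INNER_L3_IPV6));
	return 0;
}

Built with a plain C compiler, this prints 0x0800 and 0x6558: a tunnel carrying an inner Ethernet header is reported as TEB regardless of which inner L3 flags are also set.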