diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ee129b97b4..6e34c7aa1d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -21,7 +21,6 @@
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -30,9 +29,10 @@
 #include "mlx5.h"
 #include "mlx5_defs.h"
-#include "mlx5_prm.h"
-#include "mlx5_glue.h"
 #include "mlx5_flow.h"
+#include "mlx5_glue.h"
+#include "mlx5_prm.h"
+#include "mlx5_rxtx.h"
 
 /* Dev ops structure defined in mlx5.c */
 extern const struct eth_dev_ops mlx5_dev_ops;
@@ -42,7 +42,6 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
 #endif
-extern const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops;
 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
 
 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
@@ -52,7 +51,6 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
 #endif
-	[MLX5_FLOW_TYPE_TCF] = &mlx5_flow_tcf_drv_ops,
 	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
 	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
 };
@@ -129,7 +127,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
 		.next = RTE_FLOW_EXPAND_RSS_NEXT
 			(MLX5_EXPANSION_OUTER_IPV4_UDP,
 			 MLX5_EXPANSION_OUTER_IPV4_TCP,
-			 MLX5_EXPANSION_GRE),
+			 MLX5_EXPANSION_GRE,
+			 MLX5_EXPANSION_IPV4,
+			 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
@@ -147,7 +147,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
 	[MLX5_EXPANSION_OUTER_IPV6] = {
 		.next = RTE_FLOW_EXPAND_RSS_NEXT
 			(MLX5_EXPANSION_OUTER_IPV6_UDP,
-			 MLX5_EXPANSION_OUTER_IPV6_TCP),
+			 MLX5_EXPANSION_OUTER_IPV6_TCP,
+			 MLX5_EXPANSION_IPV4,
+			 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
 		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
@@ -284,6 +286,10 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
 		.tunnel = MLX5_FLOW_LAYER_VXLAN,
 		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
 	},
+	{
+		.tunnel = MLX5_FLOW_LAYER_GENEVE,
+		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
+	},
 	{
 		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
 		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
@@ -300,8 +306,156 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
 		.tunnel = MLX5_FLOW_LAYER_MPLS,
 		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
 	},
+	{
+		.tunnel = MLX5_FLOW_LAYER_NVGRE,
+		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
+	},
+	{
+		.tunnel = MLX5_FLOW_LAYER_IPIP,
+		.ptype = RTE_PTYPE_TUNNEL_IP,
+	},
+	{
+		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
+		.ptype = RTE_PTYPE_TUNNEL_IP,
+	},
 };
 
+/**
+ * Translate tag ID to register.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] feature
+ *   The feature that requests the register.
+ * @param[in] id
+ *   The requested register ID.
+ * @param[out] error
+ *   Error description in case of any failure.
+ *
+ * @return
+ *   The requested register on success, a negative errno
+ *   value otherwise and rte_errno is set.
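+ *
+ * A minimal usage sketch (illustrative only, not part of this patch;
+ * error handling follows the driver's rte_flow_error convention):
+ *
+ * @code
+ * enum modify_reg reg;
+ *
+ * reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
+ * if ((int)reg < 0)
+ *         return -rte_errno; // rte_errno set by the callee
+ * // reg is REG_C_0 or REG_C_1, depending on dv_xmeta_en
+ * @endcode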
+ */
+enum modify_reg
+mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
+		     enum mlx5_feature_name feature,
+		     uint32_t id,
+		     struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_config *config = &priv->config;
+	enum modify_reg start_reg;
+
+	switch (feature) {
+	case MLX5_HAIRPIN_RX:
+		return REG_B;
+	case MLX5_HAIRPIN_TX:
+		return REG_A;
+	case MLX5_METADATA_RX:
+		switch (config->dv_xmeta_en) {
+		case MLX5_XMETA_MODE_LEGACY:
+			return REG_B;
+		case MLX5_XMETA_MODE_META16:
+			return REG_C_0;
+		case MLX5_XMETA_MODE_META32:
+			return REG_C_1;
+		}
+		break;
+	case MLX5_METADATA_TX:
+		return REG_A;
+	case MLX5_METADATA_FDB:
+		return REG_C_0;
+	case MLX5_FLOW_MARK:
+		switch (config->dv_xmeta_en) {
+		case MLX5_XMETA_MODE_LEGACY:
+			return REG_NONE;
+		case MLX5_XMETA_MODE_META16:
+			return REG_C_1;
+		case MLX5_XMETA_MODE_META32:
+			return REG_C_0;
+		}
+		break;
+	case MLX5_COPY_MARK:
+	case MLX5_MTR_SFX:
+		/*
+		 * The metadata COPY_MARK register is used in the meter
+		 * suffix subflow when a meter is present. It is safe to
+		 * share the same register.
+		 */
+		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
+	case MLX5_MTR_COLOR:
+		RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
+		return priv->mtr_color_reg;
+	case MLX5_APP_TAG:
+		/*
+		 * If a meter is enabled, it engages two registers: one for
+		 * color match and one for flow match. If the meter color
+		 * match does not use REG_C_2, the REG_C_x used by the
+		 * meter color match must be skipped.
+		 * If no meter is enabled, all available registers are
+		 * free to use.
+		 */
+		if (priv->mtr_color_reg != REG_NONE)
+			start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_3 :
+				    REG_C_4;
+		else
+			start_reg = REG_C_2;
+		if (id > (REG_C_7 - start_reg))
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  NULL, "invalid tag id");
+		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  NULL, "unsupported tag id");
+		/*
+		 * This case means the meter is using a REG_C_x greater
+		 * than 2. Take care not to conflict with the meter color
+		 * REG_C_x. If the available index REG_C_y >= REG_C_x,
+		 * skip the color register.
+		 */
+		if (start_reg == REG_C_3 && config->flow_mreg_c
+		    [id + REG_C_3 - REG_C_0] >= priv->mtr_color_reg) {
+			if (config->flow_mreg_c[id + 1 + REG_C_3 - REG_C_0] !=
+			    REG_NONE)
+				return config->flow_mreg_c
+					[id + 1 + REG_C_3 - REG_C_0];
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  NULL, "unsupported tag id");
+		}
+		return config->flow_mreg_c[id + start_reg - REG_C_0];
+	}
+	assert(false);
+	return rte_flow_error_set(error, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				  NULL, "invalid feature name");
+}
+
+/**
+ * Check extensive flow metadata register support.
+ *
+ * @param dev
+ *   Pointer to rte_eth_dev structure.
+ *
+ * @return
+ *   True if device supports extensive flow metadata register, otherwise false.
+ */
+bool
+mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_config *config = &priv->config;
+
+	/*
+	 * Having an available reg_c can be regarded inclusively as
+	 * supporting extensive flow metadata registers, which could mean,
+	 * - metadata register copy action by modify header.
+	 * - 16 modify header actions are supported.
+	 * - reg_c's are preserved across different domains (FDB and NIC)
+	 *   on packet loopback by flow lookup miss.
+	 */
+	return config->flow_mreg_c[2] != REG_NONE;
+}
+
 /**
  * Discover the maximum number of priority available.
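+ *
+ * A consumption sketch (illustrative; the mlx5 probe path stores the
+ * result along these lines):
+ *
+ * @code
+ * ret = mlx5_flow_discover_priorities(dev);
+ * if (ret < 0)
+ *         return ret;
+ * priv->config.flow_prio = ret; // number of supported flow priorities
+ * @endcode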
* @@ -315,6 +469,7 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = { int mlx5_flow_discover_priorities(struct rte_eth_dev *dev) { + struct mlx5_priv *priv = dev->data->dev_private; struct { struct ibv_flow_attr attr; struct ibv_flow_spec_eth eth; @@ -322,6 +477,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev) } flow_attr = { .attr = { .num_of_specs = 2, + .port = (uint8_t)priv->ibv_port, }, .eth = { .type = IBV_FLOW_SPEC_ETH, @@ -350,6 +506,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev) claim_zero(mlx5_glue->destroy_flow(flow)); priority = vprio[i]; } + mlx5_hrxq_drop_release(dev); switch (priority) { case 8: priority = RTE_DIM(priority_map_3); @@ -361,10 +518,9 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev) rte_errno = ENOTSUP; DRV_LOG(ERR, "port %u verbs maximum priority: %d expected 8/16", - dev->data->port_id, vprio[i]); + dev->data->port_id, priority); return -rte_errno; } - mlx5_hrxq_drop_release(dev); DRV_LOG(INFO, "port %u flow maximum priority: %d", dev->data->port_id, priority); return priority; @@ -387,7 +543,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, uint32_t subpriority) { uint32_t res = 0; - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; switch (priv->config.flow_prio) { case RTE_DIM(priority_map_3): @@ -473,7 +629,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item, * Item hash fields. * * @return - * The hash fileds that should be used. + * The hash fields that should be used. */ uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, @@ -536,20 +692,30 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) static void flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow = dev_flow->flow; - const int mark = !!(flow->actions & + const int mark = !!(dev_flow->actions & (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); unsigned int i; for (i = 0; i != flow->rss.queue_num; ++i) { - int idx = (*flow->queue)[i]; + int idx = (*flow->rss.queue)[i]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); - if (mark) { + /* + * To support metadata register copy on Tx loopback, + * this must be always enabled (metadata may arive + * from other port - not from local flows only. 
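+		 * For example (illustrative): with dv_flow_en=1,
+		 * dv_xmeta_en != LEGACY and reg_c[2] available, every
+		 * queue in flow->rss.queue ends up with rxq.mark == 1
+		 * even when the flow itself carries no MARK/FLAG action.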
+ */ + if (priv->config.dv_flow_en && + priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && + mlx5_flow_ext_mreg_supported(dev)) { + rxq_ctrl->rxq.mark = 1; + rxq_ctrl->flow_mark_n = 1; + } else if (mark) { rxq_ctrl->rxq.mark = 1; rxq_ctrl->flow_mark_n++; } @@ -599,21 +765,26 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) static void flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow = dev_flow->flow; - const int mark = !!(flow->actions & + const int mark = !!(dev_flow->actions & (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); unsigned int i; assert(dev->data->dev_started); for (i = 0; i != flow->rss.queue_num; ++i) { - int idx = (*flow->queue)[i]; + int idx = (*flow->rss.queue)[i]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); - if (mark) { + if (priv->config.dv_flow_en && + priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && + mlx5_flow_ext_mreg_supported(dev)) { + rxq_ctrl->rxq.mark = 1; + rxq_ctrl->flow_mark_n = 1; + } else if (mark) { rxq_ctrl->flow_mark_n--; rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; } @@ -661,7 +832,7 @@ flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) static void flow_rxq_flags_clear(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) { @@ -680,6 +851,29 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev) } } +/* + * return a pointer to the desired action in the list of actions. + * + * @param[in] actions + * The list of actions to search the action in. + * @param[in] action + * The action to find. + * + * @return + * Pointer to the action in the list, if found. NULL otherwise. + */ +const struct rte_flow_action * +mlx5_flow_find_action(const struct rte_flow_action *actions, + enum rte_flow_action_type action) +{ + if (actions == NULL) + return NULL; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) + if (actions->type == action) + return actions; + return NULL; +} + /* * Validate the flag action. * @@ -786,7 +980,7 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action, * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_flow_validate_action_drop(uint64_t action_flags, @@ -801,7 +995,8 @@ mlx5_flow_validate_action_drop(uint64_t action_flags, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't drop and mark in same flow"); - if (action_flags & MLX5_FLOW_FATE_ACTIONS) + if (action_flags & (MLX5_FLOW_FATE_ACTIONS | + MLX5_FLOW_FATE_ESWITCH_ACTIONS)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't have 2 fate actions in" @@ -829,7 +1024,7 @@ mlx5_flow_validate_action_drop(uint64_t action_flags, * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
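+ *
+ * Typical call from a driver validate loop (illustrative):
+ *
+ * @code
+ * case RTE_FLOW_ACTION_TYPE_QUEUE:
+ *         ret = mlx5_flow_validate_action_queue(actions, action_flags,
+ *                                               dev, attr, error);
+ *         if (ret < 0)
+ *                 return ret;
+ *         action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ *         break;
+ * @endcode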
*/ int mlx5_flow_validate_action_queue(const struct rte_flow_action *action, @@ -838,7 +1033,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, const struct rte_flow_attr *attr, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_queue *queue = action->conf; if (action_flags & MLX5_FLOW_FATE_ACTIONS) @@ -846,6 +1041,10 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't have 2 fate actions in" " same flow"); + if (!priv->rxqs_n) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No Rx queues configured"); if (queue->index >= priv->rxqs_n) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, @@ -875,21 +1074,25 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, * Pointer to the Ethernet device structure. * @param[in] attr * Attributes of flow that includes this action. + * @param[in] item_flags + * Items that were detected. * @param[out] error * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_flow_validate_action_rss(const struct rte_flow_action *action, uint64_t action_flags, struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + uint64_t item_flags, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_rss *rss = action->conf; + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); unsigned int i; if (action_flags & MLX5_FLOW_FATE_ACTIONS) @@ -939,6 +1142,14 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, &rss->types, "some RSS protocols are not" " supported"); + if (!priv->rxqs_n) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No Rx queues configured"); + if (!rss->queue_num) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No queues configured"); for (i = 0; i != rss->queue_num; ++i) { if (!(*priv->rxqs)[rss->queue[i]]) return rte_flow_error_set @@ -950,6 +1161,11 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, "rss action not supported for " "egress"); + if (rss->level > 1 && !tunnel) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "inner RSS is not supported for " + "non-tunnel flows"); return 0; } @@ -964,7 +1180,7 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
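+ *
+ * Illustrative use from a validate loop:
+ *
+ * @code
+ * ret = mlx5_flow_validate_action_count(dev, attr, error);
+ * if (ret < 0)
+ *         return ret;
+ * action_flags |= MLX5_FLOW_ACTION_COUNT;
+ * @endcode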
*/ int mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, @@ -998,7 +1214,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev, const struct rte_flow_attr *attributes, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; uint32_t priority_max = priv->config.flow_prio - 1; if (attributes->group) @@ -1014,7 +1230,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, "egress is not supported"); - if (attributes->transfer) + if (attributes->transfer && !priv->config.dv_esw_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL, "transfer is not supported"); @@ -1026,6 +1242,110 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev, return 0; } +/** + * Validate ICMP6 item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_icmp6 *mask = item->mask; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret; + + if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with ICMP6 layer"); + if (!(item_flags & l3m)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv6 is mandatory to filter on" + " ICMP6"); + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L4 layers not supported"); + if (!mask) + mask = &rte_flow_item_icmp6_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_icmp6_mask, + sizeof(struct rte_flow_item_icmp6), error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate ICMP item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_icmp *mask = item->mask; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + const uint64_t l4m = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret; + + if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with ICMP layer"); + if (!(item_flags & l3m)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv4 is mandatory to filter" + " on ICMP"); + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L4 layers not supported"); + if (!mask) + mask = &rte_flow_item_icmp_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_icmp_mask, + sizeof(struct rte_flow_item_icmp), error); + if (ret < 0) + return ret; + return 0; +} + /** * Validate Ethernet item. * @@ -1059,6 +1379,17 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "multiple L2 layers not supported"); + if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) || + (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L2 layer should not follow " + "L3 layers"); + if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) || + (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L2 layer should not follow VLAN"); if (!mask) mask = &rte_flow_item_eth_mask; ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, @@ -1075,6 +1406,8 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item, * Item specification. * @param[in] item_flags * Bit-fields that holds the items detected until now. + * @param[in] dev + * Ethernet device flow is being created on. * @param[out] error * Pointer to error structure. * @@ -1084,13 +1417,14 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item, int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, uint64_t item_flags, + struct rte_eth_dev *dev, struct rte_flow_error *error) { const struct rte_flow_item_vlan *spec = item->spec; const struct rte_flow_item_vlan *mask = item->mask; const struct rte_flow_item_vlan nic_mask = { - .tci = RTE_BE16(0x0fff), - .inner_type = RTE_BE16(0xffff), + .tci = RTE_BE16(UINT16_MAX), + .inner_type = RTE_BE16(UINT16_MAX), }; uint16_t vlan_tag = 0; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); @@ -1109,7 +1443,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, else if ((item_flags & l34m) != 0) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "L2 layer cannot follow L3/L4 layer"); + "VLAN cannot follow L3/L4 layer"); if (!mask) mask = &rte_flow_item_vlan_mask; ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, @@ -1118,6 +1452,25 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, error); if (ret) return ret; + if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->vmwa_context) { + /* + * Non-NULL context means we have a virtual machine + * and SR-IOV enabled, we have to create VLAN interface + * to make hypervisor to setup E-Switch vport + * context correctly. We avoid creating the multiple + * VLAN interfaces, so we cannot support VLAN tag mask. 
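+			 * For example (illustrative): matching VLAN ID
+			 * 0x123 with the full 0x0fff TCI mask is accepted,
+			 * while a partial mask such as 0x0f00 is rejected
+			 * below.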
+ */ + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN tag mask is not" + " supported in virtual" + " environment"); + } + } if (spec) { vlan_tag = spec->tci; vlan_tag &= mask->tci; @@ -1141,6 +1494,9 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, * Item specification. * @param[in] item_flags * Bit-fields that holds the items detected until now. + * @param[in] acc_mask + * Acceptable mask, if NULL default internal default mask + * will be used to check whether item fields are supported. * @param[out] error * Pointer to error structure. * @@ -1150,9 +1506,13 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, uint64_t item_flags, + uint64_t last_item, + uint16_t ether_type, + const struct rte_flow_item_ipv4 *acc_mask, struct rte_flow_error *error) { const struct rte_flow_item_ipv4 *mask = item->mask; + const struct rte_flow_item_ipv4 *spec = item->spec; const struct rte_flow_item_ipv4 nic_mask = { .hdr = { .src_addr = RTE_BE32(0xffffffff), @@ -1167,7 +1527,33 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : MLX5_FLOW_LAYER_OUTER_L4; int ret; + uint8_t next_proto = 0xFF; + const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | + MLX5_FLOW_LAYER_OUTER_VLAN | + MLX5_FLOW_LAYER_INNER_VLAN); + if ((last_item & l2_vlan) && ether_type && + ether_type != RTE_ETHER_TYPE_IPV4) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv4 cannot follow L2/VLAN layer " + "which ether type is not IPv4"); + if (item_flags & MLX5_FLOW_LAYER_IPIP) { + if (mask && spec) + next_proto = mask->hdr.next_proto_id & + spec->hdr.next_proto_id; + if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "multiple tunnel " + "not supported"); + } + if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "wrong tunnel type - IPv6 specified " + "but IPv4 item provided"); if (item_flags & l3m) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -1176,6 +1562,11 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 cannot follow an L4 layer."); + else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && + !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 cannot follow an NVGRE layer."); if (!mask) mask = &rte_flow_item_ipv4_mask; else if (mask->hdr.next_proto_id != 0 && @@ -1185,7 +1576,8 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, "partial mask is not supported" " for protocol"); ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, - (const uint8_t *)&nic_mask, + acc_mask ? (const uint8_t *)acc_mask + : (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_ipv4), error); if (ret < 0) @@ -1200,6 +1592,9 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, * Item specification. * @param[in] item_flags * Bit-fields that holds the items detected until now. + * @param[in] acc_mask + * Acceptable mask, if NULL default internal default mask + * will be used to check whether item fields are supported. * @param[out] error * Pointer to error structure. 
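+ *
+ * An illustrative call, as the DV validate path might issue it
+ * (nic_ipv6_mask is that path's wider device mask):
+ *
+ * @code
+ * ret = mlx5_flow_validate_item_ipv6(items, item_flags, last_item,
+ *                                    ether_type, &nic_ipv6_mask,
+ *                                    error);
+ * @endcode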
* @@ -1209,9 +1604,13 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, uint64_t item_flags, + uint64_t last_item, + uint16_t ether_type, + const struct rte_flow_item_ipv6 *acc_mask, struct rte_flow_error *error) { const struct rte_flow_item_ipv6 *mask = item->mask; + const struct rte_flow_item_ipv6 *spec = item->spec; const struct rte_flow_item_ipv6 nic_mask = { .hdr = { .src_addr = @@ -1231,7 +1630,32 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : MLX5_FLOW_LAYER_OUTER_L4; int ret; + uint8_t next_proto = 0xFF; + const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | + MLX5_FLOW_LAYER_OUTER_VLAN | + MLX5_FLOW_LAYER_INNER_VLAN); + if ((last_item & l2_vlan) && ether_type && + ether_type != RTE_ETHER_TYPE_IPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv6 cannot follow L2/VLAN layer " + "which ether type is not IPv6"); + if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { + if (mask && spec) + next_proto = mask->hdr.proto & spec->hdr.proto; + if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "multiple tunnel " + "not supported"); + } + if (item_flags & MLX5_FLOW_LAYER_IPIP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "wrong tunnel type - IPv4 specified " + "but IPv6 item provided"); if (item_flags & l3m) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -1240,10 +1664,16 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 cannot follow an L4 layer."); + else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && + !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 cannot follow an NVGRE layer."); if (!mask) mask = &rte_flow_item_ipv6_mask; ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, - (const uint8_t *)&nic_mask, + acc_mask ? (const uint8_t *)acc_mask + : (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_ipv6), error); if (ret < 0) @@ -1261,7 +1691,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, * @param[in] target_protocol * The next protocol in the previous item. * @param[in] flow_mask - * mlx5 flow-specific (TCF, DV, verbs, etc.) supported header fields mask. + * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. * @param[out] error * Pointer to error structure. * @@ -1462,7 +1892,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, struct rte_eth_dev *dev, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_item_vxlan_gpe *spec = item->spec; const struct rte_flow_item_vxlan_gpe *mask = item->mask; int ret; @@ -1531,6 +1961,61 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, " defined"); return 0; } +/** + * Validate GRE Key item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit flags to mark detected items. + * @param[in] gre_item + * Pointer to gre_item + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
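+ *
+ * Illustrative call, issued right after the preceding GRE item was
+ * validated (gre_item points back at that item):
+ *
+ * @code
+ * ret = mlx5_flow_validate_item_gre_key(items, item_flags,
+ *                                       gre_item, error);
+ * if (ret < 0)
+ *         return ret;
+ * last_item = MLX5_FLOW_LAYER_GRE_KEY;
+ * @endcode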
+ */
+int
+mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
+				uint64_t item_flags,
+				const struct rte_flow_item *gre_item,
+				struct rte_flow_error *error)
+{
+	const rte_be32_t *mask = item->mask;
+	int ret = 0;
+	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
+	const struct rte_flow_item_gre *gre_spec = gre_item->spec;
+	const struct rte_flow_item_gre *gre_mask = gre_item->mask;
+
+	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Multiple GRE keys are not supported");
+	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "No preceding GRE header");
+	if (item_flags & MLX5_FLOW_LAYER_INNER)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "GRE key following a wrong item");
+	if (!gre_mask)
+		gre_mask = &rte_flow_item_gre_mask;
+	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
+	    !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Key bit must be on");
+
+	if (!mask)
+		mask = &gre_key_default_mask;
+	ret = mlx5_flow_item_acceptable
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&gre_key_default_mask,
+		 sizeof(rte_be32_t), error);
+	return ret;
+}
 
 /**
  * Validate GRE item.
@@ -1556,6 +2041,10 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
 	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
 	const struct rte_flow_item_gre *mask = item->mask;
 	int ret;
+	const struct rte_flow_item_gre nic_mask = {
+		.c_rsvd0_ver = RTE_BE16(0xB000),
+		.protocol = RTE_BE16(UINT16_MAX),
+	};
 
 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
 		return rte_flow_error_set(error, EINVAL,
@@ -1575,10 +2064,11 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
 		mask = &rte_flow_item_gre_mask;
 	ret = mlx5_flow_item_acceptable
 		(item, (const uint8_t *)mask,
-		 (const uint8_t *)&rte_flow_item_gre_mask,
+		 (const uint8_t *)&nic_mask,
 		 sizeof(struct rte_flow_item_gre), error);
 	if (ret < 0)
 		return ret;
+#ifndef HAVE_MLX5DV_DR
 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
 	if (spec && (spec->protocol & mask->protocol))
 		return rte_flow_error_set(error, ENOTSUP,
@@ -1586,56 +2076,146 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
 			"without MPLS support the"
 			" specification cannot be used for"
 			" filtering");
+#endif
 #endif
 	return 0;
 }
 
 /**
- * Validate MPLS item.
+ * Validate Geneve item.
  *
- * @param[in] dev
- *   Pointer to the rte_eth_dev structure.
  * @param[in] item
 *   Item specification.
  * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
- * @param[in] prev_layer
- *   The protocol layer indicated in previous item.
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
  * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
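+ *
+ * Illustrative call from an item validate loop:
+ *
+ * @code
+ * ret = mlx5_flow_validate_item_geneve(items, item_flags, dev, error);
+ * if (ret < 0)
+ *         return ret;
+ * last_item = MLX5_FLOW_LAYER_GENEVE;
+ * @endcode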
*/ + int -mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, - const struct rte_flow_item *item __rte_unused, - uint64_t item_flags __rte_unused, - uint64_t prev_layer __rte_unused, - struct rte_flow_error *error) +mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_eth_dev *dev, + struct rte_flow_error *error) { -#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT - const struct rte_flow_item_mpls *mask = item->mask; - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_item_geneve *spec = item->spec; + const struct rte_flow_item_geneve *mask = item->mask; int ret; + uint16_t gbhdr; + uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ? + MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; + const struct rte_flow_item_geneve nic_mask = { + .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), + .vni = "\xff\xff\xff", + .protocol = RTE_BE16(UINT16_MAX), + }; - if (!priv->config.mpls_en) + if (!(priv->config.hca_attr.flex_parser_protocols & + MLX5_HCA_FLEX_GENEVE_ENABLED) || + !priv->config.hca_attr.tunnel_stateless_geneve_rx) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "MPLS not supported or" - " disabled in firmware" - " configuration."); - /* MPLS over IP, UDP, GRE is allowed */ - if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | - MLX5_FLOW_LAYER_OUTER_L4_UDP | - MLX5_FLOW_LAYER_GRE))) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "protocol filtering not compatible" - " with MPLS layer"); - /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */ - if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && - !(item_flags & MLX5_FLOW_LAYER_GRE)) + "L3 Geneve is not enabled by device" + " parameter and/or not configured in" + " firmware"); + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" + " supported"); + /* + * Verify only UDPv4 is present as defined in + * https://tools.ietf.org/html/rfc7348 + */ + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_geneve_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_geneve), error); + if (ret) + return ret; + if (spec) { + gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); + if (MLX5_GENEVE_VER_VAL(gbhdr) || + MLX5_GENEVE_CRITO_VAL(gbhdr) || + MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Geneve protocol unsupported" + " fields are being used"); + if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported Geneve options length"); + } + if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve tunnel must be fully defined"); + return 0; +} + +/** + * Validate MPLS item. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] prev_layer + * The protocol layer indicated in previous item. + * @param[out] error + * Pointer to error structure. 
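+ *
+ * Illustrative call (mirrors the verbs validate path):
+ *
+ * @code
+ * ret = mlx5_flow_validate_item_mpls(dev, items, item_flags,
+ *                                    last_item, error);
+ * if (ret < 0)
+ *         return ret;
+ * @endcode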
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_item *item __rte_unused, + uint64_t item_flags __rte_unused, + uint64_t prev_layer __rte_unused, + struct rte_flow_error *error) +{ +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + const struct rte_flow_item_mpls *mask = item->mask; + struct mlx5_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->config.mpls_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "MPLS not supported or" + " disabled in firmware" + " configuration."); + /* MPLS over IP, UDP, GRE is allowed */ + if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | + MLX5_FLOW_LAYER_OUTER_L4_UDP | + MLX5_FLOW_LAYER_GRE))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with MPLS layer"); + /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */ + if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && + !(item_flags & MLX5_FLOW_LAYER_GRE)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "multiple tunnel layers not" @@ -1656,24 +2236,118 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, " update."); } +/** + * Validate NVGRE item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit flags to mark detected items. + * @param[in] target_protocol + * The next protocol in the previous item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_nvgre *mask = item->mask; + int ret; + + if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with this GRE layer"); + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" + " supported"); + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 Layer is missing"); + if (!mask) + mask = &rte_flow_item_nvgre_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_nvgre_mask, + sizeof(struct rte_flow_item_nvgre), error); + if (ret < 0) + return ret; + return 0; +} + +/* Allocate unique ID for the split Q/RSS subflows. */ +static uint32_t +flow_qrss_get_id(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t qrss_id, ret; + + ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); + if (ret) + return 0; + assert(qrss_id); + return qrss_id; +} + +/* Free unique ID for the split Q/RSS subflows. */ +static void +flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (qrss_id) + mlx5_flow_id_release(priv->qrss_id_pool, qrss_id); +} + +/** + * Release resource related QUEUE/RSS action split. + * + * @param dev + * Pointer to Ethernet device. + * @param flow + * Flow to release id's from. 
+ */ +static void +flow_mreg_split_qrss_release(struct rte_eth_dev *dev, + struct rte_flow *flow) +{ + struct mlx5_flow *dev_flow; + + LIST_FOREACH(dev_flow, &flow->dev_flows, next) + if (dev_flow->qrss_id) + flow_qrss_free_id(dev, dev_flow->qrss_id); +} + static int flow_null_validate(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused) + bool external __rte_unused, + struct rte_flow_error *error) { - rte_errno = ENOTSUP; - return -rte_errno; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); } static struct mlx5_flow * flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused) + struct rte_flow_error *error) { - rte_errno = ENOTSUP; + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); return NULL; } @@ -1683,19 +2357,19 @@ flow_null_translate(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused) + struct rte_flow_error *error) { - rte_errno = ENOTSUP; - return -rte_errno; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); } static int flow_null_apply(struct rte_eth_dev *dev __rte_unused, struct rte_flow *flow __rte_unused, - struct rte_flow_error *error __rte_unused) + struct rte_flow_error *error) { - rte_errno = ENOTSUP; - return -rte_errno; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); } static void @@ -1715,10 +2389,10 @@ flow_null_query(struct rte_eth_dev *dev __rte_unused, struct rte_flow *flow __rte_unused, const struct rte_flow_action *actions __rte_unused, void *data __rte_unused, - struct rte_flow_error *error __rte_unused) + struct rte_flow_error *error) { - rte_errno = ENOTSUP; - return -rte_errno; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); } /* Void driver to protect from null pointer reference. */ @@ -1747,12 +2421,12 @@ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { static enum mlx5_flow_drv_type flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; - if (attr->transfer) - type = MLX5_FLOW_TYPE_TCF; - else + if (attr->transfer && priv->config.dv_esw_en) + type = MLX5_FLOW_TYPE_DV; + if (!attr->transfer) type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : MLX5_FLOW_TYPE_VERBS; return type; @@ -1772,24 +2446,26 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) * Pointer to the list of items. * @param[in] actions * Pointer to the list of actions. + * @param[in] external + * This flow rule is created by request external to PMD. * @param[out] error * Pointer to the error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
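+ *
+ * Illustrative call, as mlx5_flow_validate() issues it for external
+ * (application-created) rules:
+ *
+ * @code
+ * ret = flow_drv_validate(dev, attr, items, actions, true, error);
+ * if (ret < 0)
+ *         return ret;
+ * @endcode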
*/ static inline int flow_drv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - struct rte_flow_error *error) + bool external, struct rte_flow_error *error) { const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); fops = flow_get_drv_ops(type); - return fops->validate(dev, attr, items, actions, error); + return fops->validate(dev, attr, items, actions, external, error); } /** @@ -1799,7 +2475,7 @@ flow_drv_validate(struct rte_eth_dev *dev, * initializes the device flow and returns the pointer. * * @note - * This function initializes device flow structure such as dv, tcf or verbs in + * This function initializes device flow structure such as dv or verbs in * struct mlx5_flow. However, it is caller's responsibility to initialize the * rest. For example, adding returning device flow to flow->dev_flow list and * setting backward reference to the flow should be done out of this function. @@ -1815,7 +2491,7 @@ flow_drv_validate(struct rte_eth_dev *dev, * Pointer to the error structure. * * @return - * Pointer to device flow on success, otherwise NULL and rte_ernno is set. + * Pointer to device flow on success, otherwise NULL and rte_errno is set. */ static inline struct mlx5_flow * flow_drv_prepare(const struct rte_flow *flow, @@ -1859,7 +2535,7 @@ flow_drv_prepare(const struct rte_flow *flow, * Pointer to the error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static inline int flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, @@ -1941,6 +2617,7 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type type = flow->drv_type; + flow_mreg_split_qrss_release(dev, flow); assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(type); fops->destroy(dev, flow); @@ -1961,53 +2638,1127 @@ mlx5_flow_validate(struct rte_eth_dev *dev, { int ret; - ret = flow_drv_validate(dev, attr, items, actions, error); - if (ret < 0) - return ret; - return 0; + ret = flow_drv_validate(dev, attr, items, actions, true, error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Get RSS action from the action list. + * + * @param[in] actions + * Pointer to the list of actions. + * + * @return + * Pointer to the RSS action if exist, else return NULL. + */ +static const struct rte_flow_action_rss* +flow_get_rss_action(const struct rte_flow_action actions[]) +{ + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_RSS: + return (const struct rte_flow_action_rss *) + actions->conf; + default: + break; + } + } + return NULL; +} + +static unsigned int +find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) +{ + const struct rte_flow_item *item; + unsigned int has_vlan = 0; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + has_vlan = 1; + break; + } + } + if (has_vlan) + return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : + MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; + return rss_level < 2 ? MLX5_EXPANSION_ROOT : + MLX5_EXPANSION_ROOT_OUTER; +} + +/** + * Get QUEUE/RSS action from the action list. + * + * @param[in] actions + * Pointer to the list of actions. 
+ * @param[out] qrss
+ *   Pointer to the return pointer; set to the QUEUE/RSS action if one
+ *   is found, left untouched otherwise.
+ *
+ * @return
+ *   Total number of actions, including the terminating END action.
+ */
+static int
+flow_parse_qrss_action(const struct rte_flow_action actions[],
+		       const struct rte_flow_action **qrss)
+{
+	int actions_n = 0;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			*qrss = actions;
+			break;
+		default:
+			break;
+		}
+		actions_n++;
+	}
+	/* Count RTE_FLOW_ACTION_TYPE_END. */
+	return actions_n + 1;
+}
+
+/**
+ * Check if the flow should be split due to hairpin.
+ * The reason for the split is that in current HW we can't
+ * support encap on Rx, so if a flow has encap we move it
+ * to Tx.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ *
+ * @return
+ *   > 0 the number of actions and the flow should be split,
+ *   0 when no split required.
+ */
+static int
+flow_check_hairpin_split(struct rte_eth_dev *dev,
+			 const struct rte_flow_attr *attr,
+			 const struct rte_flow_action actions[])
+{
+	int queue_action = 0;
+	int action_n = 0;
+	int encap = 0;
+	const struct rte_flow_action_queue *queue;
+	const struct rte_flow_action_rss *rss;
+	const struct rte_flow_action_raw_encap *raw_encap;
+
+	if (!attr->ingress)
+		return 0;
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			queue = actions->conf;
+			if (mlx5_rxq_get_type(dev, queue->index) !=
+			    MLX5_RXQ_TYPE_HAIRPIN)
+				return 0;
+			queue_action = 1;
+			action_n++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = actions->conf;
+			if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
+			    MLX5_RXQ_TYPE_HAIRPIN)
+				return 0;
+			queue_action = 1;
+			action_n++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+			encap = 1;
+			action_n++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+			raw_encap = actions->conf;
+			if (raw_encap->size >
+			    (sizeof(struct rte_flow_item_eth) +
+			     sizeof(struct rte_flow_item_ipv4)))
+				encap = 1;
+			action_n++;
+			break;
+		default:
+			action_n++;
+			break;
+		}
+	}
+	if (encap == 1 && queue_action)
+		return action_n;
+	return 0;
+}
+
+/* Declare flow create/destroy prototypes in advance. */
+static struct rte_flow *
+flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 bool external, struct rte_flow_error *error);
+
+static void
+flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+		  struct rte_flow *flow);
+
+/**
+ * Add a flow of copying flow metadata registers in RX_CP_TBL.
+ *
+ * As mark_id is unique, if there's already a registered flow for the mark_id,
+ * return by increasing the reference counter of the resource. Otherwise, create
+ * the resource (mcp_res) and flow.
+ *
+ * The flow looks like:
+ *   - If ingress port is ANY and reg_c[1] is mark_id,
+ *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * For the default flow (zero mark_id), the flow looks like:
+ *   - If ingress port is ANY,
+ *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param mark_id
+ *   ID of MARK action, zero means default flow for META.
+ * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * Associated resource on success, NULL otherwise and rte_errno is set. + */ +static struct mlx5_flow_mreg_copy_resource * +flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow_attr attr = { + .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, + .ingress = 1, + }; + struct mlx5_rte_flow_item_tag tag_spec = { + .data = mark_id, + }; + struct rte_flow_item items[] = { + [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, + }; + struct rte_flow_action_mark ftag = { + .id = mark_id, + }; + struct mlx5_flow_action_copy_mreg cp_mreg = { + .dst = REG_B, + .src = 0, + }; + struct rte_flow_action_jump jump = { + .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, + }; + struct rte_flow_action actions[] = { + [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, + }; + struct mlx5_flow_mreg_copy_resource *mcp_res; + int ret; + + /* Fill the register fileds in the flow. */ + ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); + if (ret < 0) + return NULL; + tag_spec.id = ret; + ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); + if (ret < 0) + return NULL; + cp_mreg.src = ret; + /* Check if already registered. */ + assert(priv->mreg_cp_tbl); + mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id); + if (mcp_res) { + /* For non-default rule. */ + if (mark_id) + mcp_res->refcnt++; + assert(mark_id || mcp_res->refcnt == 1); + return mcp_res; + } + /* Provide the full width of FLAG specific value. */ + if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) + tag_spec.data = MLX5_FLOW_MARK_DEFAULT; + /* Build a new flow. */ + if (mark_id) { + items[0] = (struct rte_flow_item){ + .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, + .spec = &tag_spec, + }; + items[1] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_END, + }; + actions[0] = (struct rte_flow_action){ + .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK, + .conf = &ftag, + }; + actions[1] = (struct rte_flow_action){ + .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = &cp_mreg, + }; + actions[2] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &jump, + }; + actions[3] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_END, + }; + } else { + /* Default rule, wildcard match. */ + attr.priority = MLX5_FLOW_PRIO_RSVD; + items[0] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_END, + }; + actions[0] = (struct rte_flow_action){ + .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = &cp_mreg, + }; + actions[1] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &jump, + }; + actions[2] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_END, + }; + } + /* Build a new entry. */ + mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0); + if (!mcp_res) { + rte_errno = ENOMEM; + return NULL; + } + /* + * The copy Flows are not included in any list. There + * ones are referenced from other Flows and can not + * be applied, removed, deleted in ardbitrary order + * by list traversing. 
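+	 * An illustrative lifecycle, from the caller's viewpoint:
+	 *   mcp_res = flow_mreg_add_copy_action(dev, mark_id, error);
+	 *   flow->mreg_copy = mcp_res;            // refcnt owned by flow
+	 *   ...
+	 *   flow_mreg_del_copy_action(dev, flow); // refcnt 0 -> destroyed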
+ */ + mcp_res->flow = flow_list_create(dev, NULL, &attr, items, + actions, false, error); + if (!mcp_res->flow) + goto error; + mcp_res->refcnt++; + mcp_res->hlist_ent.key = mark_id; + ret = mlx5_hlist_insert(priv->mreg_cp_tbl, + &mcp_res->hlist_ent); + assert(!ret); + if (ret) + goto error; + return mcp_res; +error: + if (mcp_res->flow) + flow_list_destroy(dev, NULL, mcp_res->flow); + rte_free(mcp_res); + return NULL; +} + +/** + * Release flow in RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @flow + * Parent flow for wich copying is provided. + */ +static void +flow_mreg_del_copy_action(struct rte_eth_dev *dev, + struct rte_flow *flow) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; + struct mlx5_priv *priv = dev->data->dev_private; + + if (!mcp_res || !priv->mreg_cp_tbl) + return; + if (flow->copy_applied) { + assert(mcp_res->appcnt); + flow->copy_applied = 0; + --mcp_res->appcnt; + if (!mcp_res->appcnt) + flow_drv_remove(dev, mcp_res->flow); + } + /* + * We do not check availability of metadata registers here, + * because copy resources are allocated in this case. + */ + if (--mcp_res->refcnt) + return; + assert(mcp_res->flow); + flow_list_destroy(dev, NULL, mcp_res->flow); + mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); + rte_free(mcp_res); + flow->mreg_copy = NULL; +} + +/** + * Start flow in RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @flow + * Parent flow for wich copying is provided. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_mreg_start_copy_action(struct rte_eth_dev *dev, + struct rte_flow *flow) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; + int ret; + + if (!mcp_res || flow->copy_applied) + return 0; + if (!mcp_res->appcnt) { + ret = flow_drv_apply(dev, mcp_res->flow, NULL); + if (ret) + return ret; + } + ++mcp_res->appcnt; + flow->copy_applied = 1; + return 0; +} + +/** + * Stop flow in RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @flow + * Parent flow for wich copying is provided. + */ +static void +flow_mreg_stop_copy_action(struct rte_eth_dev *dev, + struct rte_flow *flow) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; + + if (!mcp_res || !flow->copy_applied) + return; + assert(mcp_res->appcnt); + --mcp_res->appcnt; + flow->copy_applied = 0; + if (!mcp_res->appcnt) + flow_drv_remove(dev, mcp_res->flow); +} + +/** + * Remove the default copy action from RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res; + struct mlx5_priv *priv = dev->data->dev_private; + + /* Check if default flow is registered. */ + if (!priv->mreg_cp_tbl) + return; + mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 0ULL); + if (!mcp_res) + return; + assert(mcp_res->flow); + flow_list_destroy(dev, NULL, mcp_res->flow); + mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); + rte_free(mcp_res); +} + +/** + * Add the default copy action in in RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 for success, negative value otherwise and rte_errno is set. 
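+ *
+ * Illustrative call, issued once while enabling traffic (e.g. from the
+ * flow start path):
+ *
+ * @code
+ * ret = flow_mreg_add_default_copy_action(dev, &error);
+ * if (ret < 0)
+ *         return ret; // rte_errno already set
+ * @endcode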
+ */ +static int +flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_mreg_copy_resource *mcp_res; + + /* Check whether extensive metadata feature is engaged. */ + if (!priv->config.dv_flow_en || + priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || + !mlx5_flow_ext_mreg_supported(dev) || + !priv->sh->dv_regc0_mask) + return 0; + mcp_res = flow_mreg_add_copy_action(dev, 0, error); + if (!mcp_res) + return -rte_errno; + return 0; +} + +/** + * Add a flow of copying flow metadata registers in RX_CP_TBL. + * + * All the flow having Q/RSS action should be split by + * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL + * performs the following, + * - CQE->flow_tag := reg_c[1] (MARK) + * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) + * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1] + * but there should be a flow per each MARK ID set by MARK action. + * + * For the aforementioned reason, if there's a MARK action in flow's action + * list, a corresponding flow should be added to the RX_CP_TBL in order to copy + * the MARK ID to CQE's flow_tag like, + * - If reg_c[1] is mark_id, + * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. + * + * For SET_META action which stores value in reg_c[0], as the destination is + * also a flow metadata register (reg_b), adding a default flow is enough. Zero + * MARK ID means the default flow. The default flow looks like, + * - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @param flow + * Pointer to flow structure. + * @param[in] actions + * Pointer to the list of actions. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. + */ +static int +flow_mreg_update_copy_table(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + struct mlx5_flow_mreg_copy_resource *mcp_res; + const struct rte_flow_action_mark *mark; + + /* Check whether extensive metadata feature is engaged. */ + if (!config->dv_flow_en || + config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || + !mlx5_flow_ext_mreg_supported(dev) || + !priv->sh->dv_regc0_mask) + return 0; + /* Find MARK action. */ + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_FLAG: + mcp_res = flow_mreg_add_copy_action + (dev, MLX5_FLOW_MARK_DEFAULT, error); + if (!mcp_res) + return -rte_errno; + flow->mreg_copy = mcp_res; + if (dev->data->dev_started) { + mcp_res->appcnt++; + flow->copy_applied = 1; + } + return 0; + case RTE_FLOW_ACTION_TYPE_MARK: + mark = (const struct rte_flow_action_mark *) + actions->conf; + mcp_res = + flow_mreg_add_copy_action(dev, mark->id, error); + if (!mcp_res) + return -rte_errno; + flow->mreg_copy = mcp_res; + if (dev->data->dev_started) { + mcp_res->appcnt++; + flow->copy_applied = 1; + } + return 0; + default: + break; + } + } + return 0; +} + +#define MLX5_MAX_SPLIT_ACTIONS 24 +#define MLX5_MAX_SPLIT_ITEMS 24 + +/** + * Split the hairpin flow. + * Since HW can't support encap on Rx we move the encap to Tx. + * If the count action is after the encap then we also + * move the count action. 
+
+/**
+ * Split the hairpin flow.
+ * Since HW can't support encap on Rx, we move the encap to Tx.
+ * If the count action is after the encap, we also move the count
+ * action; in this case the count will also measure the outer bytes.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] actions_rx
+ *   Rx flow actions.
+ * @param[out] actions_tx
+ *   Tx flow actions.
+ * @param[out] pattern_tx
+ *   The pattern items for the Tx flow.
+ * @param[out] flow_id
+ *   The flow ID connected to this flow.
+ *
+ * @return
+ *   0 on success.
+ */
+static int
+flow_hairpin_split(struct rte_eth_dev *dev,
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_action actions_rx[],
+		   struct rte_flow_action actions_tx[],
+		   struct rte_flow_item pattern_tx[],
+		   uint32_t *flow_id)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct rte_flow_action_raw_encap *raw_encap;
+	const struct rte_flow_action_raw_decap *raw_decap;
+	struct mlx5_rte_flow_action_set_tag *set_tag;
+	struct rte_flow_action *tag_action;
+	struct mlx5_rte_flow_item_tag *tag_item;
+	struct rte_flow_item *item;
+	char *addr;
+	int encap = 0;
+
+	mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+			rte_memcpy(actions_tx, actions,
+				   sizeof(struct rte_flow_action));
+			actions_tx++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			if (encap) {
+				rte_memcpy(actions_tx, actions,
+					   sizeof(struct rte_flow_action));
+				actions_tx++;
+			} else {
+				rte_memcpy(actions_rx, actions,
+					   sizeof(struct rte_flow_action));
+				actions_rx++;
+			}
+			break;
+		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+			raw_encap = actions->conf;
+			if (raw_encap->size >
+			    (sizeof(struct rte_flow_item_eth) +
+			     sizeof(struct rte_flow_item_ipv4))) {
+				memcpy(actions_tx, actions,
+				       sizeof(struct rte_flow_action));
+				actions_tx++;
+				encap = 1;
+			} else {
+				rte_memcpy(actions_rx, actions,
+					   sizeof(struct rte_flow_action));
+				actions_rx++;
+			}
+			break;
+		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+			raw_decap = actions->conf;
+			if (raw_decap->size <
+			    (sizeof(struct rte_flow_item_eth) +
+			     sizeof(struct rte_flow_item_ipv4))) {
+				memcpy(actions_tx, actions,
+				       sizeof(struct rte_flow_action));
+				actions_tx++;
+			} else {
+				rte_memcpy(actions_rx, actions,
+					   sizeof(struct rte_flow_action));
+				actions_rx++;
+			}
+			break;
+		default:
+			rte_memcpy(actions_rx, actions,
+				   sizeof(struct rte_flow_action));
+			actions_rx++;
+			break;
+		}
+	}
+	/* Add set meta action and end action for the Rx flow. */
+	tag_action = actions_rx;
+	tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
+	actions_rx++;
+	rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
+	actions_rx++;
+	set_tag = (void *)actions_rx;
+	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
+	assert(set_tag->id > REG_NONE);
+	set_tag->data = *flow_id;
+	tag_action->conf = set_tag;
+	/* Create Tx item list. */
+	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
+	addr = (void *)&pattern_tx[2];
+	item = pattern_tx;
+	item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+	tag_item = (void *)addr;
+	tag_item->data = *flow_id;
+	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
+	assert(tag_item->id > REG_NONE);
+	item->spec = tag_item;
+	addr += sizeof(struct mlx5_rte_flow_item_tag);
+	tag_item = (void *)addr;
+	tag_item->data = UINT32_MAX;
+	tag_item->id = UINT16_MAX;
+	item->mask = tag_item;
+	addr += sizeof(struct mlx5_rte_flow_item_tag);
+	item->last = NULL;
+	item++;
+	item->type = RTE_FLOW_ITEM_TYPE_END;
+	return 0;
+}
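
To make the hairpin split concrete, here is a sketch of what flow_hairpin_split() produces for a typical offloaded rule; the action set is an illustrative assumption.

/*
 * Sketch: original hairpin rule
 *     pattern ... / actions RAW_ENCAP(full outer headers) / COUNT /
 *     QUEUE(hairpin) / END
 * is split into:
 *
 *   Rx flow (original pattern kept):
 *     actions QUEUE(hairpin) / SET_TAG(reg := flow_id) / END
 *   Tx flow:
 *     pattern  TAG(spec = flow_id, mask = UINT32_MAX) / END
 *     actions  RAW_ENCAP / COUNT / END
 *
 * COUNT lands on the Tx side only because it follows the encap in the
 * original list, so it accounts the encapsulated (outer) bytes.
 */
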
+
+/**
+ * The last stage of the splitting chain; it just creates the subflow
+ * without any modification.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Parent flow structure pointer.
+ * @param[in, out] sub_flow
+ *   Pointer to return the created subflow, may be NULL.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] external
+ *   This flow rule is created by request external to PMD.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @return
+ *   0 on success, negative value otherwise
+ */
+static int
+flow_create_split_inner(struct rte_eth_dev *dev,
+			struct rte_flow *flow,
+			struct mlx5_flow **sub_flow,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			bool external, struct rte_flow_error *error)
+{
+	struct mlx5_flow *dev_flow;
+
+	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+	if (!dev_flow)
+		return -rte_errno;
+	dev_flow->flow = flow;
+	dev_flow->external = external;
+	/* Subflow object was created, we must include it in the list. */
+	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+	if (sub_flow)
+		*sub_flow = dev_flow;
+	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
+}
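
The splitter defined next relies on its caller to provide a single buffer holding the rewritten action array followed by the SET_TAG and JUMP configuration objects the rewritten entries point into. A small sizing sketch, mirroring the allocation done later in flow_create_split_metadata(), may help; the helper name is illustrative only.

/*
 * Sketch: size of the split buffer handed to
 * flow_mreg_split_qrss_prep() - the rewritten action list (original
 * count plus one) followed by the two trailing conf objects.
 */
static inline size_t
example_qrss_split_buf_size(int actions_n)
{
	return sizeof(struct rte_flow_action) * (actions_n + 1) +
	       sizeof(struct rte_flow_action_set_tag) +
	       sizeof(struct rte_flow_action_jump);
}
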
+
+/**
+ * Split action list having QUEUE/RSS for metadata register copy.
+ *
+ * Once Q/RSS action is detected in user's action list, the flow action
+ * should be split in order to copy metadata registers, which will happen in
+ * RX_CP_TBL like,
+ *   - CQE->flow_tag := reg_c[1] (MARK)
+ *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
+ * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
+ * This is because the last action of each flow must be a terminal action
+ * (QUEUE, RSS or DROP).
+ *
+ * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
+ * stored and kept in the mlx5_flow structure per each sub_flow.
+ *
+ * The Q/RSS action is replaced with,
+ *   - SET_TAG, setting the allocated flow ID to reg_c[2].
+ * And the following JUMP action is added at the end,
+ *   - JUMP, to RX_CP_TBL.
+ *
+ * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL
+ * by the flow_create_split_metadata() routine. The flow will look like,
+ *   - If flow ID matches (reg_c[2]), perform Q/RSS.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[out] split_actions
+ *   Pointer to store split actions to jump to CP_TBL.
+ * @param[in] actions
+ *   Pointer to the list of original flow actions.
+ * @param[in] qrss
+ *   Pointer to the Q/RSS action.
+ * @param[in] actions_n
+ *   Number of original actions.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   non-zero unique flow_id on success, otherwise 0 and
+ *   error/rte_errno are set.
+ */
+static uint32_t
+flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
+			  struct rte_flow_action *split_actions,
+			  const struct rte_flow_action *actions,
+			  const struct rte_flow_action *qrss,
+			  int actions_n, struct rte_flow_error *error)
+{
+	struct mlx5_rte_flow_action_set_tag *set_tag;
+	struct rte_flow_action_jump *jump;
+	const int qrss_idx = qrss - actions;
+	uint32_t flow_id;
+	int ret = 0;
+
+	/*
+	 * Given actions will be split
+	 * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
+	 * - Add jump to mreg CP_TBL.
+	 * As a result, there will be one more action.
+	 */
+	++actions_n;
+	/*
+	 * Allocate the new subflow ID. This one is unique within
+	 * device and not shared with representors. Otherwise,
+	 * we would have to resolve multi-thread access synchronization
+	 * issues. Each flow on the shared device is appended
+	 * with source vport identifier, so the resulting
+	 * flows will be unique in the shared (by master and
+	 * representors) domain even if they have coinciding
+	 * IDs.
+	 */
+	flow_id = flow_qrss_get_id(dev);
+	if (!flow_id) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "can't allocate id "
+				   "for split Q/RSS subflow");
+		return 0;
+	}
+	/* Internal SET_TAG action to set flow ID. */
+	set_tag = (void *)(split_actions + actions_n);
+	*set_tag = (struct mlx5_rte_flow_action_set_tag){
+		.data = flow_id,
+	};
+	ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
+	if (ret < 0)
+		return 0;
+	set_tag->id = ret;
+	/* JUMP action to jump to mreg copy table (CP_TBL). */
+	jump = (void *)(set_tag + 1);
+	*jump = (struct rte_flow_action_jump){
+		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
+	};
+	/* Construct new actions array. */
+	memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
+	/* Replace QUEUE/RSS action. */
+	split_actions[qrss_idx] = (struct rte_flow_action){
+		.type = MLX5_RTE_FLOW_ACTION_TYPE_TAG,
+		.conf = set_tag,
+	};
+	split_actions[actions_n - 2] = (struct rte_flow_action){
+		.type = RTE_FLOW_ACTION_TYPE_JUMP,
+		.conf = jump,
+	};
+	split_actions[actions_n - 1] = (struct rte_flow_action){
+		.type = RTE_FLOW_ACTION_TYPE_END,
+	};
+	return flow_id;
+}
+
+/**
+ * Extend the given action list for Tx metadata copy.
+ *
+ * Copy the given action list to the ext_actions and add flow metadata register
+ * copy action in order to copy reg_a set by WQE to reg_c[0].
+ *
+ * @param[out] ext_actions
+ *   Pointer to the extended action list.
+ * @param[in] actions
+ *   Pointer to the list of actions.
+ * @param[in] actions_n
+ *   Number of actions in the list.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * + * @return + * 0 on success, negative value otherwise + */ +static int +flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, + struct rte_flow_action *ext_actions, + const struct rte_flow_action *actions, + int actions_n, struct rte_flow_error *error) +{ + struct mlx5_flow_action_copy_mreg *cp_mreg = + (struct mlx5_flow_action_copy_mreg *) + (ext_actions + actions_n + 1); + int ret; + + ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); + if (ret < 0) + return ret; + cp_mreg->dst = ret; + ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error); + if (ret < 0) + return ret; + cp_mreg->src = ret; + memcpy(ext_actions, actions, + sizeof(*ext_actions) * actions_n); + ext_actions[actions_n - 1] = (struct rte_flow_action){ + .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = cp_mreg, + }; + ext_actions[actions_n] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_END, + }; + return 0; +} + +/** + * The splitting for metadata feature. + * + * - Q/RSS action on NIC Rx should be split in order to pass by + * the mreg copy table (RX_CP_TBL) and then it jumps to the + * action table (RX_ACT_TBL) which has the split Q/RSS action. + * + * - All the actions on NIC Tx should have a mreg copy action to + * copy reg_a from WQE to reg_c[0]. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] flow + * Parent flow structure pointer. + * @param[in] attr + * Flow rule attributes. + * @param[in] items + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[in] external + * This flow rule is created by request external to PMD. + * @param[out] error + * Perform verbose error reporting if not NULL. + * @return + * 0 on success, negative value otherwise + */ +static int +flow_create_split_metadata(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external, struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + const struct rte_flow_action *qrss = NULL; + struct rte_flow_action *ext_actions = NULL; + struct mlx5_flow *dev_flow = NULL; + uint32_t qrss_id = 0; + size_t act_size; + int actions_n; + int ret; + + /* Check whether extensive metadata feature is engaged. */ + if (!config->dv_flow_en || + config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || + !mlx5_flow_ext_mreg_supported(dev)) + return flow_create_split_inner(dev, flow, NULL, attr, items, + actions, external, error); + actions_n = flow_parse_qrss_action(actions, &qrss); + if (qrss) { + /* Exclude hairpin flows from splitting. */ + if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + const struct rte_flow_action_queue *queue; + + queue = qrss->conf; + if (mlx5_rxq_get_type(dev, queue->index) == + MLX5_RXQ_TYPE_HAIRPIN) + qrss = NULL; + } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { + const struct rte_flow_action_rss *rss; + + rss = qrss->conf; + if (mlx5_rxq_get_type(dev, rss->queue[0]) == + MLX5_RXQ_TYPE_HAIRPIN) + qrss = NULL; + } + } + if (qrss) { + /* + * Q/RSS action on NIC Rx should be split in order to pass by + * the mreg copy table (RX_CP_TBL) and then it jumps to the + * action table (RX_ACT_TBL) which has the split Q/RSS action. 
+		 */
+		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
+			   sizeof(struct rte_flow_action_set_tag) +
+			   sizeof(struct rte_flow_action_jump);
+		ext_actions = rte_zmalloc(__func__, act_size, 0);
+		if (!ext_actions)
+			return rte_flow_error_set(error, ENOMEM,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  NULL, "no memory to split "
+						  "metadata flow");
+		/*
+		 * Create the new actions list with removed Q/RSS action
+		 * and appended set tag and jump to register copy table
+		 * (RX_CP_TBL). We should preallocate unique tag ID here
+		 * in advance, because it is needed for set tag action.
+		 */
+		qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
+						    qrss, actions_n, error);
+		if (!qrss_id) {
+			ret = -rte_errno;
+			goto exit;
+		}
+	} else if (attr->egress && !attr->transfer) {
+		/*
+		 * All the actions on NIC Tx should have a metadata register
+		 * copy action to copy reg_a from WQE to reg_c[meta].
+		 */
+		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
+			   sizeof(struct mlx5_flow_action_copy_mreg);
+		ext_actions = rte_zmalloc(__func__, act_size, 0);
+		if (!ext_actions)
+			return rte_flow_error_set(error, ENOMEM,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  NULL, "no memory to split "
+						  "metadata flow");
+		/* Create the action list appended with copy register. */
+		ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
+					     actions_n, error);
+		if (ret < 0)
+			goto exit;
+	}
+	/* Add the unmodified original or prefix subflow. */
+	ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
+				      ext_actions ? ext_actions : actions,
+				      external, error);
+	if (ret < 0)
+		goto exit;
+	assert(dev_flow);
+	if (qrss_id) {
+		const struct rte_flow_attr q_attr = {
+			.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
+			.ingress = 1,
+		};
+		/* Internal PMD action to set register. */
+		struct mlx5_rte_flow_item_tag q_tag_spec = {
+			.data = qrss_id,
+			.id = 0,
+		};
+		struct rte_flow_item q_items[] = {
+			{
+				.type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+				.spec = &q_tag_spec,
+				.last = NULL,
+				.mask = NULL,
+			},
+			{
+				.type = RTE_FLOW_ITEM_TYPE_END,
+			},
+		};
+		struct rte_flow_action q_actions[] = {
+			{
+				.type = qrss->type,
+				.conf = qrss->conf,
+			},
+			{
+				.type = RTE_FLOW_ACTION_TYPE_END,
+			},
+		};
+		uint64_t hash_fields = dev_flow->hash_fields;
+		/*
+		 * Put the unique id in the prefix flow: the id is freed
+		 * once the prefix flow is destroyed and no actual flows
+		 * remain with this id, so that identifier reallocation
+		 * becomes possible (for example, in other threads).
+		 */
+		dev_flow->qrss_id = qrss_id;
+		qrss_id = 0;
+		dev_flow = NULL;
+		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
+		if (ret < 0)
+			goto exit;
+		q_tag_spec.id = ret;
+		/* Add suffix subflow to execute Q/RSS. */
+		ret = flow_create_split_inner(dev, flow, &dev_flow,
+					      &q_attr, q_items, q_actions,
+					      external, error);
+		if (ret < 0)
+			goto exit;
+		assert(dev_flow);
+		dev_flow->hash_fields = hash_fields;
+	}
+
+exit:
+	/*
+	 * We do not destroy the partially created sub_flows in case of error.
+	 * These ones are included into parent flow list and will be destroyed
+	 * by flow_drv_destroy.
+	 */
+	flow_qrss_free_id(dev, qrss_id);
+	rte_free(ext_actions);
+	return ret;
 }
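
A worked example of the whole metadata split implemented above (an illustrative sketch; the mark id, flow id and queue set are arbitrary assumptions):

/*
 * original rule:   MARK(id=1) / RSS(queues 0..3) / END
 *
 * prefix subflow (user pattern, user group):
 *     MARK(id=1) / SET_TAG(reg_c[2] := flow_id) / JUMP(RX_CP_TBL) / END
 * copy flow in RX_CP_TBL (added by flow_mreg_update_copy_table()):
 *     match reg_c[1] == 1 ->
 *     flow_tag := 1, reg_b := reg_c[0], JUMP(RX_ACT_TBL)
 * suffix subflow in RX_ACT_TBL (created just above):
 *     match TAG(reg_c[2] == flow_id) -> RSS(queues 0..3) / END
 */
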
 
 /**
- * Get RSS action from the action list.
+ * Split the flow into a subflow set. The splitters might be linked
+ * in a chain, like this:
+ * flow_create_split_outer() calls:
+ *   flow_create_split_meter() calls:
+ *     flow_create_split_metadata(meter_subflow_0) calls:
+ *       flow_create_split_inner(metadata_subflow_0)
+ *       flow_create_split_inner(metadata_subflow_1)
+ *       flow_create_split_inner(metadata_subflow_2)
+ *     flow_create_split_metadata(meter_subflow_1) calls:
+ *       flow_create_split_inner(metadata_subflow_0)
+ *       flow_create_split_inner(metadata_subflow_1)
+ *       flow_create_split_inner(metadata_subflow_2)
  *
- * @param[in] actions
- *   Pointer to the list of actions.
+ * This provides a flexible way to add new levels of flow splitting.
+ * All successfully created subflows are included in the
+ * parent flow dev_flow list.
  *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Parent flow structure pointer.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] external
+ *   This flow rule is created by request external to PMD.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
  * @return
- *   Pointer to the RSS action if exist, else return NULL.
+ *   0 on success, negative value otherwise
  */
-static const struct rte_flow_action_rss*
-flow_get_rss_action(const struct rte_flow_action actions[])
-{
-	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
-		switch (actions->type) {
-		case RTE_FLOW_ACTION_TYPE_RSS:
-			return (const struct rte_flow_action_rss *)
-			       actions->conf;
-		default:
-			break;
-		}
-	}
-	return NULL;
-}
-
-static unsigned int
-find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
+static int
+flow_create_split_outer(struct rte_eth_dev *dev,
+			struct rte_flow *flow,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			bool external, struct rte_flow_error *error)
 {
-	const struct rte_flow_item *item;
-	unsigned int has_vlan = 0;
+	int ret;
 
-	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
-			has_vlan = 1;
-			break;
-		}
-	}
-	if (has_vlan)
-		return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
-				       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
-	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
-			       MLX5_EXPANSION_ROOT_OUTER;
+	ret = flow_create_split_metadata(dev, flow, attr, items,
+					 actions, external, error);
+	assert(ret <= 0);
+	return ret;
 }
 
 /**
@@ -2016,13 +3767,18 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
  * @param dev
  *   Pointer to Ethernet device.
  * @param list
- *   Pointer to a TAILQ flow list.
+ *   Pointer to a TAILQ flow list. If this parameter is NULL,
+ *   no list insertion occurs, the flow is just created and
+ *   it is the caller's responsibility to track the
+ *   created flow.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] items
  *   Pattern specification (list terminated by the END pattern item).
  * @param[in] actions
  *   Associated actions (list terminated by the END action).
+ * @param[in] external
+ *   This flow rule is created by request external to PMD.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
* @@ -2034,8 +3790,9 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - struct rte_flow_error *error) + bool external, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow = NULL; struct mlx5_flow *dev_flow; const struct rte_flow_action_rss *rss; @@ -2043,26 +3800,69 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, struct rte_flow_expand_rss buf; uint8_t buffer[2048]; } expand_buffer; + union { + struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; + uint8_t buffer[2048]; + } actions_rx; + union { + struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; + uint8_t buffer[2048]; + } actions_hairpin_tx; + union { + struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; + uint8_t buffer[2048]; + } items_tx; struct rte_flow_expand_rss *buf = &expand_buffer.buf; + const struct rte_flow_action *p_actions_rx = actions; int ret; uint32_t i; uint32_t flow_size; + int hairpin_flow = 0; + uint32_t hairpin_id = 0; + struct rte_flow_attr attr_tx = { .priority = 0 }; - ret = flow_drv_validate(dev, attr, items, actions, error); + hairpin_flow = flow_check_hairpin_split(dev, attr, actions); + if (hairpin_flow > 0) { + if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { + rte_errno = EINVAL; + return NULL; + } + flow_hairpin_split(dev, actions, actions_rx.actions, + actions_hairpin_tx.actions, items_tx.items, + &hairpin_id); + p_actions_rx = actions_rx.actions; + } + ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, + error); if (ret < 0) - return NULL; + goto error_before_flow; flow_size = sizeof(struct rte_flow); - rss = flow_get_rss_action(actions); + rss = flow_get_rss_action(p_actions_rx); if (rss) flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), sizeof(void *)); else flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); flow = rte_calloc(__func__, 1, flow_size, 0); + if (!flow) { + rte_errno = ENOMEM; + goto error_before_flow; + } flow->drv_type = flow_get_drv_type(dev, attr); + if (hairpin_id != 0) + flow->hairpin_flow_id = hairpin_id; assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && flow->drv_type < MLX5_FLOW_TYPE_MAX); - flow->queue = (void *)(flow + 1); + flow->rss.queue = (void *)(flow + 1); + if (rss) { + /* + * The following information is required by + * mlx5_flow_hashfields_adjust() in advance. + */ + flow->rss.level = rss->level; + /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ + flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; + } LIST_INIT(&flow->dev_flows); if (rss && rss->types) { unsigned int graph_root; @@ -2079,28 +3879,73 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, buf->entry[0].pattern = (void *)(uintptr_t)items; } for (i = 0; i < buf->entries; ++i) { - dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern, - actions, error); + /* + * The splitter may create multiple dev_flows, + * depending on configuration. In the simplest + * case it just creates unmodified original flow. + */ + ret = flow_create_split_outer(dev, flow, attr, + buf->entry[i].pattern, + p_actions_rx, external, + error); + if (ret < 0) + goto error; + } + /* Create the tx flow. 
*/
+	if (hairpin_flow) {
+		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
+		attr_tx.ingress = 0;
+		attr_tx.egress = 1;
+		dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
+					    actions_hairpin_tx.actions, error);
 		if (!dev_flow)
 			goto error;
 		dev_flow->flow = flow;
+		dev_flow->external = 0;
 		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
-		ret = flow_drv_translate(dev, dev_flow, attr,
-					 buf->entry[i].pattern,
-					 actions, error);
+		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
+					 items_tx.items,
+					 actions_hairpin_tx.actions, error);
 		if (ret < 0)
 			goto error;
 	}
+	/*
+	 * Update the metadata register copy table. If extensive
+	 * metadata feature is enabled and registers are supported,
+	 * we might create an extra rte_flow for each unique
+	 * MARK/FLAG action ID.
+	 *
+	 * The table is updated for ingress flows only, because
+	 * egress flows belong to a different device and the
+	 * copy table should be updated in the peer NIC Rx domain.
+	 */
+	if (attr->ingress &&
+	    (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
+		ret = flow_mreg_update_copy_table(dev, flow, actions, error);
+		if (ret)
+			goto error;
+	}
 	if (dev->data->dev_started) {
 		ret = flow_drv_apply(dev, flow, error);
 		if (ret < 0)
 			goto error;
 	}
-	TAILQ_INSERT_TAIL(list, flow, next);
+	if (list)
+		TAILQ_INSERT_TAIL(list, flow, next);
 	flow_rxq_flags_set(dev, flow);
 	return flow;
+error_before_flow:
+	if (hairpin_id)
+		mlx5_flow_id_release(priv->sh->flow_id_pool,
+				     hairpin_id);
+	return NULL;
 error:
+	assert(flow);
+	flow_mreg_del_copy_action(dev, flow);
 	ret = rte_errno; /* Save rte_errno before cleanup. */
+	if (flow->hairpin_flow_id)
+		mlx5_flow_id_release(priv->sh->flow_id_pool,
+				     flow->hairpin_flow_id);
 	assert(flow);
 	flow_drv_destroy(dev, flow);
 	rte_free(flow);
@@ -2108,6 +3953,55 @@ error:
 	return NULL;
 }
 
+/**
+ * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
+ * incoming packets to table 1.
+ *
+ * Other flow rules, requested for group n, will be created in
+ * e-switch table n+1.
+ * A jump action to e-switch group n is translated to a jump to table n+1.
+ *
+ * Used when working in switchdev mode, to utilise advantages of table 1
+ * and above.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   Pointer to flow on success, NULL otherwise and rte_errno is set.
+ */
+struct rte_flow *
+mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
+{
+	const struct rte_flow_attr attr = {
+		.group = 0,
+		.priority = 0,
+		.ingress = 1,
+		.egress = 0,
+		.transfer = 1,
+	};
+	const struct rte_flow_item pattern = {
+		.type = RTE_FLOW_ITEM_TYPE_END,
+	};
+	struct rte_flow_action_jump jump = {
+		.group = 1,
+	};
+	const struct rte_flow_action actions[] = {
+		{
+			.type = RTE_FLOW_ACTION_TYPE_JUMP,
+			.conf = &jump,
+		},
+		{
+			.type = RTE_FLOW_ACTION_TYPE_END,
+		},
+	};
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow_error error;
+
+	return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
+				actions, false, &error);
+}
+
 /**
  * Create a flow.
  *
@@ -2121,9 +4015,10 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	return flow_list_create(dev,
-				&((struct priv *)dev->data->dev_private)->flows,
-				attr, items, actions, error);
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	return flow_list_create(dev, &priv->flows,
+				attr, items, actions, true, error);
 }
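
A sketch of how the table-zero rule above is typically installed when a port starts in switchdev mode; the caller below is a hypothetical example, not the PMD's actual start sequence.

/*
 * Sketch: install the e-switch table 0 -> table 1 jump on start.
 * The created flow is tracked in priv->ctrl_flows and is flushed
 * together with the other control flows.
 */
static int
example_enable_fdb_root_jump(struct rte_eth_dev *dev)
{
	struct rte_flow *flow = mlx5_flow_create_esw_table_zero_flow(dev);

	return flow ? 0 : -rte_errno;
}
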
 
 /**
@@ -2132,7 +4027,8 @@ mlx5_flow_create(struct rte_eth_dev *dev,
  * @param dev
  *   Pointer to Ethernet device.
  * @param list
- *   Pointer to a TAILQ flow list.
+ *   Pointer to a TAILQ flow list. If this parameter is NULL,
+ *   the flow is not removed from the list.
  * @param[in] flow
  *   Flow to destroy.
  */
@@ -2140,14 +4036,21 @@ static void
 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
 		  struct rte_flow *flow)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
+
 	/*
 	 * Update RX queue flags only if port is started, otherwise it is
 	 * already clean.
 	 */
 	if (dev->data->dev_started)
 		flow_rxq_flags_trim(dev, flow);
+	if (flow->hairpin_flow_id)
+		mlx5_flow_id_release(priv->sh->flow_id_pool,
+				     flow->hairpin_flow_id);
 	flow_drv_destroy(dev, flow);
-	TAILQ_REMOVE(list, flow, next);
+	if (list)
+		TAILQ_REMOVE(list, flow, next);
+	flow_mreg_del_copy_action(dev, flow);
 	rte_free(flow->fdir);
 	rte_free(flow);
 }
@@ -2184,8 +4087,11 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
 {
 	struct rte_flow *flow;
 
-	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
+	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
 		flow_drv_remove(dev, flow);
+		flow_mreg_stop_copy_action(dev, flow);
+	}
+	flow_mreg_del_default_copy_action(dev);
 	flow_rxq_flags_clear(dev);
 }
 
@@ -2207,7 +4113,15 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
 	struct rte_flow_error error;
 	int ret = 0;
 
+	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
+	ret = flow_mreg_add_default_copy_action(dev, &error);
+	if (ret < 0)
+		return -rte_errno;
+	/* Apply flows created by application. */
 	TAILQ_FOREACH(flow, list, next) {
+		ret = flow_mreg_start_copy_action(dev, flow);
+		if (ret < 0)
+			goto error;
 		ret = flow_drv_apply(dev, flow, &error);
 		if (ret < 0)
 			goto error;
@@ -2232,7 +4146,7 @@ error:
 int
 mlx5_flow_verify(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow;
 	int ret = 0;
 
@@ -2244,6 +4158,66 @@ mlx5_flow_verify(struct rte_eth_dev *dev)
 	return ret;
 }
 
+/**
+ * Enable default hairpin egress flow.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param queue
+ *   The queue index.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
+			    uint32_t queue)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct rte_flow_attr attr = {
+		.egress = 1,
+		.priority = 0,
+	};
+	struct mlx5_rte_flow_item_tx_queue queue_spec = {
+		.queue = queue,
+	};
+	struct mlx5_rte_flow_item_tx_queue queue_mask = {
+		.queue = UINT32_MAX,
+	};
+	struct rte_flow_item items[] = {
+		{
+			.type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+			.spec = &queue_spec,
+			.last = NULL,
+			.mask = &queue_mask,
+		},
+		{
+			.type = RTE_FLOW_ITEM_TYPE_END,
+		},
+	};
+	struct rte_flow_action_jump jump = {
+		.group = MLX5_HAIRPIN_TX_TABLE,
+	};
+	struct rte_flow_action actions[2];
+	struct rte_flow *flow;
+	struct rte_flow_error error;
+
+	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
+	actions[0].conf = &jump;
+	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
+	flow = flow_list_create(dev, &priv->ctrl_flows,
+				&attr, items, actions, false, &error);
+	if (!flow) {
+		DRV_LOG(DEBUG,
+			"Failed to create ctrl flow: rte_errno(%d),"
+			" type(%d), message(%s)",
+			rte_errno, error.type,
+			error.message ? error.message : " (no stated reason)");
+		return -rte_errno;
+	}
+	return 0;
+}
+
 /**
  * Enable a control flow configured from the control plane.
* @@ -2268,7 +4242,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct rte_flow_item_vlan *vlan_spec, struct rte_flow_item_vlan *vlan_mask) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_attr attr = { .ingress = 1, .priority = MLX5_FLOW_PRIO_RSVD, @@ -2315,13 +4289,12 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, unsigned int i; if (!priv->reta_idx_n || !priv->rxqs_n) { - rte_errno = EINVAL; - return -rte_errno; + return 0; } for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; flow = flow_list_create(dev, &priv->ctrl_flows, - &attr, items, actions, &error); + &attr, items, actions, false, &error); if (!flow) return -rte_errno; return 0; @@ -2359,7 +4332,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error __rte_unused) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; flow_list_destroy(dev, &priv->flows, flow); return 0; @@ -2375,7 +4348,7 @@ int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error __rte_unused) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; mlx5_flow_list_flush(dev, &priv->flows); return 0; @@ -2392,7 +4365,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, int enable, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; if (dev->data->dev_started) { rte_flow_error_set(error, EBUSY, @@ -2470,7 +4443,7 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter, struct mlx5_fdir *attributes) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_eth_fdir_input *input = &fdir_filter->input; const struct rte_eth_fdir_masks *mask = &dev->data->dev_conf.fdir_conf.mask; @@ -2513,13 +4486,13 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev, case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: - attributes->l3.ipv4.hdr = (struct ipv4_hdr){ + attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ .src_addr = input->flow.ip4_flow.src_ip, .dst_addr = input->flow.ip4_flow.dst_ip, .time_to_live = input->flow.ip4_flow.ttl, .type_of_service = input->flow.ip4_flow.tos, }; - attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){ + attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ .src_addr = mask->ipv4_mask.src_ip, .dst_addr = mask->ipv4_mask.dst_ip, .time_to_live = mask->ipv4_mask.ttl, @@ -2535,7 +4508,7 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev, case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: - attributes->l3.ipv6.hdr = (struct ipv6_hdr){ + attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ .hop_limits = input->flow.ipv6_flow.hop_limits, .proto = input->flow.ipv6_flow.proto, }; @@ -2567,11 +4540,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev, /* Handle L4. 
*/ switch (fdir_filter->input.flow_type) { case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: - attributes->l4.udp.hdr = (struct udp_hdr){ + attributes->l4.udp.hdr = (struct rte_udp_hdr){ .src_port = input->flow.udp4_flow.src_port, .dst_port = input->flow.udp4_flow.dst_port, }; - attributes->l4_mask.udp.hdr = (struct udp_hdr){ + attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2582,11 +4555,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev, }; break; case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: - attributes->l4.tcp.hdr = (struct tcp_hdr){ + attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ .src_port = input->flow.tcp4_flow.src_port, .dst_port = input->flow.tcp4_flow.dst_port, }; - attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2597,11 +4570,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev, }; break; case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: - attributes->l4.udp.hdr = (struct udp_hdr){ + attributes->l4.udp.hdr = (struct rte_udp_hdr){ .src_port = input->flow.udp6_flow.src_port, .dst_port = input->flow.udp6_flow.dst_port, }; - attributes->l4_mask.udp.hdr = (struct udp_hdr){ + attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2612,11 +4585,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev, }; break; case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: - attributes->l4.tcp.hdr = (struct tcp_hdr){ + attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ .src_port = input->flow.tcp6_flow.src_port, .dst_port = input->flow.tcp6_flow.dst_port, }; - attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2687,7 +4660,7 @@ flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) static struct rte_flow * flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow = NULL; assert(fdir_flow); @@ -2716,7 +4689,7 @@ static int flow_fdir_filter_add(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_fdir *fdir_flow; struct rte_flow *flow; int ret; @@ -2735,7 +4708,8 @@ flow_fdir_filter_add(struct rte_eth_dev *dev, goto error; } flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, - fdir_flow->items, fdir_flow->actions, NULL); + fdir_flow->items, fdir_flow->actions, true, + NULL); if (!flow) goto error; assert(!flow->fdir); @@ -2763,7 +4737,7 @@ static int flow_fdir_filter_delete(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow; struct mlx5_fdir fdir_flow = { .attr.group = 0, @@ -2816,7 +4790,7 @@ flow_fdir_filter_update(struct rte_eth_dev *dev, static void flow_fdir_filter_flush(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; mlx5_flow_list_flush(dev, &priv->flows); } @@ -2936,3 +4910,296 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, } return 0; } + +/** + * Create the needed meter and suffix tables. + * + * @param[in] dev + * Pointer to Ethernet device. 
+ * + * @return + * Pointer to table set on success, NULL otherwise. + */ +struct mlx5_meter_domains_infos * +mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev) +{ + const struct mlx5_flow_driver_ops *fops; + + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + return fops->create_mtr_tbls(dev); +} + +/** + * Destroy the meter table set. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] tbl + * Pointer to the meter table set. + * + * @return + * 0 on success. + */ +int +mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, + struct mlx5_meter_domains_infos *tbls) +{ + const struct mlx5_flow_driver_ops *fops; + + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + return fops->destroy_mtr_tbls(dev, tbls); +} + +#define MLX5_POOL_QUERY_FREQ_US 1000000 + +/** + * Set the periodic procedure for triggering asynchronous batch queries for all + * the counter pools. + * + * @param[in] sh + * Pointer to mlx5_ibv_shared object. + */ +void +mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) +{ + struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); + uint32_t pools_n = rte_atomic16_read(&cont->n_valid); + uint32_t us; + + cont = MLX5_CNT_CONTAINER(sh, 1, 0); + pools_n += rte_atomic16_read(&cont->n_valid); + us = MLX5_POOL_QUERY_FREQ_US / pools_n; + DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us); + if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { + sh->cmng.query_thread_on = 0; + DRV_LOG(ERR, "Cannot reinitialize query alarm"); + } else { + sh->cmng.query_thread_on = 1; + } +} + +/** + * The periodic procedure for triggering asynchronous batch queries for all the + * counter pools. This function is probably called by the host thread. + * + * @param[in] arg + * The parameter for the alarm process. + */ +void +mlx5_flow_query_alarm(void *arg) +{ + struct mlx5_ibv_shared *sh = arg; + struct mlx5_devx_obj *dcs; + uint16_t offset; + int ret; + uint8_t batch = sh->cmng.batch; + uint16_t pool_index = sh->cmng.pool_index; + struct mlx5_pools_container *cont; + struct mlx5_pools_container *mcont; + struct mlx5_flow_counter_pool *pool; + + if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) + goto set_alarm; +next_container: + cont = MLX5_CNT_CONTAINER(sh, batch, 1); + mcont = MLX5_CNT_CONTAINER(sh, batch, 0); + /* Check if resize was done and need to flip a container. */ + if (cont != mcont) { + if (cont->pools) { + /* Clean the old container. */ + rte_free(cont->pools); + memset(cont, 0, sizeof(*cont)); + } + rte_cio_wmb(); + /* Flip the host container. */ + sh->cmng.mhi[batch] ^= (uint8_t)2; + cont = mcont; + } + if (!cont->pools) { + /* 2 empty containers case is unexpected. */ + if (unlikely(batch != sh->cmng.batch)) + goto set_alarm; + batch ^= 0x1; + pool_index = 0; + goto next_container; + } + pool = cont->pools[pool_index]; + if (pool->raw_hw) + /* There is a pool query in progress. */ + goto set_alarm; + pool->raw_hw = + LIST_FIRST(&sh->cmng.free_stat_raws); + if (!pool->raw_hw) + /* No free counter statistics raw memory. */ + goto set_alarm; + dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read + (&pool->a64_dcs); + offset = batch ? 
0 : dcs->id % MLX5_COUNTERS_PER_POOL;
+	ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
+					       offset, NULL, NULL,
+					       pool->raw_hw->mem_mng->dm->id,
+					       (void *)(uintptr_t)
+					       (pool->raw_hw->data + offset),
+					       sh->devx_comp,
+					       (uint64_t)(uintptr_t)pool);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
+			" %d", pool->min_dcs->id);
+		pool->raw_hw = NULL;
+		goto set_alarm;
+	}
+	pool->raw_hw->min_dcs_id = dcs->id;
+	LIST_REMOVE(pool->raw_hw, next);
+	sh->cmng.pending_queries++;
+	pool_index++;
+	if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
+		batch ^= 0x1;
+		pool_index = 0;
+	}
+set_alarm:
+	sh->cmng.batch = batch;
+	sh->cmng.pool_index = pool_index;
+	mlx5_set_query_alarm(sh);
+}
+
+/**
+ * Handler for the HW response with ready values from an asynchronous batch
+ * query. This function is probably called by the host thread.
+ *
+ * @param[in] sh
+ *   The pointer to the shared IB device context.
+ * @param[in] async_id
+ *   The Devx async ID.
+ * @param[in] status
+ *   The status of the completion.
+ */
+void
+mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
+				  uint64_t async_id, int status)
+{
+	struct mlx5_flow_counter_pool *pool =
+		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
+	struct mlx5_counter_stats_raw *raw_to_free;
+
+	if (unlikely(status)) {
+		raw_to_free = pool->raw_hw;
+	} else {
+		raw_to_free = pool->raw;
+		rte_spinlock_lock(&pool->sl);
+		pool->raw = pool->raw_hw;
+		rte_spinlock_unlock(&pool->sl);
+		rte_atomic64_add(&pool->query_gen, 1);
+		/* Be sure the new raw counters data is updated in memory. */
+		rte_cio_wmb();
+	}
+	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
+	pool->raw_hw = NULL;
+	sh->cmng.pending_queries--;
+}
+
+/**
+ * Translate the rte_flow group index to HW table value.
+ *
+ * @param[in] attributes
+ *   Pointer to flow attributes.
+ * @param[in] external
+ *   Value is part of flow rule created by request external to PMD.
+ * @param[in] group
+ *   rte_flow group index value.
+ * @param[out] table
+ *   HW table value.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
+			 uint32_t group, uint32_t *table,
+			 struct rte_flow_error *error)
+{
+	if (attributes->transfer && external) {
+		if (group == UINT32_MAX)
+			return rte_flow_error_set
+					(error, EINVAL,
+					 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+					 NULL,
+					 "group index not supported");
+		*table = group + 1;
+	} else {
+		*table = group;
+	}
+	return 0;
+}
+
+/**
+ * Discover availability of metadata reg_c's.
+ *
+ * Iteratively use test flows to check availability.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_config *config = &priv->config;
+	enum modify_reg idx;
+	int n = 0;
+
+	/* reg_c[0] and reg_c[1] are reserved. */
+	config->flow_mreg_c[n++] = REG_C_0;
+	config->flow_mreg_c[n++] = REG_C_1;
+	/* Discover availability of other reg_c's. 
*/ + for (idx = REG_C_2; idx <= REG_C_7; ++idx) { + struct rte_flow_attr attr = { + .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, + .priority = MLX5_FLOW_PRIO_RSVD, + .ingress = 1, + }; + struct rte_flow_item items[] = { + [0] = { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }; + struct rte_flow_action actions[] = { + [0] = { + .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = &(struct mlx5_flow_action_copy_mreg){ + .src = REG_C_1, + .dst = idx, + }, + }, + [1] = { + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &(struct rte_flow_action_jump){ + .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, + }, + }, + [2] = { + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; + struct rte_flow *flow; + struct rte_flow_error error; + + if (!config->dv_flow_en) + break; + /* Create internal flow, validation skips copy action. */ + flow = flow_list_create(dev, NULL, &attr, items, + actions, false, &error); + if (!flow) + continue; + if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL)) + config->flow_mreg_c[n++] = idx; + flow_list_destroy(dev, NULL, flow); + } + for (; n < MLX5_MREG_C_NUM; ++n) + config->flow_mreg_c[n] = REG_NONE; + return 0; +}
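
To close the loop on the discovery logic above, here is a hedged sketch of how the probed register map can be consumed; the helper name is illustrative only, while flow_mreg_c[], MLX5_MREG_C_NUM and REG_NONE are the identifiers used by the loop itself.

/*
 * Sketch: count the reg_c registers the discovery loop above found
 * usable.  Entries 0 and 1 are always REG_C_0/REG_C_1; extensive
 * metadata support additionally requires a valid flow_mreg_c[2]
 * (see mlx5_flow_ext_mreg_supported()).
 */
static inline unsigned int
example_count_mreg_c(const struct mlx5_dev_config *config)
{
	unsigned int i, n = 0;

	for (i = 0; i < MLX5_MREG_C_NUM; ++i)
		if (config->flow_mreg_c[i] != REG_NONE)
			++n;
	return n;
}
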