X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=167293920051aea71196aa47848eda1f75ec2294;hb=08dfff78f200bb9f077b0006ef05ab59dbc40ac8;hp=84e6f3048cee2efb9577b362cbf04bad7a86e0b7;hpb=16f4aa57ca381c4283826cdfce2cd4e172744ca7;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 84e6f3048c..1672939200 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -76,6 +76,7 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = { [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, + [MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops, #endif [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops @@ -164,128 +165,143 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item) return false; } +/** + * Network Service Header (NSH) and its next protocol values + * are described in RFC-8393. + */ +static enum rte_flow_item_type +mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask) +{ + enum rte_flow_item_type type; + + switch (proto_mask & proto_spec) { + case RTE_VXLAN_GPE_TYPE_IPV4: + type = RTE_FLOW_ITEM_TYPE_IPV4; + break; + case RTE_VXLAN_GPE_TYPE_IPV6: + type = RTE_VXLAN_GPE_TYPE_IPV6; + break; + case RTE_VXLAN_GPE_TYPE_ETH: + type = RTE_FLOW_ITEM_TYPE_ETH; + break; + default: + type = RTE_FLOW_ITEM_TYPE_END; + } + return type; +} + +static enum rte_flow_item_type +mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask) +{ + enum rte_flow_item_type type; + + switch (proto_mask & proto_spec) { + case IPPROTO_UDP: + type = RTE_FLOW_ITEM_TYPE_UDP; + break; + case IPPROTO_TCP: + type = RTE_FLOW_ITEM_TYPE_TCP; + break; + case IPPROTO_IP: + type = RTE_FLOW_ITEM_TYPE_IPV4; + break; + case IPPROTO_IPV6: + type = RTE_FLOW_ITEM_TYPE_IPV6; + break; + default: + type = RTE_FLOW_ITEM_TYPE_END; + } + return type; +} + +static enum rte_flow_item_type +mlx5_ethertype_to_item_type(rte_be16_t type_spec, + rte_be16_t type_mask, bool is_tunnel) +{ + enum rte_flow_item_type type; + + switch (rte_be_to_cpu_16(type_spec & type_mask)) { + case RTE_ETHER_TYPE_TEB: + type = is_tunnel ? + RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END; + break; + case RTE_ETHER_TYPE_VLAN: + type = !is_tunnel ? + RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END; + break; + case RTE_ETHER_TYPE_IPV4: + type = RTE_FLOW_ITEM_TYPE_IPV4; + break; + case RTE_ETHER_TYPE_IPV6: + type = RTE_FLOW_ITEM_TYPE_IPV6; + break; + default: + type = RTE_FLOW_ITEM_TYPE_END; + } + return type; +} + static enum rte_flow_item_type mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item) { - enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID; - uint16_t ether_type = 0; - uint16_t ether_type_m; - uint8_t ip_next_proto = 0; - uint8_t ip_next_proto_m; +#define MLX5_XSET_ITEM_MASK_SPEC(type, fld) \ + do { \ + const void *m = item->mask; \ + const void *s = item->spec; \ + mask = m ? 
\ + ((const struct rte_flow_item_##type *)m)->fld : \ + rte_flow_item_##type##_mask.fld; \ + spec = ((const struct rte_flow_item_##type *)s)->fld; \ + } while (0) + + enum rte_flow_item_type ret; + uint16_t spec, mask; if (item == NULL || item->spec == NULL) - return ret; + return RTE_FLOW_ITEM_TYPE_VOID; switch (item->type) { case RTE_FLOW_ITEM_TYPE_ETH: - if (item->mask) - ether_type_m = ((const struct rte_flow_item_eth *) - (item->mask))->type; - else - ether_type_m = rte_flow_item_eth_mask.type; - if (ether_type_m != RTE_BE16(0xFFFF)) - break; - ether_type = ((const struct rte_flow_item_eth *) - (item->spec))->type; - if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) - ret = RTE_FLOW_ITEM_TYPE_VLAN; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(eth, type); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_ethertype_to_item_type(spec, mask, false); break; case RTE_FLOW_ITEM_TYPE_VLAN: - if (item->mask) - ether_type_m = ((const struct rte_flow_item_vlan *) - (item->mask))->inner_type; - else - ether_type_m = rte_flow_item_vlan_mask.inner_type; - if (ether_type_m != RTE_BE16(0xFFFF)) - break; - ether_type = ((const struct rte_flow_item_vlan *) - (item->spec))->inner_type; - if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) - ret = RTE_FLOW_ITEM_TYPE_VLAN; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_ethertype_to_item_type(spec, mask, false); break; case RTE_FLOW_ITEM_TYPE_IPV4: - if (item->mask) - ip_next_proto_m = ((const struct rte_flow_item_ipv4 *) - (item->mask))->hdr.next_proto_id; - else - ip_next_proto_m = - rte_flow_item_ipv4_mask.hdr.next_proto_id; - if (ip_next_proto_m != 0xFF) - break; - ip_next_proto = ((const struct rte_flow_item_ipv4 *) - (item->spec))->hdr.next_proto_id; - if (ip_next_proto == IPPROTO_UDP) - ret = RTE_FLOW_ITEM_TYPE_UDP; - else if (ip_next_proto == IPPROTO_TCP) - ret = RTE_FLOW_ITEM_TYPE_TCP; - else if (ip_next_proto == IPPROTO_IP) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (ip_next_proto == IPPROTO_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_inet_proto_to_item_type(spec, mask); break; case RTE_FLOW_ITEM_TYPE_IPV6: - if (item->mask) - ip_next_proto_m = ((const struct rte_flow_item_ipv6 *) - (item->mask))->hdr.proto; - else - ip_next_proto_m = - rte_flow_item_ipv6_mask.hdr.proto; - if (ip_next_proto_m != 0xFF) - break; - ip_next_proto = ((const struct rte_flow_item_ipv6 *) - (item->spec))->hdr.proto; - if (ip_next_proto == IPPROTO_UDP) - ret = RTE_FLOW_ITEM_TYPE_UDP; - else if (ip_next_proto == IPPROTO_TCP) - ret = RTE_FLOW_ITEM_TYPE_TCP; - else if (ip_next_proto == IPPROTO_IP) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (ip_next_proto == IPPROTO_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_inet_proto_to_item_type(spec, mask); break; case 
RTE_FLOW_ITEM_TYPE_GENEVE: - ether_type_m = item->mask ? - ((const struct rte_flow_item_geneve *) - (item->mask))->protocol : - rte_flow_item_geneve_mask.protocol; - ether_type = ((const struct rte_flow_item_geneve *) - (item->spec))->protocol; - ether_type_m = rte_be_to_cpu_16(ether_type_m); - ether_type = rte_be_to_cpu_16(ether_type); - switch (ether_type_m & ether_type) { - case RTE_ETHER_TYPE_TEB: - ret = RTE_FLOW_ITEM_TYPE_ETH; - break; - case RTE_ETHER_TYPE_IPV4: - ret = RTE_FLOW_ITEM_TYPE_IPV4; - break; - case RTE_ETHER_TYPE_IPV6: - ret = RTE_FLOW_ITEM_TYPE_IPV6; - break; - default: - ret = RTE_FLOW_ITEM_TYPE_END; - } + MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol); + ret = mlx5_ethertype_to_item_type(spec, mask, true); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + MLX5_XSET_ITEM_MASK_SPEC(gre, protocol); + ret = mlx5_ethertype_to_item_type(spec, mask, true); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol); + ret = mlx5_nsh_proto_to_item_type(spec, mask); break; default: ret = RTE_FLOW_ITEM_TYPE_VOID; break; } return ret; +#undef MLX5_XSET_ITEM_MASK_SPEC } static const int * @@ -789,6 +805,79 @@ static int mlx5_flow_flex_item_release(struct rte_eth_dev *dev, const struct rte_flow_item_flex_handle *handle, struct rte_flow_error *error); +static int +mlx5_flow_info_get(struct rte_eth_dev *dev, + struct rte_flow_port_info *port_info, + struct rte_flow_queue_info *queue_info, + struct rte_flow_error *error); +static int +mlx5_flow_port_configure(struct rte_eth_dev *dev, + const struct rte_flow_port_attr *port_attr, + uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr[], + struct rte_flow_error *err); + +static struct rte_flow_pattern_template * +mlx5_flow_pattern_template_create(struct rte_eth_dev *dev, + const struct rte_flow_pattern_template_attr *attr, + const struct rte_flow_item items[], + struct rte_flow_error *error); + +static int +mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev, + struct rte_flow_pattern_template *template, + struct rte_flow_error *error); +static struct rte_flow_actions_template * +mlx5_flow_actions_template_create(struct rte_eth_dev *dev, + const struct rte_flow_actions_template_attr *attr, + const struct rte_flow_action actions[], + const struct rte_flow_action masks[], + struct rte_flow_error *error); +static int +mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev, + struct rte_flow_actions_template *template, + struct rte_flow_error *error); + +static struct rte_flow_template_table * +mlx5_flow_table_create(struct rte_eth_dev *dev, + const struct rte_flow_template_table_attr *attr, + struct rte_flow_pattern_template *item_templates[], + uint8_t nb_item_templates, + struct rte_flow_actions_template *action_templates[], + uint8_t nb_action_templates, + struct rte_flow_error *error); +static int +mlx5_flow_table_destroy(struct rte_eth_dev *dev, + struct rte_flow_template_table *table, + struct rte_flow_error *error); +static struct rte_flow * +mlx5_flow_async_flow_create(struct rte_eth_dev *dev, + uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow_template_table *table, + const struct rte_flow_item items[], + uint8_t pattern_template_index, + const struct rte_flow_action actions[], + uint8_t action_template_index, + void *user_data, + struct rte_flow_error *error); +static int +mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev, + uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow *flow, + void *user_data, + struct rte_flow_error *error); 
+static int +mlx5_flow_pull(struct rte_eth_dev *dev, + uint32_t queue, + struct rte_flow_op_result res[], + uint16_t n_res, + struct rte_flow_error *error); +static int +mlx5_flow_push(struct rte_eth_dev *dev, + uint32_t queue, + struct rte_flow_error *error); static const struct rte_flow_ops mlx5_flow_ops = { .validate = mlx5_flow_validate, @@ -810,6 +899,18 @@ static const struct rte_flow_ops mlx5_flow_ops = { .get_restore_info = mlx5_flow_tunnel_get_restore_info, .flex_item_create = mlx5_flow_flex_item_create, .flex_item_release = mlx5_flow_flex_item_release, + .info_get = mlx5_flow_info_get, + .configure = mlx5_flow_port_configure, + .pattern_template_create = mlx5_flow_pattern_template_create, + .pattern_template_destroy = mlx5_flow_pattern_template_destroy, + .actions_template_create = mlx5_flow_actions_template_create, + .actions_template_destroy = mlx5_flow_actions_template_destroy, + .template_table_create = mlx5_flow_table_create, + .template_table_destroy = mlx5_flow_table_destroy, + .async_create = mlx5_flow_async_flow_create, + .async_destroy = mlx5_flow_async_flow_destroy, + .pull = mlx5_flow_pull, + .push = mlx5_flow_push, }; /* Tunnel information. */ @@ -886,7 +987,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; enum modify_reg start_reg; bool skip_mtr_reg = false; @@ -1206,7 +1307,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) } /** - * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device * flow. * * @param[in] dev @@ -1219,7 +1320,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; - const int mark = dev_handle->mark; const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; @@ -1254,15 +1354,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, * this must be always enabled (metadata may arive * from other port - not from local flows only. 
*/ - if (priv->config.dv_flow_en && - priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && - mlx5_flow_ext_mreg_supported(dev)) { - rxq_ctrl->rxq.mark = 1; - rxq_ctrl->flow_mark_n = 1; - } else if (mark) { - rxq_ctrl->rxq.mark = 1; - rxq_ctrl->flow_mark_n++; - } if (tunnel) { unsigned int j; @@ -1280,6 +1371,20 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, } } +static void +flow_rxq_mark_flag_set(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_ctrl *rxq_ctrl; + + if (priv->mark_enabled) + return; + LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { + rxq_ctrl->rxq.mark = 1; + } + priv->mark_enabled = 1; +} + /** * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow * @@ -1294,7 +1399,11 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) struct mlx5_priv *priv = dev->data->dev_private; uint32_t handle_idx; struct mlx5_flow_handle *dev_handle; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); + if (wks->mark) + flow_rxq_mark_flag_set(dev); SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, handle_idx, dev_handle, next) flow_drv_rxq_flags_set(dev, dev_handle); @@ -1314,7 +1423,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; - const int mark = dev_handle->mark; const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; @@ -1345,15 +1453,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, MLX5_ASSERT(rxq_ctrl != NULL); if (rxq_ctrl == NULL) continue; - if (priv->config.dv_flow_en && - priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && - mlx5_flow_ext_mreg_supported(dev)) { - rxq_ctrl->rxq.mark = 1; - rxq_ctrl->flow_mark_n = 1; - } else if (mark) { - rxq_ctrl->flow_mark_n--; - rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; - } if (tunnel) { unsigned int j; @@ -1410,12 +1509,12 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev) if (rxq == NULL || rxq->ctrl == NULL) continue; - rxq->ctrl->flow_mark_n = 0; rxq->ctrl->rxq.mark = 0; for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) rxq->ctrl->flow_tunnels_n[j] = 0; rxq->ctrl->rxq.tunnel = 0; } + priv->mark_enabled = 0; } /** @@ -1746,7 +1845,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &rss->key_len, "RSS hash key too large"); - if (rss->queue_num > priv->config.ind_table_max_size) + if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &rss->queue_num, @@ -1981,7 +2080,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, "egress is not supported"); - if (attributes->transfer && !priv->config.dv_esw_en) + if (attributes->transfer && !priv->sh->config.dv_esw_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL, "transfer is not supported"); @@ -2698,7 +2797,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, uint8_t vni[4]; } id = { .vlan_id = 0, }; - if (!priv->config.l3_vxlan_en) + if (!priv->sh->config.l3_vxlan_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 VXLAN is not enabled by device" @@ -2893,7 +2992,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, const struct rte_flow_item_geneve *mask = item->mask; int ret; uint16_t gbhdr; - uint8_t 
opt_len = priv->config.hca_attr.geneve_max_opt_len ? + uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ? MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; const struct rte_flow_item_geneve nic_mask = { .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), @@ -2901,7 +3000,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, .protocol = RTE_BE16(UINT16_MAX), }; - if (!priv->config.hca_attr.tunnel_stateless_geneve_rx) + if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 Geneve is not enabled by device" @@ -2981,10 +3080,9 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_geneve_tlv_option_resource *geneve_opt_resource; - struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr; + struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr; uint8_t data_max_supported = hca_attr->max_geneve_tlv_option_data_len * 4; - struct mlx5_dev_config *config = &priv->config; const struct rte_flow_item_geneve *geneve_spec; const struct rte_flow_item_geneve *geneve_mask; const struct rte_flow_item_geneve_opt *spec = item->spec; @@ -3008,7 +3106,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "Geneve TLV opt length exceeeds the limit (31)"); + "Geneve TLV opt length exceeds the limit (31)"); /* Check if class type and length masks are full. */ if (full_mask.option_class != mask->option_class || full_mask.option_type != mask->option_type || @@ -3018,11 +3116,11 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, "Geneve TLV opt class/type/length masks must be full"); /* Check if length is supported */ if ((uint32_t)spec->option_len > - config->hca_attr.max_geneve_tlv_option_data_len) + hca_attr->max_geneve_tlv_option_data_len) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "Geneve TLV opt length not supported"); - if (config->hca_attr.max_geneve_tlv_options > 1) + if (hca_attr->max_geneve_tlv_options > 1) DRV_LOG(DEBUG, "max_geneve_tlv_options supports more than 1 option"); /* Check GENEVE item preceding. */ @@ -3077,7 +3175,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, "Data mask is of unsupported size"); } /* Check GENEVE option is supported in NIC. */ - if (!config->hca_attr.geneve_tlv_opt) + if (!hca_attr->geneve_tlv_opt) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "Geneve TLV opt not supported"); @@ -3126,7 +3224,7 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, struct mlx5_priv *priv = dev->data->dev_private; int ret; - if (!priv->config.mpls_en) + if (!priv->sh->dev_cap.mpls_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "MPLS not supported or" @@ -3416,12 +3514,18 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) if (type != MLX5_FLOW_TYPE_MAX) return type; + /* + * Currently when dv_flow_en == 2, only HW steering engine is + * supported. New engines can also be chosen here if ready. 
+ */ + if (priv->sh->config.dv_flow_en == 2) + return MLX5_FLOW_TYPE_HW; /* If no OS specific type - continue with DV/VERBS selection */ - if (attr->transfer && priv->config.dv_esw_en) + if (attr->transfer && priv->sh->config.dv_esw_en) type = MLX5_FLOW_TYPE_DV; if (!attr->transfer) - type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : - MLX5_FLOW_TYPE_VERBS; + type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : + MLX5_FLOW_TYPE_VERBS; return type; } @@ -3957,7 +4061,7 @@ find_graph_root(uint32_t rss_level) * subflow. * * @param[in] dev_flow - * Pointer the created preifx subflow. + * Pointer the created prefix subflow. * * @return * The layers get from prefix subflow. @@ -4093,7 +4197,7 @@ static bool flow_check_modify_action_type(struct rte_eth_dev *dev, return true; case RTE_FLOW_ACTION_TYPE_FLAG: case RTE_FLOW_ACTION_TYPE_MARK: - if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) + if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) return true; else return false; @@ -4284,7 +4388,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, }; - /* Fill the register fileds in the flow. */ + /* Fill the register fields in the flow. */ ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); if (ret < 0) return NULL; @@ -4353,7 +4457,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) /* * The copy Flows are not included in any list. There * ones are referenced from other Flows and can not - * be applied, removed, deleted in ardbitrary order + * be applied, removed, deleted in arbitrary order * by list traversing. */ mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP, @@ -4532,8 +4636,8 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, uint32_t mark_id; /* Check whether extensive metadata feature is engaged. */ - if (!priv->config.dv_flow_en || - priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || + if (!priv->sh->config.dv_flow_en || + priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || !mlx5_flow_ext_mreg_supported(dev) || !priv->sh->dv_regc0_mask) return 0; @@ -4592,7 +4696,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; struct mlx5_flow_mreg_copy_resource *mcp_res; const struct rte_flow_action_mark *mark; @@ -4796,6 +4900,7 @@ flow_create_split_inner(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_flow *dev_flow; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, flow_split_info->flow_idx, error); @@ -4810,12 +4915,14 @@ flow_create_split_inner(struct rte_eth_dev *dev, /* * If dev_flow is as one of the suffix flow, some actions in suffix * flow may need some user defined item layer flags, and pass the - * Metadate rxq mark flag to suffix flow as well. + * Metadata rxq mark flag to suffix flow as well. 
*/ if (flow_split_info->prefix_layers) dev_flow->handle->layers = flow_split_info->prefix_layers; - if (flow_split_info->prefix_mark) - dev_flow->handle->mark = 1; + if (flow_split_info->prefix_mark) { + MLX5_ASSERT(wks); + wks->mark = 1; + } if (sub_flow) *sub_flow = dev_flow; #ifdef HAVE_IBV_FLOW_DV_SUPPORT @@ -5006,6 +5113,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, uint32_t tag_id = 0; struct rte_flow_item *vlan_item_dst = NULL; const struct rte_flow_item *vlan_item_src = NULL; + const struct rte_flow_item *orig_items = items; struct rte_flow_action *hw_mtr_action; struct rte_flow_action *action_pre_head = NULL; int32_t flow_src_port = priv->representor_id; @@ -5130,7 +5238,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, if (!fm->def_policy) { sub_policy = get_meter_sub_policy(dev, flow, wks, - attr, items, error); + attr, orig_items, + error); if (!sub_policy) return -rte_errno; } else { @@ -5359,7 +5468,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * @param[out] error * Perform verbose error reporting if not NULL. * @param[in] encap_idx - * The encap action inndex. + * The encap action index. * * @return * 0 on success, negative value otherwise @@ -5725,7 +5834,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; const struct rte_flow_action *qrss = NULL; struct rte_flow_action *ext_actions = NULL; struct mlx5_flow *dev_flow = NULL; @@ -5881,6 +5990,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, /* Add suffix subflow to execute Q/RSS. */ flow_split_info->prefix_layers = layers; flow_split_info->prefix_mark = 0; + flow_split_info->table_id = 0; ret = flow_create_split_inner(dev, flow, &dev_flow, &q_attr, mtr_sfx ? items : q_items, q_actions, @@ -6095,7 +6205,6 @@ flow_create_split_meter(struct rte_eth_dev *dev, goto exit; } /* Add the prefix subflow. */ - flow_split_info->prefix_mark = 0; skip_scale_restore = flow_split_info->skip_scale; flow_split_info->skip_scale |= 1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT; @@ -6128,7 +6237,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, MLX5_FLOW_TABLE_LEVEL_METER; flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); - flow_split_info->prefix_mark = dev_flow->handle->mark; + flow_split_info->prefix_mark |= wks->mark; flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX; } /* Add the prefix subflow. */ @@ -6194,6 +6303,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, struct mlx5_flow_dv_sample_resource *sample_res; struct mlx5_flow_tbl_data_entry *sfx_tbl_data; struct mlx5_flow_tbl_resource *sfx_tbl; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); #endif size_t act_size; size_t item_size; @@ -6232,7 +6342,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, * When reg_c_preserve is set, metadata registers Cx preserve * their value even through packet duplication. 
*/ - add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve); + add_tag = (!fdb_tx || + priv->sh->cdev->config.hca_attr.reg_c_preserve); if (add_tag) sfx_items = (struct rte_flow_item *)((char *)sfx_actions + act_size); @@ -6280,7 +6391,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, } flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); - flow_split_info->prefix_mark = dev_flow->handle->mark; + MLX5_ASSERT(wks); + flow_split_info->prefix_mark |= wks->mark; /* Suffix group level already be scaled with factor, set * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale * again in translation. @@ -6819,6 +6931,15 @@ mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->config.dv_flow_en == 2) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Flow non-Q creation not supported"); + return NULL; + } /* * If the device is not started yet, it is not allowed to created a * flow from application. PMD default flows and traffic control flows @@ -6884,7 +7005,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, * @param type * Flow type to be flushed. * @param active - * If flushing is called avtively. + * If flushing is called actively. */ void mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type, @@ -6894,6 +7015,14 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type, uint32_t num_flushed = 0, fidx = 1; struct rte_flow *flow; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (priv->sh->config.dv_flow_en == 2 && + type == MLX5_FLOW_TYPE_GEN) { + flow_hw_q_flow_flush(dev, NULL); + return; + } +#endif + MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) { flow_list_destroy(dev, type, fidx); num_flushed++; @@ -6990,8 +7119,7 @@ flow_alloc_thread_workspace(void) data->rssq_num = MLX5_RSSQ_DEFAULT_NUM; return data; err: - if (data->rss_desc.queue) - free(data->rss_desc.queue); + free(data->rss_desc.queue); free(data); return NULL; } @@ -7316,6 +7444,13 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error __rte_unused) { + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->config.dv_flow_en == 2) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Flow non-Q destruction not supported"); flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, (uintptr_t)(void *)flow); return 0; @@ -7413,7 +7548,13 @@ mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow_error *error) { int ret; + struct mlx5_priv *priv = dev->data->dev_private; + if (priv->sh->config.dv_flow_en == 2) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Flow non-Q query not supported"); ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data, error); if (ret < 0) @@ -7780,14 +7921,15 @@ mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt) */ int mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, - bool clear, uint64_t *pkts, uint64_t *bytes) + bool clear, uint64_t *pkts, uint64_t *bytes, void **action) { const struct mlx5_flow_driver_ops *fops; struct rte_flow_attr attr = { .transfer = 0 }; if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); - return fops->counter_query(dev, cnt, clear, pkts, bytes); + return fops->counter_query(dev, cnt, clear, pkts, + bytes, action); } DRV_LOG(ERR, "port %u counter query 
is not supported.", @@ -7795,6 +7937,436 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, return -ENOTSUP; } +/** + * Get information about HWS pre-configurable resources. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[out] port_info + * Pointer to port information. + * @param[out] queue_info + * Pointer to queue information. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_info_get(struct rte_eth_dev *dev, + struct rte_flow_port_info *port_info, + struct rte_flow_queue_info *queue_info, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "info get with incorrect steering mode"); + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->info_get(dev, port_info, queue_info, error); +} + +/** + * Configure port HWS resources. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] port_attr + * Port configuration attributes. + * @param[in] nb_queue + * Number of queue. + * @param[in] queue_attr + * Array that holds attributes for each flow queue. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_port_configure(struct rte_eth_dev *dev, + const struct rte_flow_port_attr *port_attr, + uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr[], + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "port configure with incorrect steering mode"); + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->configure(dev, port_attr, nb_queue, queue_attr, error); +} + +/** + * Create flow item template. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] attr + * Pointer to the item template attributes. + * @param[in] items + * The template item pattern. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static struct rte_flow_pattern_template * +mlx5_flow_pattern_template_create(struct rte_eth_dev *dev, + const struct rte_flow_pattern_template_attr *attr, + const struct rte_flow_item items[], + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "pattern create with incorrect steering mode"); + return NULL; + } + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->pattern_template_create(dev, attr, items, error); +} + +/** + * Destroy flow item template. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] template + * Pointer to the item template to be destroyed. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev, + struct rte_flow_pattern_template *template, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "pattern destroy with incorrect steering mode"); + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->pattern_template_destroy(dev, template, error); +} + +/** + * Create flow item template. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] attr + * Pointer to the action template attributes. + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[in] masks + * List of actions that marks which of the action's member is constant. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static struct rte_flow_actions_template * +mlx5_flow_actions_template_create(struct rte_eth_dev *dev, + const struct rte_flow_actions_template_attr *attr, + const struct rte_flow_action actions[], + const struct rte_flow_action masks[], + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "action create with incorrect steering mode"); + return NULL; + } + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->actions_template_create(dev, attr, actions, masks, error); +} + +/** + * Destroy flow action template. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] template + * Pointer to the action template to be destroyed. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev, + struct rte_flow_actions_template *template, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "action destroy with incorrect steering mode"); + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->actions_template_destroy(dev, template, error); +} + +/** + * Create flow table. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] attr + * Pointer to the table attributes. + * @param[in] item_templates + * Item template array to be binded to the table. + * @param[in] nb_item_templates + * Number of item template. + * @param[in] action_templates + * Action template array to be binded to the table. + * @param[in] nb_action_templates + * Number of action template. + * @param[out] error + * Pointer to error structure. + * + * @return + * Table on success, NULL otherwise and rte_errno is set. 
+ */ +static struct rte_flow_template_table * +mlx5_flow_table_create(struct rte_eth_dev *dev, + const struct rte_flow_template_table_attr *attr, + struct rte_flow_pattern_template *item_templates[], + uint8_t nb_item_templates, + struct rte_flow_actions_template *action_templates[], + uint8_t nb_action_templates, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "table create with incorrect steering mode"); + return NULL; + } + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->template_table_create(dev, + attr, + item_templates, + nb_item_templates, + action_templates, + nb_action_templates, + error); +} + +/** + * PMD destroy flow table. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] table + * Pointer to the table to be destroyed. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_table_destroy(struct rte_eth_dev *dev, + struct rte_flow_template_table *table, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "table destroy with incorrect steering mode"); + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->template_table_destroy(dev, table, error); +} + +/** + * Enqueue flow creation. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] queue_id + * The queue to create the flow. + * @param[in] attr + * Pointer to the flow operation attributes. + * @param[in] items + * Items with flow spec value. + * @param[in] pattern_template_index + * The item pattern flow follows from the table. + * @param[in] actions + * Action with flow spec value. + * @param[in] action_template_index + * The action pattern flow follows from the table. + * @param[in] user_data + * Pointer to the user_data. + * @param[out] error + * Pointer to error structure. + * + * @return + * Flow pointer on success, NULL otherwise and rte_errno is set. + */ +static struct rte_flow * +mlx5_flow_async_flow_create(struct rte_eth_dev *dev, + uint32_t queue_id, + const struct rte_flow_op_attr *attr, + struct rte_flow_template_table *table, + const struct rte_flow_item items[], + uint8_t pattern_template_index, + const struct rte_flow_action actions[], + uint8_t action_template_index, + void *user_data, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "flow_q create with incorrect steering mode"); + return NULL; + } + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->async_flow_create(dev, queue_id, attr, table, + items, pattern_template_index, + actions, action_template_index, + user_data, error); +} + +/** + * Enqueue flow destruction. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] queue + * The queue to destroy the flow. + * @param[in] attr + * Pointer to the flow operation attributes. + * @param[in] flow + * Pointer to the flow to be destroyed. + * @param[in] user_data + * Pointer to the user_data. + * @param[out] error + * Pointer to error structure. 
+ * + * @return + * 0 on success, negative value otherwise and rte_errno is set. + */ +static int +mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev, + uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow *flow, + void *user_data, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "flow_q destroy with incorrect steering mode"); + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->async_flow_destroy(dev, queue, attr, flow, + user_data, error); +} + +/** + * Pull the enqueued flows. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] queue + * The queue to pull the result. + * @param[in/out] res + * Array to save the results. + * @param[in] n_res + * Available result with the array. + * @param[out] error + * Pointer to error structure. + * + * @return + * Result number on success, negative value otherwise and rte_errno is set. + */ +static int +mlx5_flow_pull(struct rte_eth_dev *dev, + uint32_t queue, + struct rte_flow_op_result res[], + uint16_t n_res, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "flow_q pull with incorrect steering mode"); + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->pull(dev, queue, res, n_res, error); +} + +/** + * Push the enqueued flows. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] queue + * The queue to push the flows. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. + */ +static int +mlx5_flow_push(struct rte_eth_dev *dev, + uint32_t queue, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "flow_q push with incorrect steering mode"); + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->push(dev, queue, error); +} + /** * Allocate a new memory for the counter values wrapped by all the needed * management. @@ -8231,7 +8803,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) struct rte_flow *flow; struct rte_flow_error error; - if (!priv->config.dv_flow_en) + if (!priv->sh->config.dv_flow_en) break; /* Create internal flow, validation skips copy action. 
*/ flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, @@ -8359,6 +8931,16 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, "invalid flow handle"); } handle_idx = flow->dev_handles; + /* query counter */ + if (flow->counter && + (!mlx5_counter_query(dev, flow->counter, false, + &count.hits, &count.bytes, &action)) && action) { + id = (uint64_t)(uintptr_t)action; + type = DR_DUMP_REC_TYPE_PMD_COUNTER; + save_dump_file(NULL, 0, type, + id, (void *)&count, file); + } + while (handle_idx) { dh = mlx5_ipool_get(priv->sh->ipool [MLX5_IPOOL_MLX5_FLOW], handle_idx); @@ -8366,16 +8948,6 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, continue; handle_idx = dh->next.next; - /* query counter */ - type = DR_DUMP_REC_TYPE_PMD_COUNTER; - flow_dv_query_count_ptr(dev, flow->counter, - &action, error); - if (action) { - id = (uint64_t)(uintptr_t)action; - if (!mlx5_flow_query_counter(dev, flow, &count, error)) - save_dump_file(NULL, 0, type, - id, (void *)&count, file); - } /* Get modify_hdr and encap_decap buf from ipools. */ encap_decap = NULL; modify_hdr = dh->dvh.modify_hdr; @@ -8421,7 +8993,7 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, */ static int mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, - FILE *file, struct rte_flow_error *error) + FILE *file, struct rte_flow_error *error __rte_unused) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; @@ -8506,14 +9078,12 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, max = MLX5_COUNTERS_PER_POOL * cmng->n_valid; for (j = 1; j <= max; j++) { action = NULL; - flow_dv_query_count_ptr(dev, j, &action, error); - if (action) { - if (!flow_dv_query_count(dev, j, &count, error)) { - type = DR_DUMP_REC_TYPE_PMD_COUNTER; - id = (uint64_t)(uintptr_t)action; - save_dump_file(NULL, 0, type, - id, (void *)&count, file); - } + if ((!mlx5_counter_query(dev, j, false, &count.hits, + &count.bytes, &action)) && action) { + id = (uint64_t)(uintptr_t)action; + type = DR_DUMP_REC_TYPE_PMD_COUNTER; + save_dump_file(NULL, 0, type, + id, (void *)&count, file); } } return 0; @@ -8531,7 +9101,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. PMDs initialize this * structure in case of error only. * @return - * 0 on success, a nagative value otherwise. + * 0 on success, a negative value otherwise. 
*/ int mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx, @@ -8545,7 +9115,7 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx, struct mlx5_flow_handle *dh; struct rte_flow *flow; - if (!priv->config.dv_flow_en) { + if (!sh->config.dv_flow_en) { if (fputs("device dv flow disabled\n", file) <= 0) return -errno; return -ENOTSUP; @@ -9009,7 +9579,7 @@ mlx5_get_tof(const struct rte_flow_item *item, } /** - * tunnel offload functionalilty is defined for DV environment only + * tunnel offload functionality is defined for DV environment only */ #ifdef HAVE_IBV_FLOW_DV_SUPPORT __extension__ @@ -9529,7 +10099,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; - if (!priv->config.dv_flow_en) + if (!priv->sh->config.dv_flow_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "flow DV interface is off"); @@ -9948,7 +10518,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev) type = mlx5_flow_os_get_type(); if (type == MLX5_FLOW_TYPE_MAX) { type = MLX5_FLOW_TYPE_VERBS; - if (priv->sh->devx && priv->config.dv_flow_en) + if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en) type = MLX5_FLOW_TYPE_DV; } fops = flow_get_drv_ops(type);
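
The mlx5_flow_ops callbacks registered above (info_get, configure, the pattern/actions template create and destroy pairs, template_table_create/destroy, async_create, async_destroy, push and pull) back the generic rte_flow template and asynchronous flow-queue API, and flow_get_drv_type() selects this MLX5_FLOW_TYPE_HW path only when the dv_flow_en=2 devarg is set. The fragment below is a minimal, application-side sketch of that call sequence for reference; it is not part of this patch, and the port id, queue size, table group, match fields and queue action are illustrative assumptions only.

/*
 * Illustrative sketch (not part of this patch): driving the template/async
 * rte_flow API that the mlx5 HW steering callbacks above implement.
 * Assumes the port was probed with dv_flow_en=2 and is already started.
 */
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static int
hw_steering_example(uint16_t port_id)
{
	struct rte_flow_error error;
	struct rte_flow_port_info port_info;
	struct rte_flow_queue_info queue_info;
	const struct rte_flow_port_attr port_attr = { .nb_counters = 0 };
	const struct rte_flow_queue_attr queue_attr = { .size = 64 };
	const struct rte_flow_queue_attr *queue_attr_list[] = { &queue_attr };
	struct rte_flow_pattern_template_attr pt_attr = { .ingress = 1 };
	struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
	struct rte_flow_template_table_attr tbl_attr = {
		.flow_attr = { .group = 1, .ingress = 1 },
		.nb_flows = 1024,
	};
	/* Pattern template: match on the IPv4 destination address only. */
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr.dst_addr = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Actions template: send matching packets to one Rx queue. */
	struct rte_flow_action_queue queue_conf = { .index = 0 };
	struct rte_flow_action_queue queue_mask = { .index = 0xffff };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* Full mask on the index: the Rx queue is fixed by the template. */
	struct rte_flow_action masks[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_mask },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* Per-rule spec filled at enqueue time (192.168.0.1 as an example). */
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.dst_addr = RTE_BE32(0xc0a80001),
	};
	struct rte_flow_item rule_items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_op_attr op_attr = { .postpone = 1 };
	struct rte_flow_op_result res[8];
	struct rte_flow_pattern_template *pt;
	struct rte_flow_actions_template *at;
	struct rte_flow_template_table *tbl;
	struct rte_flow *flow;
	int ret;

	/* 1. Query pre-configurable resources, then configure one flow queue. */
	ret = rte_flow_info_get(port_id, &port_info, &queue_info, &error);
	if (ret)
		return ret;
	ret = rte_flow_configure(port_id, &port_attr, 1, queue_attr_list, &error);
	if (ret)
		return ret;
	/* 2. Create pattern/actions templates and bind them into a table. */
	pt = rte_flow_pattern_template_create(port_id, &pt_attr, pattern, &error);
	at = rte_flow_actions_template_create(port_id, &at_attr, actions, masks,
					      &error);
	if (pt == NULL || at == NULL)
		return -rte_errno;
	tbl = rte_flow_template_table_create(port_id, &tbl_attr, &pt, 1, &at, 1,
					     &error);
	if (tbl == NULL)
		return -rte_errno;
	/* 3. Enqueue one rule on queue 0, push it, then poll for completion. */
	flow = rte_flow_async_create(port_id, 0, &op_attr, tbl, rule_items, 0,
				     actions, 0, NULL, &error);
	if (flow == NULL)
		return -rte_errno;
	ret = rte_flow_push(port_id, 0, &error);
	if (ret)
		return ret;
	do {
		ret = rte_flow_pull(port_id, 0, res, RTE_DIM(res), &error);
	} while (ret == 0);
	return ret < 0 ? ret : 0;
}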