diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 41072da6df..ae5ccc2edd 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -8,6 +8,7 @@ #include #include #include +#include /* Verbs header. */ /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -23,6 +24,7 @@ #include #include #include +#include #include #include #include
@@ -34,16 +36,10 @@ #include "mlx5_defs.h" #include "mlx5.h" #include "mlx5_flow.h" +#include "mlx5_flow_os.h" #include "mlx5_rxtx.h" -/* Dev ops structure defined in mlx5.c */ -extern const struct eth_dev_ops mlx5_dev_ops; -extern const struct eth_dev_ops mlx5_dev_ops_isolate; - /** Device flow drivers. */ -#ifdef HAVE_IBV_FLOW_DV_SUPPORT -extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops; -#endif extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
@@ -241,6 +237,7 @@ static const struct rte_flow_ops mlx5_flow_ops = { .isolate = mlx5_flow_isolate, .query = mlx5_flow_query, .dev_dump = mlx5_flow_dev_dump, + .get_aged_flows = mlx5_flow_get_aged_flows, }; /* Convert FDIR request to Generic flow. */
@@ -439,6 +436,10 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, */ if (skip_mtr_reg && config->flow_mreg_c [id + start_reg - REG_C_0] >= priv->mtr_color_reg) { + if (id >= (REG_C_7 - start_reg)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "invalid tag id"); if (config->flow_mreg_c [id + 1 + start_reg - REG_C_0] != REG_NONE) return config->flow_mreg_c
@@ -502,7 +503,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev) } flow_attr = { .attr = { .num_of_specs = 2, - .port = (uint8_t)priv->ibv_port, + .port = (uint8_t)priv->dev_port, }, .eth = { .type = IBV_FLOW_SPEC_ETH,
@@ -657,13 +658,12 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item, * The hash fields that should be used. */ uint64_t -mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, +mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc, int tunnel __rte_unused, uint64_t layer_types, uint64_t hash_fields) { - struct rte_flow *flow = dev_flow->flow; #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - int rss_request_inner = flow->rss.level >= 2; + int rss_request_inner = rss_desc->level >= 2; /* Check RSS hash level for tunnel. */ if (tunnel && rss_request_inner)
@@ -672,7 +672,7 @@ mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, return 0; #endif /* Check if requested layer matches RSS hash fields. */ - if (!(flow->rss.types & layer_types)) + if (!(rss_desc->types & layer_types)) return 0; return hash_fields; }
@@ -711,21 +711,27 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] dev_flow - * Pointer to device flow structure. + * @param[in] dev_handle + * Pointer to device flow handle structure.
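/*
 * For reference: a minimal standalone sketch of the selection logic in
 * mlx5_flow_hashfields_adjust() after this change. The descriptor is
 * simplified and HASH_INNER_BIT stands in for IBV_RX_HASH_INNER; both
 * names are illustrative, not from the patch.
 */
#include <stdint.h>

#define HASH_INNER_BIT (1ULL << 63)

struct rss_desc_sketch {
	uint32_t level;	/* requested RSS level, >= 2 means inner RSS */
	uint64_t types;	/* requested RSS types, e.g. ETH_RSS_IP */
};

static uint64_t
hashfields_adjust(const struct rss_desc_sketch *rss_desc, int tunnel,
		  uint64_t layer_types, uint64_t hash_fields)
{
	int rss_request_inner = rss_desc->level >= 2;

	if (tunnel && rss_request_inner)
		hash_fields |= HASH_INNER_BIT;	/* hash the inner headers */
	else if (tunnel || rss_request_inner)
		return 0;			/* level/layer mismatch */
	if (!(rss_desc->types & layer_types))
		return 0;			/* layer not in RSS types */
	return hash_fields;
}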
*/ static void -flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) +flow_drv_rxq_flags_set(struct rte_eth_dev *dev, + struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow = dev_flow->flow; - const int mark = !!(dev_flow->actions & - (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); - const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const int mark = dev_handle->mark; + const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); + struct mlx5_hrxq *hrxq; unsigned int i; - for (i = 0; i != flow->rss.queue_num; ++i) { - int idx = (*flow->rss.queue)[i]; + if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) + return; + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + dev_handle->rix_hrxq); + if (!hrxq) + return; + for (i = 0; i != hrxq->ind_table->queues_n; ++i) { + int idx = hrxq->ind_table->queues[i]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); @@ -750,7 +756,7 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) /* Increase the counter matching the flow. */ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { if ((tunnels_info[j].tunnel & - dev_flow->layers) == + dev_handle->layers) == tunnels_info[j].tunnel) { rxq_ctrl->flow_tunnels_n[j]++; break; @@ -772,10 +778,13 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) static void flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t handle_idx; + struct mlx5_flow_handle *dev_handle; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) - flow_drv_rxq_flags_set(dev, dev_flow); + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dev_handle, next) + flow_drv_rxq_flags_set(dev, dev_handle); } /** @@ -784,22 +793,28 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) * * @param dev * Pointer to Ethernet device. - * @param[in] dev_flow - * Pointer to the device flow. + * @param[in] dev_handle + * Pointer to the device flow handle structure. */ static void -flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) +flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, + struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow = dev_flow->flow; - const int mark = !!(dev_flow->actions & - (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); - const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const int mark = dev_handle->mark; + const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); + struct mlx5_hrxq *hrxq; unsigned int i; + if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) + return; + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + dev_handle->rix_hrxq); + if (!hrxq) + return; MLX5_ASSERT(dev->data->dev_started); - for (i = 0; i != flow->rss.queue_num; ++i) { - int idx = (*flow->rss.queue)[i]; + for (i = 0; i != hrxq->ind_table->queues_n; ++i) { + int idx = hrxq->ind_table->queues[i]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); @@ -819,7 +834,7 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) /* Decrease the counter matching the flow. 
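/*
 * SILIST_FOREACH used above walks a singly linked list whose "next"
 * links are 32-bit pool indices rather than pointers, with 0 as the
 * terminator. A hypothetical minimal equivalent over a flat array:
 */
#include <stdint.h>
#include <stddef.h>

struct handle_sketch {
	uint32_t next;		/* index of the next handle, 0 ends the list */
	uint32_t layers;	/* payload, e.g. MLX5_FLOW_LAYER_* bits */
};

static struct handle_sketch *
pool_get(struct handle_sketch *pool, uint32_t idx)
{
	return idx ? &pool[idx] : NULL;	/* slot 0 is reserved as "NULL" */
}

static void
walk_handles(struct handle_sketch *pool, uint32_t head)
{
	struct handle_sketch *h;

	for (h = pool_get(pool, head); h != NULL; h = pool_get(pool, h->next)) {
		/* per-handle work, e.g. flow_drv_rxq_flags_set() */
	}
}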
*/ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { if ((tunnels_info[j].tunnel & - dev_flow->layers) == + dev_handle->layers) == tunnels_info[j].tunnel) { rxq_ctrl->flow_tunnels_n[j]--; break; @@ -842,10 +857,13 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) static void flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t handle_idx; + struct mlx5_flow_handle *dev_handle; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) - flow_drv_rxq_flags_trim(dev, dev_flow); + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dev_handle, next) + flow_drv_rxq_flags_trim(dev, dev_handle); } /** @@ -876,6 +894,35 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev) } } +/** + * Set the Rx queue dynamic metadata (mask and offset) for a flow + * + * @param[in] dev + * Pointer to the Ethernet device structure. + */ +void +mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *data; + unsigned int i; + + for (i = 0; i != priv->rxqs_n; ++i) { + if (!(*priv->rxqs)[i]) + continue; + data = (*priv->rxqs)[i]; + if (!rte_flow_dynf_metadata_avail()) { + data->dynf_meta = 0; + data->flow_meta_mask = 0; + data->flow_meta_offset = -1; + } else { + data->dynf_meta = 1; + data->flow_meta_mask = rte_flow_dynf_metadata_mask; + data->flow_meta_offset = rte_flow_dynf_metadata_offs; + } + } +} + /* * return a pointer to the desired action in the list of actions. * @@ -1188,6 +1235,43 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, return 0; } +/* + * Validate the default miss action. + * + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_action_default_miss(uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (action_flags & MLX5_FLOW_FATE_ACTIONS) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 fate actions in" + " same flow"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "default miss action not supported " + "for egress"); + if (attr->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL, + "only group 0 is supported"); + if (attr->transfer) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, "transfer is not supported"); + return 0; +} + /* * Validate the count action. * @@ -1836,7 +1920,6 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; - uint32_t vlan_id = 0; if (item_flags & MLX5_FLOW_LAYER_TUNNEL) @@ -1863,23 +1946,8 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, return ret; if (spec) { memcpy(&id.vni[1], spec->vni, 3); - vlan_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); - vlan_id &= id.vlan_id; } - /* - * Tunnel id 0 is equivalent as not adding a VXLAN layer, if - * only this layer is defined in the Verbs specification it is - * interpreted as wildcard and all packets will match this - * rule, if it follows a full stack layer (ex: eth / ipv4 / - * udp), all packets matching the layers before will also - * match this rule. 
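/*
 * The VXLAN hunks around here drop the "VNI cannot be 0" restriction;
 * the union they keep packs the 3-byte VNI into a 32-bit word only so
 * it can be tested as a whole. Standalone sketch of the packing:
 */
#include <stdint.h>
#include <string.h>

static uint32_t
vni_pack(const uint8_t vni[3])
{
	union {
		uint32_t word;
		uint8_t vni[4];
	} id = { .word = 0 };

	/* Byte 0 stays zero, so byte order is irrelevant for a zero test. */
	memcpy(&id.vni[1], vni, 3);
	return id.word;
}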
To avoid such situation, VNI 0 is - * currently refused. - */ - if (!vlan_id) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "VXLAN vni cannot be 0"); if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -1918,7 +1986,6 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; - uint32_t vlan_id = 0; if (!priv->config.l3_vxlan_en) return rte_flow_error_set(error, ENOTSUP, @@ -1956,22 +2023,8 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, "VxLAN-GPE protocol" " not supported"); memcpy(&id.vni[1], spec->vni, 3); - vlan_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); - vlan_id &= id.vlan_id; } - /* - * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this - * layer is defined in the Verbs specification it is interpreted as - * wildcard and all packets will match this rule, if it follows a full - * stack layer (ex: eth / ipv4 / udp), all packets matching the layers - * before will also match this rule. To avoid such situation, VNI 0 - * is currently refused. - */ - if (!vlan_id) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "VXLAN-GPE vni cannot be 0"); if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -2247,11 +2300,12 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, if (ret < 0) return ret; return 0; -#endif +#else return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "MPLS is not supported by Verbs, please" " update."); +#endif } /** @@ -2339,11 +2393,14 @@ static void flow_mreg_split_qrss_release(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t handle_idx; + struct mlx5_flow_handle *dev_handle; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) - if (dev_flow->qrss_id) - flow_qrss_free_id(dev, dev_flow->qrss_id); + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dev_handle, next) + if (dev_handle->split_flow_id) + flow_qrss_free_id(dev, dev_handle->split_flow_id); } static int @@ -2352,6 +2409,7 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, bool external __rte_unused, + int hairpin __rte_unused, struct rte_flow_error *error) { return rte_flow_error_set(error, ENOTSUP, @@ -2359,7 +2417,8 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused, } static struct mlx5_flow * -flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, +flow_null_prepare(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, struct rte_flow_error *error) @@ -2440,8 +2499,12 @@ static enum mlx5_flow_drv_type flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) { struct mlx5_priv *priv = dev->data->dev_private; - enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; + /* The OS can determine first a specific flow type (DV, VERBS) */ + enum mlx5_flow_drv_type type = mlx5_flow_os_get_type(); + if (type != MLX5_FLOW_TYPE_MAX) + return type; + /* If no OS specific type - continue with DV/VERBS selection */ if (attr->transfer && priv->config.dv_esw_en) type = 
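/*
 * Condensed sketch of the driver-selection order introduced above; the
 * enum and the final fallback are simplified from context, so treat the
 * exact fall-through as an assumption:
 */
enum drv_type_sketch { TYPE_DV, TYPE_VERBS, TYPE_MAX };

static enum drv_type_sketch
get_drv_type(enum drv_type_sketch os_type, int transfer,
	     int dv_esw_en, int dv_flow_en)
{
	if (os_type != TYPE_MAX)
		return os_type;			/* OS layer decided already */
	if (transfer && dv_esw_en)
		return TYPE_DV;			/* E-Switch requires DV */
	return dv_flow_en ? TYPE_DV : TYPE_VERBS;
}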
MLX5_FLOW_TYPE_DV; if (!attr->transfer) @@ -2466,6 +2529,8 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) * Pointer to the list of actions. * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] hairpin + * Number of hairpin TX actions, 0 means classic flow. * @param[out] error * Pointer to the error structure. * @@ -2477,13 +2542,14 @@ flow_drv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error) + bool external, int hairpin, struct rte_flow_error *error) { const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); fops = flow_get_drv_ops(type); - return fops->validate(dev, attr, items, actions, external, error); + return fops->validate(dev, attr, items, actions, external, + hairpin, error); } /** @@ -2499,12 +2565,16 @@ flow_drv_validate(struct rte_eth_dev *dev, * setting backward reference to the flow should be done out of this function. * layers field is not filled either. * + * @param[in] dev + * Pointer to the dev structure. * @param[in] attr * Pointer to the flow attributes. * @param[in] items * Pointer to the list of items. * @param[in] actions * Pointer to the list of actions. + * @param[in] flow_idx + * This memory pool index to the flow. * @param[out] error * Pointer to the error structure. * @@ -2512,18 +2582,24 @@ flow_drv_validate(struct rte_eth_dev *dev, * Pointer to device flow on success, otherwise NULL and rte_errno is set. */ static inline struct mlx5_flow * -flow_drv_prepare(const struct rte_flow *flow, +flow_drv_prepare(struct rte_eth_dev *dev, + const struct rte_flow *flow, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], + uint32_t flow_idx, struct rte_flow_error *error) { const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type type = flow->drv_type; + struct mlx5_flow *mlx5_flow = NULL; MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(type); - return fops->prepare(attr, items, actions, error); + mlx5_flow = fops->prepare(dev, attr, items, actions, error); + if (mlx5_flow) + mlx5_flow->flow_idx = flow_idx; + return mlx5_flow; } /** @@ -2641,27 +2717,6 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) fops->destroy(dev, flow); } -/** - * Validate a flow supported by the NIC. - * - * @see rte_flow_validate() - * @see rte_flow_ops - */ -int -mlx5_flow_validate(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - int ret; - - ret = flow_drv_validate(dev, attr, items, actions, true, error); - if (ret < 0) - return ret; - return 0; -} - /** * Get RSS action from the action list. * @@ -2726,18 +2781,22 @@ flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow) { uint64_t layers = 0; - /* If no decap actions, use the layers directly. */ - if (!(dev_flow->actions & MLX5_FLOW_ACTION_DECAP)) - return dev_flow->layers; + /* + * Layers bits could be localization, but usually the compiler will + * help to do the optimization work for source code. + * If no decap actions, use the layers directly. + */ + if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP)) + return dev_flow->handle->layers; /* Convert L3 layers with decap action. 
*/ - if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4) + if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4) layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4; - else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6) + else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6) layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6; /* Convert L4 layers with decap action. */ - if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_TCP) + if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP) layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP; - else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_UDP) + else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP) layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP; return layers; }
@@ -2908,16 +2967,16 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, } /* Declare flow create/destroy prototype in advance. */ -static struct rte_flow * -flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, +static uint32_t +flow_list_create(struct rte_eth_dev *dev, uint32_t *list, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], bool external, struct rte_flow_error *error); static void -flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, - struct rte_flow *flow); +flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, + uint32_t flow_idx); /** * Add a flow of copying flow metadata registers in RX_CP_TBL.
@@ -2973,6 +3032,7 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, }; struct mlx5_flow_mreg_copy_resource *mcp_res; + uint32_t idx = 0; int ret; /* Fill the register fields in the flow. */
@@ -3001,18 +3061,21 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, /* Build a new flow. */ if (mark_id != MLX5_DEFAULT_COPY_ID) { items[0] = (struct rte_flow_item){ - .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG, .spec = &tag_spec, }; items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END, }; actions[0] = (struct rte_flow_action){ - .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK, + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_MARK, .conf = &ftag, }; actions[1] = (struct rte_flow_action){ - .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, .conf = &cp_mreg, }; actions[2] = (struct rte_flow_action){
@@ -3029,7 +3092,8 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, .type = RTE_FLOW_ITEM_TYPE_END, }; actions[0] = (struct rte_flow_action){ - .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, .conf = &cp_mreg, }; actions[1] = (struct rte_flow_action){
@@ -3041,20 +3105,21 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, }; } /* Build a new entry. */ - mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0); + mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx); if (!mcp_res) { rte_errno = ENOMEM; return NULL; } + mcp_res->idx = idx; /* * The copy Flows are not included in any list. These * ones are referenced from other Flows and cannot * be applied, removed, or deleted in arbitrary order * by list traversing.
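/*
 * Allocation pattern used above: mlx5_ipool_zmalloc() returns a zeroed
 * entry plus its pool index, and the index (not the pointer) is what
 * gets stored and later passed to mlx5_ipool_free(). Usage sketch with
 * the calls exactly as they appear in this patch:
 */
uint32_t idx = 0;
struct mlx5_flow_mreg_copy_resource *mcp_res =
	mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);

if (!mcp_res) {
	rte_errno = ENOMEM;
	return NULL;
}
mcp_res->idx = idx;	/* keep the index for the eventual release */
/* ... and on teardown: */
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);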
*/ - mcp_res->flow = flow_list_create(dev, NULL, &attr, items, + mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items, actions, false, error); - if (!mcp_res->flow) + if (!mcp_res->rix_flow) goto error; mcp_res->refcnt++; mcp_res->hlist_ent.key = mark_id; @@ -3065,9 +3130,9 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, goto error; return mcp_res; error: - if (mcp_res->flow) - flow_list_destroy(dev, NULL, mcp_res->flow); - rte_free(mcp_res); + if (mcp_res->rix_flow) + flow_list_destroy(dev, NULL, mcp_res->rix_flow); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); return NULL; } @@ -3083,17 +3148,27 @@ static void flow_mreg_del_copy_action(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; + struct mlx5_flow_mreg_copy_resource *mcp_res; struct mlx5_priv *priv = dev->data->dev_private; + if (!flow->rix_mreg_copy) + return; + mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], + flow->rix_mreg_copy); if (!mcp_res || !priv->mreg_cp_tbl) return; if (flow->copy_applied) { MLX5_ASSERT(mcp_res->appcnt); flow->copy_applied = 0; --mcp_res->appcnt; - if (!mcp_res->appcnt) - flow_drv_remove(dev, mcp_res->flow); + if (!mcp_res->appcnt) { + struct rte_flow *mcp_flow = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + mcp_res->rix_flow); + + if (mcp_flow) + flow_drv_remove(dev, mcp_flow); + } } /* * We do not check availability of metadata registers here, @@ -3101,11 +3176,11 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev, */ if (--mcp_res->refcnt) return; - MLX5_ASSERT(mcp_res->flow); - flow_list_destroy(dev, NULL, mcp_res->flow); + MLX5_ASSERT(mcp_res->rix_flow); + flow_list_destroy(dev, NULL, mcp_res->rix_flow); mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); - rte_free(mcp_res); - flow->mreg_copy = NULL; + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); + flow->rix_mreg_copy = 0; } /** @@ -3123,15 +3198,26 @@ static int flow_mreg_start_copy_action(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; + struct mlx5_flow_mreg_copy_resource *mcp_res; + struct mlx5_priv *priv = dev->data->dev_private; int ret; - if (!mcp_res || flow->copy_applied) + if (!flow->rix_mreg_copy || flow->copy_applied) + return 0; + mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], + flow->rix_mreg_copy); + if (!mcp_res) return 0; if (!mcp_res->appcnt) { - ret = flow_drv_apply(dev, mcp_res->flow, NULL); - if (ret) - return ret; + struct rte_flow *mcp_flow = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + mcp_res->rix_flow); + + if (mcp_flow) { + ret = flow_drv_apply(dev, mcp_flow, NULL); + if (ret) + return ret; + } } ++mcp_res->appcnt; flow->copy_applied = 1; @@ -3150,15 +3236,26 @@ static void flow_mreg_stop_copy_action(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; + struct mlx5_flow_mreg_copy_resource *mcp_res; + struct mlx5_priv *priv = dev->data->dev_private; - if (!mcp_res || !flow->copy_applied) + if (!flow->rix_mreg_copy || !flow->copy_applied) + return; + mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], + flow->rix_mreg_copy); + if (!mcp_res) return; MLX5_ASSERT(mcp_res->appcnt); --mcp_res->appcnt; flow->copy_applied = 0; - if (!mcp_res->appcnt) - flow_drv_remove(dev, mcp_res->flow); + if (!mcp_res->appcnt) { + struct rte_flow *mcp_flow = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + 
mcp_res->rix_flow); + + if (mcp_flow) + flow_drv_remove(dev, mcp_flow); + } } /** @@ -3180,10 +3277,10 @@ flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) MLX5_DEFAULT_COPY_ID); if (!mcp_res) return; - MLX5_ASSERT(mcp_res->flow); - flow_list_destroy(dev, NULL, mcp_res->flow); + MLX5_ASSERT(mcp_res->rix_flow); + flow_list_destroy(dev, NULL, mcp_res->rix_flow); mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); - rte_free(mcp_res); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); } /** @@ -3275,7 +3372,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev, (dev, MLX5_FLOW_MARK_DEFAULT, error); if (!mcp_res) return -rte_errno; - flow->mreg_copy = mcp_res; + flow->rix_mreg_copy = mcp_res->idx; if (dev->data->dev_started) { mcp_res->appcnt++; flow->copy_applied = 1; @@ -3288,7 +3385,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev, flow_mreg_add_copy_action(dev, mark->id, error); if (!mcp_res) return -rte_errno; - flow->mreg_copy = mcp_res; + flow->rix_mreg_copy = mcp_res->idx; if (dev->data->dev_started) { mcp_res->appcnt++; flow->copy_applied = 1; @@ -3403,7 +3500,8 @@ flow_hairpin_split(struct rte_eth_dev *dev, } /* Add set meta action and end action for the Rx flow. */ tag_action = actions_rx; - tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; + tag_action->type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG; actions_rx++; rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); actions_rx++; @@ -3416,7 +3514,8 @@ flow_hairpin_split(struct rte_eth_dev *dev, rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); addr = (void *)&pattern_tx[2]; item = pattern_tx; - item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; + item->type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG; tag_item = (void *)addr; tag_item->data = *flow_id; tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); @@ -3438,7 +3537,7 @@ flow_hairpin_split(struct rte_eth_dev *dev, * The last stage of splitting chain, just creates the subflow * without any modification. * - * @param dev + * @param[in] dev * Pointer to Ethernet device. * @param[in] flow * Parent flow structure pointer. @@ -3454,6 +3553,8 @@ flow_hairpin_split(struct rte_eth_dev *dev, * Associated actions (list terminated by the END action). * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] flow_idx + * This memory pool index to the flow. * @param[out] error * Perform verbose error reporting if not NULL. * @return @@ -3467,23 +3568,26 @@ flow_create_split_inner(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error) + bool external, uint32_t flow_idx, + struct rte_flow_error *error) { struct mlx5_flow *dev_flow; - dev_flow = flow_drv_prepare(flow, attr, items, actions, error); + dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, + flow_idx, error); if (!dev_flow) return -rte_errno; dev_flow->flow = flow; dev_flow->external = external; /* Subflow object was created, we must include one in the list. */ - LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); + SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, + dev_flow->handle, next); /* * If dev_flow is as one of the suffix flow, some actions in suffix * flow may need some user defined item layer flags. 
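/*
 * How the hairpin split above pairs its two halves: the Rx subflow is
 * terminated with an internal TAG write carrying flow_id, and the Tx
 * subflow matches the same value in the hairpin Tx register. Annotated
 * extract of the pairing:
 */
tag_action->type = (enum rte_flow_action_type)
		   MLX5_RTE_FLOW_ACTION_TYPE_TAG;	/* Rx: write flow_id */
tag_item->data = *flow_id;				/* Tx: match flow_id */
tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);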
*/ if (prefix_layers) - dev_flow->layers = prefix_layers; + dev_flow->handle->layers = prefix_layers; if (sub_flow) *sub_flow = dev_flow; return flow_drv_translate(dev, dev_flow, attr, items, actions, error); @@ -3548,7 +3652,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_METER: /* Add the extra tag action first. */ tag_action = actions_pre; - tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; + tag_action->type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG; actions_pre++; action_cur = &actions_pre; break; @@ -3609,7 +3714,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, * Convert to internal match item, it is used * for vlan push and set vid. */ - sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_VLAN; + sfx_items->type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_VLAN; sfx_items++; } break; @@ -3624,7 +3730,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); tag_mask = tag_spec + 1; tag_mask->data = 0xffffff00; - tag_item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; + tag_item->type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG; tag_item->spec = tag_spec; tag_item->last = NULL; tag_item->mask = tag_mask; @@ -3727,7 +3834,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, /* Construct new actions array. */ /* Replace QUEUE/RSS action. */ split_actions[qrss_idx] = (struct rte_flow_action){ - .type = MLX5_RTE_FLOW_ACTION_TYPE_TAG, + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG, .conf = set_tag, }; } @@ -3790,7 +3898,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx); if (encap_idx == actions_n - 1) { ext_actions[actions_n - 1] = (struct rte_flow_action){ - .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, .conf = cp_mreg, }; ext_actions[actions_n] = (struct rte_flow_action){ @@ -3798,7 +3907,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, }; } else { ext_actions[encap_idx] = (struct rte_flow_action){ - .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, .conf = cp_mreg, }; memcpy(ext_actions + encap_idx + 1, actions + encap_idx, @@ -3831,6 +3941,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, * Associated actions (list terminated by the END action). * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] flow_idx + * This memory pool index to the flow. * @param[out] error * Perform verbose error reporting if not NULL. 
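/*
 * Worked example for tag_mask->data = 0xffffff00 in the meter split
 * above: the suffix flow matches only the upper 24 bits of the
 * register; that the low byte is left free for the meter color is this
 * sketch's assumption.
 */
uint32_t reg_val = 0x00012345;	/* hypothetical register content */
uint32_t mask = 0xffffff00;
/* 0x00012300 .. 0x000123ff all match, i.e. any low-byte value: */
int hit = (reg_val & mask) == (0x00012300 & mask);	/* hit == 1 */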
* @return @@ -3843,7 +3955,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error) + bool external, uint32_t flow_idx, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *config = &priv->config; @@ -3863,7 +3976,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, !mlx5_flow_ext_mreg_supported(dev)) return flow_create_split_inner(dev, flow, NULL, prefix_layers, attr, items, actions, external, - error); + flow_idx, error); actions_n = flow_parse_metadata_split_actions_info(actions, &qrss, &encap_idx); if (qrss) { @@ -3912,6 +4025,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, RTE_FLOW_ACTION_TYPE_VOID; else ext_actions[qrss - actions].type = + (enum rte_flow_action_type) MLX5_RTE_FLOW_ACTION_TYPE_TAG; /* * Create the new actions list with removed Q/RSS action @@ -3947,7 +4061,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, /* Add the unmodified original or prefix subflow. */ ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr, items, ext_actions ? ext_actions : - actions, external, error); + actions, external, flow_idx, error); if (ret < 0) goto exit; MLX5_ASSERT(dev_flow); @@ -3963,7 +4077,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev, }; struct rte_flow_item q_items[] = { { - .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG, .spec = &q_tag_spec, .last = NULL, .mask = NULL, @@ -3998,8 +4113,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, * reallocation becomes possible (for example, for * other flows in other threads). */ - dev_flow->qrss_id = qrss_id; - qrss_id = 0; + dev_flow->handle->split_flow_id = qrss_id; ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); if (ret < 0) @@ -4011,9 +4125,11 @@ flow_create_split_metadata(struct rte_eth_dev *dev, ret = flow_create_split_inner(dev, flow, &dev_flow, layers, &q_attr, mtr_sfx ? items : q_items, q_actions, - external, error); + external, flow_idx, error); if (ret < 0) goto exit; + /* qrss ID should be freed if failed. */ + qrss_id = 0; MLX5_ASSERT(dev_flow); } @@ -4050,6 +4166,8 @@ exit: * Associated actions (list terminated by the END action). * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] flow_idx + * This memory pool index to the flow. * @param[out] error * Perform verbose error reporting if not NULL. * @return @@ -4061,7 +4179,8 @@ flow_create_split_meter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error) + bool external, uint32_t flow_idx, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_action *sfx_actions = NULL; @@ -4105,12 +4224,12 @@ flow_create_split_meter(struct rte_eth_dev *dev, /* Add the prefix subflow. */ ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr, items, pre_actions, external, - error); + flow_idx, error); if (ret) { ret = -rte_errno; goto exit; } - dev_flow->mtr_flow_id = mtr_tag_id; + dev_flow->handle->split_flow_id = mtr_tag_id; /* Setting the sfx group atrr. */ sfx_attr.group = sfx_attr.transfer ? (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : @@ -4122,7 +4241,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, 0, &sfx_attr, sfx_items ? 
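/*
 * Ownership-transfer pattern used for qrss_id in the metadata split
 * above: the shared exit path frees the id unless it was handed over
 * to a successfully created subflow. Sketch with hypothetical helpers:
 */
uint32_t alloc_id(void);		/* hypothetical id allocator */
void free_id(uint32_t id);		/* hypothetical id release */
int create_suffix_subflow(uint32_t id);	/* hypothetical inner call */

static int
split_with_owned_id(void)
{
	uint32_t qrss_id = alloc_id();
	int ret = create_suffix_subflow(qrss_id);

	if (ret >= 0)
		qrss_id = 0;		/* success: the subflow owns the id */
	if (qrss_id)
		free_id(qrss_id);	/* reached only on failure */
	return ret;
}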
sfx_items : items, sfx_actions ? sfx_actions : actions, - external, error); + external, flow_idx, error); exit: if (sfx_actions) rte_free(sfx_actions); @@ -4159,6 +4278,8 @@ exit: * Associated actions (list terminated by the END action). * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] flow_idx + * This memory pool index to the flow. * @param[out] error * Perform verbose error reporting if not NULL. * @return @@ -4170,12 +4291,13 @@ flow_create_split_outer(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error) + bool external, uint32_t flow_idx, + struct rte_flow_error *error) { int ret; ret = flow_create_split_meter(dev, flow, attr, items, - actions, external, error); + actions, external, flow_idx, error); MLX5_ASSERT(ret <= 0); return ret; } @@ -4202,10 +4324,10 @@ flow_create_split_outer(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. * * @return - * A flow on success, NULL otherwise and rte_errno is set. + * A flow index on success, 0 otherwise and rte_errno is set. */ -static struct rte_flow * -flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, +static uint32_t +flow_list_create(struct rte_eth_dev *dev, uint32_t *list, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], @@ -4232,36 +4354,32 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, uint8_t buffer[2048]; } items_tx; struct rte_flow_expand_rss *buf = &expand_buffer.buf; + struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) + priv->rss_desc)[!!priv->flow_idx]; const struct rte_flow_action *p_actions_rx = actions; uint32_t i; - uint32_t flow_size; - int hairpin_flow = 0; + uint32_t idx = 0; + int hairpin_flow; uint32_t hairpin_id = 0; struct rte_flow_attr attr_tx = { .priority = 0 }; - int ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, - error); + int ret; - if (ret < 0) - return NULL; hairpin_flow = flow_check_hairpin_split(dev, attr, actions); + ret = flow_drv_validate(dev, attr, items, p_actions_rx, + external, hairpin_flow, error); + if (ret < 0) + return 0; if (hairpin_flow > 0) { if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { rte_errno = EINVAL; - return NULL; + return 0; } flow_hairpin_split(dev, actions, actions_rx.actions, actions_hairpin_tx.actions, items_tx.items, &hairpin_id); p_actions_rx = actions_rx.actions; } - flow_size = sizeof(struct rte_flow); - rss = flow_get_rss_action(p_actions_rx); - if (rss) - flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), - sizeof(void *)); - else - flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); - flow = rte_calloc(__func__, 1, flow_size, 0); + flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx); if (!flow) { rte_errno = ENOMEM; goto error_before_flow; @@ -4271,17 +4389,18 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, flow->hairpin_flow_id = hairpin_id; MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && flow->drv_type < MLX5_FLOW_TYPE_MAX); - flow->rss.queue = (void *)(flow + 1); + memset(rss_desc, 0, sizeof(*rss_desc)); + rss = flow_get_rss_action(p_actions_rx); if (rss) { /* * The following information is required by * mlx5_flow_hashfields_adjust() in advance. */ - flow->rss.level = rss->level; + rss_desc->level = rss->level; /* RSS type 0 indicates default RSS type (ETH_RSS_IP). 
*/ - flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; + rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types; } - LIST_INIT(&flow->dev_flows); + flow->dev_handles = 0; if (rss && rss->types) { unsigned int graph_root; @@ -4296,6 +4415,15 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, buf->entries = 1; buf->entry[0].pattern = (void *)(uintptr_t)items; } + /* + * Record the start index when there is a nested call. All sub-flows + * need to be translated before another calling. + * No need to use ping-pong buffer to save memory here. + */ + if (priv->flow_idx) { + MLX5_ASSERT(!priv->flow_nested_idx); + priv->flow_nested_idx = priv->flow_idx; + } for (i = 0; i < buf->entries; ++i) { /* * The splitter may create multiple dev_flows, @@ -4304,7 +4432,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, */ ret = flow_create_split_outer(dev, flow, attr, buf->entry[i].pattern, - p_actions_rx, external, + p_actions_rx, external, idx, error); if (ret < 0) goto error; @@ -4314,13 +4442,15 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, attr_tx.group = MLX5_HAIRPIN_TX_TABLE; attr_tx.ingress = 0; attr_tx.egress = 1; - dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items, - actions_hairpin_tx.actions, error); + dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items, + actions_hairpin_tx.actions, + idx, error); if (!dev_flow) goto error; dev_flow->flow = flow; dev_flow->external = 0; - LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); + SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, + dev_flow->handle, next); ret = flow_drv_translate(dev, dev_flow, &attr_tx, items_tx.items, actions_hairpin_tx.actions, error); @@ -4343,32 +4473,41 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, if (ret) goto error; } - if (dev->data->dev_started) { + /* + * If the flow is external (from application) OR device is started, then + * the flow will be applied immediately. + */ + if (external || dev->data->dev_started) { ret = flow_drv_apply(dev, flow, error); if (ret < 0) goto error; } if (list) - TAILQ_INSERT_TAIL(list, flow, next); + ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx, + flow, next); flow_rxq_flags_set(dev, flow); - return flow; -error_before_flow: - if (hairpin_id) - mlx5_flow_id_release(priv->sh->flow_id_pool, - hairpin_id); - return NULL; + /* Nested flow creation index recovery. */ + priv->flow_idx = priv->flow_nested_idx; + if (priv->flow_nested_idx) + priv->flow_nested_idx = 0; + return idx; error: MLX5_ASSERT(flow); - flow_mreg_del_copy_action(dev, flow); ret = rte_errno; /* Save rte_errno before cleanup. */ - if (flow->hairpin_flow_id) - mlx5_flow_id_release(priv->sh->flow_id_pool, - flow->hairpin_flow_id); - MLX5_ASSERT(flow); + flow_mreg_del_copy_action(dev, flow); flow_drv_destroy(dev, flow); - rte_free(flow); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx); rte_errno = ret; /* Restore rte_errno. 
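/*
 * The error path above brackets its cleanup with a save/restore of
 * rte_errno, since flow_drv_destroy() and the ipool free may overwrite
 * it. Pattern extract:
 */
ret = rte_errno;	/* save the original failure code */
/* ... cleanup calls that may themselves set rte_errno ... */
rte_errno = ret;	/* report the first error, not the last */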
*/ - return NULL; +error_before_flow: + ret = rte_errno; + if (hairpin_id) + mlx5_flow_id_release(priv->sh->flow_id_pool, + hairpin_id); + rte_errno = ret; + priv->flow_idx = priv->flow_nested_idx; + if (priv->flow_nested_idx) + priv->flow_nested_idx = 0; + return 0; } /**
@@ -4416,8 +4555,29 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_error error; - return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern, - actions, false, &error); + return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows, + &attr, &pattern, + actions, false, &error); +} + +/** + * Validate a flow supported by the NIC. + * + * @see rte_flow_validate() + * @see rte_flow_ops + */ +int +mlx5_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int hairpin_flow; + + hairpin_flow = flow_check_hairpin_split(dev, attr, actions); + return flow_drv_validate(dev, attr, items, actions, + true, hairpin_flow, error); } /**
@@ -4435,8 +4595,22 @@ mlx5_flow_create(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; - return flow_list_create(dev, &priv->flows, - attr, items, actions, true, error); + /* + * If the device is not started yet, it is not allowed to create a + * flow from the application. PMD default flows and traffic control + * flows are not affected. + */ + if (unlikely(!dev->data->dev_started)) { + DRV_LOG(DEBUG, "port %u is not started when " + "inserting a flow", dev->data->port_id); + rte_flow_error_set(error, ENODEV, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "port not started"); + return NULL; + } + return (void *)(uintptr_t)flow_list_create(dev, &priv->flows, + attr, items, actions, true, error); } /**
@@ -4445,17 +4619,24 @@ mlx5_flow_create, * @param dev * Pointer to Ethernet device. * @param list - * Pointer to a TAILQ flow list. If this parameter NULL, - * there is no flow removal from the list. - * @param[in] flow - * Flow to destroy. + * Pointer to the indexed flow list. If this parameter is NULL, + * there is no flow removal from the list. Note that as flows are + * added to the indexed list, the memory the list points to may + * change as flows are destroyed. + * @param[in] flow_idx + * Index of flow to destroy. */ static void -flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, - struct rte_flow *flow) +flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, + uint32_t flow_idx) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; + struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool + [MLX5_IPOOL_RTE_FLOW], flow_idx); + if (!flow) + return; /* * Update RX queue flags only if port is started, otherwise it is * already clean.
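/*
 * Application-visible effect of the dev_started check above: creating
 * a flow on a stopped port now fails with ENODEV. Usage sketch:
 */
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
create_after_start(uint16_t port_id, const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *err)
{
	if (rte_eth_dev_start(port_id) != 0)
		return NULL;	/* flow rules are refused until started */
	return rte_flow_create(port_id, attr, pattern, actions, err);
}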
@@ -4467,10 +4648,21 @@ flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, flow->hairpin_flow_id); flow_drv_destroy(dev, flow); if (list) - TAILQ_REMOVE(list, flow, next); + ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, + flow_idx, flow, next); flow_mreg_del_copy_action(dev, flow); - rte_free(flow->fdir); - rte_free(flow); + if (flow->fdir) { + LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { + if (priv_fdir_flow->rix_flow == flow_idx) + break; + } + if (priv_fdir_flow) { + LIST_REMOVE(priv_fdir_flow, next); + rte_free(priv_fdir_flow->fdir); + rte_free(priv_fdir_flow); + } + } + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); } /** @@ -4479,16 +4671,22 @@ flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, * @param dev * Pointer to Ethernet device. * @param list - * Pointer to a TAILQ flow list. + * Pointer to the Indexed flow list. + * @param active + * If flushing is called avtively. */ void -mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) +mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active) { - while (!TAILQ_EMPTY(list)) { - struct rte_flow *flow; + uint32_t num_flushed = 0; - flow = TAILQ_FIRST(list); - flow_list_destroy(dev, list, flow); + while (*list) { + flow_list_destroy(dev, list, *list); + num_flushed++; + } + if (active) { + DRV_LOG(INFO, "port %u: %u flows flushed before stopping", + dev->data->port_id, num_flushed); } } @@ -4498,14 +4696,17 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) * @param dev * Pointer to Ethernet device. * @param list - * Pointer to a TAILQ flow list. + * Pointer to the Indexed flow list. */ void -mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) +mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list) { - struct rte_flow *flow; + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = NULL; + uint32_t idx; - TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx, + flow, next) { flow_drv_remove(dev, flow); flow_mreg_stop_copy_action(dev, flow); } @@ -4519,16 +4720,18 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) * @param dev * Pointer to Ethernet device. * @param list - * Pointer to a TAILQ flow list. + * Pointer to the Indexed flow list. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) +mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list) { - struct rte_flow *flow; + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = NULL; struct rte_flow_error error; + uint32_t idx; int ret = 0; /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ @@ -4536,7 +4739,8 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) if (ret < 0) return -rte_errno; /* Apply Flows created by application. */ - TAILQ_FOREACH(flow, list, next) { + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx, + flow, next) { ret = flow_mreg_start_copy_action(dev, flow); if (ret < 0) goto error; @@ -4553,6 +4757,80 @@ error: return -rte_errno; } +/** + * Stop all default actions for flows. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_stop_default(struct rte_eth_dev *dev) +{ + flow_mreg_del_default_copy_action(dev); + flow_rxq_flags_clear(dev); +} + +/** + * Start all default actions for flows. 
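/*
 * Flush idiom introduced above: flow_list_destroy() unlinks the head
 * element from the indexed list, so looping on the head index drains
 * the whole list. Annotated extract:
 */
while (*list) {				/* head index; 0 means empty */
	flow_list_destroy(dev, list, *list);
	num_flushed++;
}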
+ * + * @param dev + * Pointer to Ethernet device. + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_start_default(struct rte_eth_dev *dev) +{ + struct rte_flow_error error; + + /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ + return flow_mreg_add_default_copy_action(dev, &error); +} + +/** + * Allocate intermediate resources for flow creation. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!priv->inter_flows) { + priv->inter_flows = rte_calloc(__func__, 1, + MLX5_NUM_MAX_DEV_FLOWS * + sizeof(struct mlx5_flow) + + (sizeof(struct mlx5_flow_rss_desc) + + sizeof(uint16_t) * UINT16_MAX) * 2, 0); + if (!priv->inter_flows) { + DRV_LOG(ERR, "can't allocate intermediate memory."); + return; + } + } + priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows) + [MLX5_NUM_MAX_DEV_FLOWS]; + /* Reset the index. */ + priv->flow_idx = 0; + priv->flow_nested_idx = 0; +} + +/** + * Free intermediate resources for flows. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_free_intermediate(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + rte_free(priv->inter_flows); + priv->inter_flows = NULL; +} + /** * Verify the flow list is empty * @@ -4566,9 +4844,11 @@ mlx5_flow_verify(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow; + uint32_t idx; int ret = 0; - TAILQ_FOREACH(flow, &priv->flows, next) { + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx, + flow, next) { DRV_LOG(DEBUG, "port %u flow %p still referenced", dev->data->port_id, (void *)flow); ++ret; @@ -4604,7 +4884,8 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, }; struct rte_flow_item items[] = { { - .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, .spec = &queue_spec, .last = NULL, .mask = &queue_mask, @@ -4617,15 +4898,15 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, .group = MLX5_HAIRPIN_TX_TABLE, }; struct rte_flow_action actions[2]; - struct rte_flow *flow; + uint32_t flow_idx; struct rte_flow_error error; actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; actions[0].conf = &jump; actions[1].type = RTE_FLOW_ACTION_TYPE_END; - flow = flow_list_create(dev, &priv->ctrl_flows, + flow_idx = flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions, false, &error); - if (!flow) { + if (!flow_idx) { DRV_LOG(DEBUG, "Failed to create ctrl flow: rte_errno(%d)," " type(%d), message(%s)", @@ -4702,18 +4983,20 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, .type = RTE_FLOW_ACTION_TYPE_END, }, }; - struct rte_flow *flow; + uint32_t flow_idx; struct rte_flow_error error; unsigned int i; if (!priv->reta_idx_n || !priv->rxqs_n) { return 0; } + if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) + action_rss.types = 0; for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; - flow = flow_list_create(dev, &priv->ctrl_flows, + flow_idx = flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions, false, &error); - if (!flow) + if (!flow_idx) return -rte_errno; return 0; } @@ -4739,6 +5022,62 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev, return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); } +/** + * Create default miss flow rule matching lacp traffic + * + * @param dev + * Pointer to Ethernet device. 
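/*
 * Layout of the intermediate buffer allocated in
 * mlx5_flow_alloc_intermediate() above, one contiguous rte_calloc()
 * region (sizes straight from the call):
 *
 *   MLX5_NUM_MAX_DEV_FLOWS x struct mlx5_flow       device flows
 *   struct mlx5_flow_rss_desc + UINT16_MAX queues   RSS descriptor #0
 *   struct mlx5_flow_rss_desc + UINT16_MAX queues   RSS descriptor #1
 *
 * The two descriptors form the ping-pong pair selected by
 * [!!priv->flow_idx] in flow_list_create(); rss_desc points just past
 * the device-flow array:
 */
priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows)
		 [MLX5_NUM_MAX_DEV_FLOWS];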
+ * @param eth_spec + * An Ethernet flow spec to apply. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_lacp_miss(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + /* + * The LACP matching is done by only using ether type since using + * a multicast dst mac causes kernel to give low priority to this flow. + */ + static const struct rte_flow_item_eth lacp_spec = { + .type = RTE_BE16(0x8809), + }; + static const struct rte_flow_item_eth lacp_mask = { + .type = 0xffff, + }; + const struct rte_flow_attr attr = { + .ingress = 1, + }; + struct rte_flow_item items[] = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = &lacp_spec, + .mask = &lacp_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }; + struct rte_flow_action actions[] = { + { + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS, + }, + { + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; + struct rte_flow_error error; + uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows, + &attr, items, actions, false, &error); + + if (!flow_idx) + return -rte_errno; + return 0; +} + /** * Destroy a flow. * @@ -4752,7 +5091,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; - flow_list_destroy(dev, &priv->flows, flow); + flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow); return 0; } @@ -4768,7 +5107,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; - mlx5_flow_list_flush(dev, &priv->flows); + mlx5_flow_list_flush(dev, &priv->flows, false); return 0; } @@ -4794,9 +5133,9 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, } priv->isolated = !!enable; if (enable) - dev->dev_ops = &mlx5_dev_ops_isolate; + dev->dev_ops = &mlx5_os_dev_ops_isolate; else - dev->dev_ops = &mlx5_dev_ops; + dev->dev_ops = &mlx5_os_dev_ops; return 0; } @@ -4808,14 +5147,25 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, */ static int flow_drv_query(struct rte_eth_dev *dev, - struct rte_flow *flow, + uint32_t flow_idx, const struct rte_flow_action *actions, void *data, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; const struct mlx5_flow_driver_ops *fops; - enum mlx5_flow_drv_type ftype = flow->drv_type; + struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool + [MLX5_IPOOL_RTE_FLOW], + flow_idx); + enum mlx5_flow_drv_type ftype; + if (!flow) { + return rte_flow_error_set(error, ENOENT, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "invalid flow handle"); + } + ftype = flow->drv_type; MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(ftype); @@ -4837,7 +5187,8 @@ mlx5_flow_query(struct rte_eth_dev *dev, { int ret; - ret = flow_drv_query(dev, flow, actions, data, error); + ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data, + error); if (ret < 0) return ret; return 0; @@ -5073,23 +5424,25 @@ flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) * FDIR flow to lookup. * * @return - * Pointer of flow if found, NULL otherwise. + * Index of flow if found, 0 otherwise. 
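/*
 * The (uintptr_t) casts in mlx5_flow_destroy()/flow_drv_query() above
 * treat the opaque rte_flow handle as a widened 32-bit ipool index
 * that the PMD never dereferences. Round-trip sketch:
 */
uint32_t idx = flow_list_create(dev, &priv->flows, attr, items,
				actions, true, error);
struct rte_flow *handle = (void *)(uintptr_t)idx;	/* out to the app */
uint32_t back = (uint32_t)(uintptr_t)(void *)handle;	/* and back again */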
*/ -static struct rte_flow * +static uint32_t flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow = NULL; + uint32_t flow_idx = 0; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; MLX5_ASSERT(fdir_flow); - TAILQ_FOREACH(flow, &priv->flows, next) { - if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { - DRV_LOG(DEBUG, "port %u found FDIR flow %p", - dev->data->port_id, (void *)flow); + LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { + if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) { + DRV_LOG(DEBUG, "port %u found FDIR flow %u", + dev->data->port_id, flow_idx); + flow_idx = priv_fdir_flow->rix_flow; break; } } - return flow; + return flow_idx; } /** @@ -5110,6 +5463,8 @@ flow_fdir_filter_add(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_fdir *fdir_flow; struct rte_flow *flow; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; + uint32_t flow_idx; int ret; fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); @@ -5120,22 +5475,32 @@ flow_fdir_filter_add(struct rte_eth_dev *dev, ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); if (ret) goto error; - flow = flow_fdir_filter_lookup(dev, fdir_flow); - if (flow) { + flow_idx = flow_fdir_filter_lookup(dev, fdir_flow); + if (flow_idx) { rte_errno = EEXIST; goto error; } - flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, - fdir_flow->items, fdir_flow->actions, true, - NULL); + priv_fdir_flow = rte_zmalloc(__func__, sizeof(struct mlx5_fdir_flow), + 0); + if (!priv_fdir_flow) { + rte_errno = ENOMEM; + goto error; + } + flow_idx = flow_list_create(dev, &priv->flows, &fdir_flow->attr, + fdir_flow->items, fdir_flow->actions, true, + NULL); + flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); if (!flow) goto error; - MLX5_ASSERT(!flow->fdir); - flow->fdir = fdir_flow; + flow->fdir = 1; + priv_fdir_flow->fdir = fdir_flow; + priv_fdir_flow->rix_flow = flow_idx; + LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next); DRV_LOG(DEBUG, "port %u created FDIR flow %p", dev->data->port_id, (void *)flow); return 0; error: + rte_free(priv_fdir_flow); rte_free(fdir_flow); return -rte_errno; } @@ -5156,23 +5521,30 @@ flow_fdir_filter_delete(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow; + uint32_t flow_idx; struct mlx5_fdir fdir_flow = { .attr.group = 0, }; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; int ret; ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); if (ret) return -rte_errno; - flow = flow_fdir_filter_lookup(dev, &fdir_flow); - if (!flow) { - rte_errno = ENOENT; - return -rte_errno; + LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { + /* Find the fdir in priv list */ + if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow)) + break; } - flow_list_destroy(dev, &priv->flows, flow); - DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", - dev->data->port_id, (void *)flow); + if (!priv_fdir_flow) + return 0; + LIST_REMOVE(priv_fdir_flow, next); + flow_idx = priv_fdir_flow->rix_flow; + flow_list_destroy(dev, &priv->flows, flow_idx); + rte_free(priv_fdir_flow->fdir); + rte_free(priv_fdir_flow); + DRV_LOG(DEBUG, "port %u deleted FDIR flow %u", + dev->data->port_id, flow_idx); return 0; } @@ -5209,8 +5581,15 @@ static void flow_fdir_filter_flush(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - - 
mlx5_flow_list_flush(dev, &priv->flows); + struct mlx5_fdir_flow *priv_fdir_flow = NULL; + + while (!LIST_EMPTY(&priv->fdir_flows)) { + priv_fdir_flow = LIST_FIRST(&priv->fdir_flows); + LIST_REMOVE(priv_fdir_flow, next); + flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow); + rte_free(priv_fdir_flow->fdir); + rte_free(priv_fdir_flow); + } } /** @@ -5424,9 +5803,9 @@ mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev, * Pointer to Ethernet device structure. * * @return - * Pointer to allocated counter on success, NULL otherwise. + * Index to allocated counter on success, 0 otherwise. */ -struct mlx5_flow_counter * +uint32_t mlx5_counter_alloc(struct rte_eth_dev *dev) { const struct mlx5_flow_driver_ops *fops; @@ -5439,7 +5818,7 @@ mlx5_counter_alloc(struct rte_eth_dev *dev) DRV_LOG(ERR, "port %u counter allocate is not supported.", dev->data->port_id); - return NULL; + return 0; } /** @@ -5448,10 +5827,10 @@ mlx5_counter_alloc(struct rte_eth_dev *dev) * @param[in] dev * Pointer to Ethernet device structure. * @param[in] cnt - * Pointer to counter to be free. + * Index to counter to be free. */ void -mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt) +mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt) { const struct mlx5_flow_driver_ops *fops; struct rte_flow_attr attr = { .transfer = 0 }; @@ -5472,7 +5851,7 @@ mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt) * @param[in] dev * Pointer to Ethernet device structure. * @param[in] cnt - * Pointer to counter to query. + * Index to counter to query. * @param[in] clear * Set to clear counter statistics. * @param[out] pkts @@ -5484,7 +5863,7 @@ mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt) * 0 on success, a negative errno value otherwise. */ int -mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt, +mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, bool clear, uint64_t *pkts, uint64_t *bytes) { const struct mlx5_flow_driver_ops *fops; @@ -5502,22 +5881,39 @@ mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt, #define MLX5_POOL_QUERY_FREQ_US 1000000 +/** + * Get number of all validate pools. + * + * @param[in] sh + * Pointer to mlx5_dev_ctx_shared object. + * + * @return + * The number of all validate pools. + */ +static uint32_t +mlx5_get_all_valid_pool_count(struct mlx5_dev_ctx_shared *sh) +{ + int i; + uint32_t pools_n = 0; + + for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) + pools_n += rte_atomic16_read(&sh->cmng.ccont[i].n_valid); + return pools_n; +} + /** * Set the periodic procedure for triggering asynchronous batch queries for all * the counter pools. * * @param[in] sh - * Pointer to mlx5_ibv_shared object. + * Pointer to mlx5_dev_ctx_shared object. 
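/*
 * Pacing arithmetic used by mlx5_set_query_alarm() below: one full
 * round of pool queries is spread over MLX5_POOL_QUERY_FREQ_US (1 s).
 * With, say, 4 valid pools: us = 1000000 / 4 = 250000, i.e. one pool
 * every 250 ms. Note the division assumes at least one valid pool.
 */
uint32_t pools_n = mlx5_get_all_valid_pool_count(sh);
uint32_t us = MLX5_POOL_QUERY_FREQ_US / pools_n;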
 */
 void
-mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
+mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
 {
-	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0);
-	uint32_t pools_n = rte_atomic16_read(&cont->n_valid);
-	uint32_t us;
+	uint32_t pools_n, us;
 
-	cont = MLX5_CNT_CONTAINER(sh, 1, 0);
-	pools_n += rte_atomic16_read(&cont->n_valid);
+	pools_n = mlx5_get_all_valid_pool_count(sh);
 	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
 	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
 	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
@@ -5538,42 +5934,38 @@ mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
 void
 mlx5_flow_query_alarm(void *arg)
 {
-	struct mlx5_ibv_shared *sh = arg;
+	struct mlx5_dev_ctx_shared *sh = arg;
 	struct mlx5_devx_obj *dcs;
 	uint16_t offset;
 	int ret;
 	uint8_t batch = sh->cmng.batch;
+	uint8_t age = sh->cmng.age;
 	uint16_t pool_index = sh->cmng.pool_index;
 	struct mlx5_pools_container *cont;
-	struct mlx5_pools_container *mcont;
 	struct mlx5_flow_counter_pool *pool;
+	int cont_loop = MLX5_CCONT_TYPE_MAX;
 
 	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
 		goto set_alarm;
 next_container:
-	cont = MLX5_CNT_CONTAINER(sh, batch, 1);
-	mcont = MLX5_CNT_CONTAINER(sh, batch, 0);
-	/* Check if resize was done and need to flip a container. */
-	if (cont != mcont) {
-		if (cont->pools) {
-			/* Clean the old container. */
-			rte_free(cont->pools);
-			memset(cont, 0, sizeof(*cont));
-		}
-		rte_cio_wmb();
-		 /* Flip the host container. */
-		sh->cmng.mhi[batch] ^= (uint8_t)2;
-		cont = mcont;
-	}
+	cont = MLX5_CNT_CONTAINER(sh, batch, age);
+	rte_spinlock_lock(&cont->resize_sl);
 	if (!cont->pools) {
-		/* 2 empty containers case is unexpected. */
-		if (unlikely(batch != sh->cmng.batch))
+		rte_spinlock_unlock(&cont->resize_sl);
+		/* Check if all the containers are empty. */
+		if (unlikely(--cont_loop == 0))
 			goto set_alarm;
 		batch ^= 0x1;
 		pool_index = 0;
+		if (batch == 0 && pool_index == 0) {
+			age ^= 0x1;
+			sh->cmng.batch = batch;
+			sh->cmng.age = age;
+		}
 		goto next_container;
 	}
 	pool = cont->pools[pool_index];
+	rte_spinlock_unlock(&cont->resize_sl);
 	if (pool->raw_hw)
 		/* There is a pool query in progress. */
 		goto set_alarm;
@@ -5585,6 +5977,13 @@ next_container:
 	dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
 							      (&pool->a64_dcs);
 	offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
+	/*
+	 * Identify the counters released between query trigger and query
+	 * handle more efficiently. Counters released in this gap period
+	 * should wait for a new round of query, as the newly arrived
+	 * packets will not be taken into account.
+	 */
+	pool->query_gen++;
 	ret = mlx5_devx_cmd_flow_counter_query(dcs, 0,
 					       MLX5_COUNTERS_PER_POOL - offset,
 					       NULL, NULL,
 					       pool->raw_hw->mem_mng->dm->id,
@@ -5605,42 +6004,120 @@ next_container:
 	if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
 		batch ^= 0x1;
 		pool_index = 0;
+		if (batch == 0 && pool_index == 0)
+			age ^= 0x1;
 	}
 set_alarm:
 	sh->cmng.batch = batch;
 	sh->cmng.pool_index = pool_index;
+	sh->cmng.age = age;
 	mlx5_set_query_alarm(sh);
 }
 
+/**
+ * Check for newly aged-out flows in the counter pool and raise the event.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object.
+ * @param[in] pool
+ *   Pointer to the current counter pool.
+ */
+static void
+mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
+		      struct mlx5_flow_counter_pool *pool)
+{
+	struct mlx5_priv *priv;
+	struct mlx5_flow_counter *cnt;
+	struct mlx5_age_info *age_info;
+	struct mlx5_age_param *age_param;
+	struct mlx5_counter_stats_raw *cur = pool->raw_hw;
+	struct mlx5_counter_stats_raw *prev = pool->raw;
+	uint16_t curr = rte_rdtsc() / (rte_get_tsc_hz() / 10);
+	uint32_t i;
+
+	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
+		cnt = MLX5_POOL_GET_CNT(pool, i);
+		age_param = MLX5_CNT_TO_AGE(cnt);
+		if (rte_atomic16_read(&age_param->state) != AGE_CANDIDATE)
+			continue;
+		if (cur->data[i].hits != prev->data[i].hits) {
+			age_param->expire = curr + age_param->timeout;
+			continue;
+		}
+		if ((uint16_t)(curr - age_param->expire) >= (UINT16_MAX / 2))
+			continue;
+		/*
+		 * Hold the lock first; otherwise, if the release
+		 * happens between setting the state to AGE_TMOUT
+		 * and the tailq operation, the release procedure
+		 * may delete a non-existent tailq node.
+		 */
+		priv = rte_eth_devices[age_param->port_id].data->dev_private;
+		age_info = GET_PORT_AGE_INFO(priv);
+		rte_spinlock_lock(&age_info->aged_sl);
+		/* If the cmpset fails, the release happened. */
+		if (rte_atomic16_cmpset((volatile uint16_t *)
+					&age_param->state,
+					AGE_CANDIDATE,
+					AGE_TMOUT) ==
+					AGE_CANDIDATE) {
+			TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
+			MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
+		}
+		rte_spinlock_unlock(&age_info->aged_sl);
+	}
+	for (i = 0; i < sh->max_port; i++) {
+		age_info = &sh->port[i].age_info;
+		if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
+			continue;
+		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
+			_rte_eth_dev_callback_process
+				(&rte_eth_devices[sh->port[i].devx_ih_port_id],
+				 RTE_ETH_EVENT_FLOW_AGED, NULL);
+		age_info->flags = 0;
+	}
+}
+
 /**
  * Handler for the HW respond about ready values from an asynchronous batch
  * query. This function is probably called by the host thread.
  *
  * @param[in] sh
- *   The pointer to the shared IB device context.
+ *   The pointer to the shared device context.
  * @param[in] async_id
  *   The Devx async ID.
  * @param[in] status
  *   The status of the completion.
 */
 void
-mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
+mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
 				  uint64_t async_id, int status)
 {
 	struct mlx5_flow_counter_pool *pool =
 		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
 	struct mlx5_counter_stats_raw *raw_to_free;
+	uint8_t age = !!IS_AGE_POOL(pool);
+	uint8_t query_gen = pool->query_gen ^ 1;
+	struct mlx5_pools_container *cont =
+		MLX5_CNT_CONTAINER(sh, !IS_EXT_POOL(pool), age);
 
 	if (unlikely(status)) {
 		raw_to_free = pool->raw_hw;
 	} else {
 		raw_to_free = pool->raw;
+		if (IS_AGE_POOL(pool))
+			mlx5_flow_aging_check(sh, pool);
 		rte_spinlock_lock(&pool->sl);
 		pool->raw = pool->raw_hw;
 		rte_spinlock_unlock(&pool->sl);
-		rte_atomic64_add(&pool->query_gen, 1);
 		/* Be sure the new raw counters data is updated in memory.
 */
 		rte_cio_wmb();
+		if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
+			rte_spinlock_lock(&cont->csl);
+			TAILQ_CONCAT(&cont->counters,
+				     &pool->counters[query_gen], next);
+			rte_spinlock_unlock(&cont->csl);
+		}
 	}
 	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
 	pool->raw_hw = NULL;
@@ -5721,7 +6198,8 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
 	};
 	struct rte_flow_action actions[] = {
 		[0] = {
-			.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+			.type = (enum rte_flow_action_type)
+				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
 			.conf = &(struct mlx5_flow_action_copy_mreg){
 				.src = REG_C_1,
 				.dst = idx,
@@ -5737,19 +6215,22 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
 			.type = RTE_FLOW_ACTION_TYPE_END,
 		},
 	};
+	uint32_t flow_idx;
 	struct rte_flow *flow;
 	struct rte_flow_error error;
 
 	if (!config->dv_flow_en)
 		break;
 	/* Create internal flow, validation skips copy action. */
-	flow = flow_list_create(dev, NULL, &attr, items,
-				actions, false, &error);
+	flow_idx = flow_list_create(dev, NULL, &attr, items,
+				    actions, false, &error);
+	flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+			      flow_idx);
 	if (!flow)
 		continue;
 	if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
 		config->flow_mreg_c[n++] = idx;
-	flow_list_destroy(dev, NULL, flow);
+	flow_list_destroy(dev, NULL, flow_idx);
 	}
 	for (; n < MLX5_MREG_C_NUM; ++n)
 		config->flow_mreg_c[n] = REG_NONE;
@@ -5775,8 +6256,45 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev,
 		   struct rte_flow_error *error __rte_unused)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 
 	return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
 				       sh->tx_domain, file);
 }
+
+/**
+ * Get aged-out flows.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] contexts
+ *   The address of an array of pointers to the aged-out flow contexts.
+ * @param[in] nb_contexts
+ *   The length of the contexts array.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Initialized in case of
+ *   error only.
+ *
+ * @return
+ *   The number of aged-out flow contexts reported on success, a negative
+ *   errno value otherwise. If nb_contexts is 0, return the total number
+ *   of aged-out flows. Otherwise, return the number of aged-out flows
+ *   reported in the contexts array.
+ */
+int
+mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
+			 uint32_t nb_contexts, struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+	struct rte_flow_attr attr = { .transfer = 0 };
+
+	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
+		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+		return fops->get_aged_flows(dev, contexts, nb_contexts,
					    error);
+	}
+	DRV_LOG(ERR,
+		"port %u get aged flows is not supported.",
+		dev->data->port_id);
+	return -ENOTSUP;
+}
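
For context, the flow-aging path added above is consumed from the application
side through the generic rte_flow API: the application attaches an AGE action
to a rule, registers for RTE_ETH_EVENT_FLOW_AGED (raised by
mlx5_flow_aging_check() via _rte_eth_dev_callback_process()), and drains the
aged-out contexts with rte_flow_get_aged_flows(), which lands in
mlx5_flow_get_aged_flows() above. The sketch below is a minimal, hypothetical
usage example and is not part of this patch; the rule pattern, the queue
index, and the app_rule_ctx context structure are placeholders chosen for
illustration.

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical per-rule context attached through the AGE action. */
struct app_rule_ctx {
	struct rte_flow *flow;
	uint16_t port_id;
};

/* Callback for RTE_ETH_EVENT_FLOW_AGED: drain and destroy aged-out rules. */
static int
app_flow_aged_cb(uint16_t port_id, enum rte_eth_event_type event,
		 void *cb_arg, void *ret_param)
{
	void *contexts[64];
	struct rte_flow_error err;
	int nb, i;

	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	if (event != RTE_ETH_EVENT_FLOW_AGED)
		return 0;
	do {
		/* Each entry is the 'context' set in rte_flow_action_age. */
		nb = rte_flow_get_aged_flows(port_id, contexts,
					     RTE_DIM(contexts), &err);
		for (i = 0; i < nb; i++) {
			struct app_rule_ctx *ctx = contexts[i];

			rte_flow_destroy(ctx->port_id, ctx->flow, &err);
		}
		/* A full batch may mean more aged-out flows are pending. */
	} while (nb == (int)RTE_DIM(contexts));
	return 0;
}

/* Create a rule that ages out after 10 seconds without matching traffic. */
static struct rte_flow *
app_create_aging_rule(uint16_t port_id, struct app_rule_ctx *ctx)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_age age = {
		.timeout = 10,	/* Seconds of inactivity before aging. */
		.context = ctx,	/* Reported by rte_flow_get_aged_flows(). */
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_FLOW_AGED,
				      app_flow_aged_cb, NULL);
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}

Note that destroying rules directly from the event callback is shown only for
brevity; since the event is raised from the driver's interrupt-handling
thread, a real application may prefer to defer the destruction to its own
control thread.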