X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=2cadf615ec4197e7f67d1ccd0655bfc5f953d290;hb=f3f1f576f43804b30f81deae967bc09aff7000df;hp=9904bc5863d10fe628c3fd7821f15325933bb80b;hpb=ec4e11d41d129ebc7c395b567827492e56fb08b7;p=dpdk.git
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 9904bc5863..2cadf615ec 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -164,105 +164,143 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
 	return false;
 }
 
+/**
+ * Network Service Header (NSH) and its next protocol values
+ * are described in RFC-8393.
+ */
+static enum rte_flow_item_type
+mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+	enum rte_flow_item_type type;
+
+	switch (proto_mask & proto_spec) {
+	case RTE_VXLAN_GPE_TYPE_IPV4:
+		type = RTE_FLOW_ITEM_TYPE_IPV4;
+		break;
+	case RTE_VXLAN_GPE_TYPE_IPV6:
+		type = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	case RTE_VXLAN_GPE_TYPE_ETH:
+		type = RTE_FLOW_ITEM_TYPE_ETH;
+		break;
+	default:
+		type = RTE_FLOW_ITEM_TYPE_END;
+	}
+	return type;
+}
+
+static enum rte_flow_item_type
+mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+	enum rte_flow_item_type type;
+
+	switch (proto_mask & proto_spec) {
+	case IPPROTO_UDP:
+		type = RTE_FLOW_ITEM_TYPE_UDP;
+		break;
+	case IPPROTO_TCP:
+		type = RTE_FLOW_ITEM_TYPE_TCP;
+		break;
+	case IPPROTO_IP:
+		type = RTE_FLOW_ITEM_TYPE_IPV4;
+		break;
+	case IPPROTO_IPV6:
+		type = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	default:
+		type = RTE_FLOW_ITEM_TYPE_END;
+	}
+	return type;
+}
+
+static enum rte_flow_item_type
+mlx5_ethertype_to_item_type(rte_be16_t type_spec,
+			    rte_be16_t type_mask, bool is_tunnel)
+{
+	enum rte_flow_item_type type;
+
+	switch (rte_be_to_cpu_16(type_spec & type_mask)) {
+	case RTE_ETHER_TYPE_TEB:
+		type = is_tunnel ?
+		       RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
+		break;
+	case RTE_ETHER_TYPE_VLAN:
+		type = !is_tunnel ?
+		       RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
+		break;
+	case RTE_ETHER_TYPE_IPV4:
+		type = RTE_FLOW_ITEM_TYPE_IPV4;
+		break;
+	case RTE_ETHER_TYPE_IPV6:
+		type = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	default:
+		type = RTE_FLOW_ITEM_TYPE_END;
+	}
+	return type;
+}
+
 static enum rte_flow_item_type
 mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
 {
-	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
-	uint16_t ether_type = 0;
-	uint16_t ether_type_m;
-	uint8_t ip_next_proto = 0;
-	uint8_t ip_next_proto_m;
+#define MLX5_XSET_ITEM_MASK_SPEC(type, fld) \
+	do { \
+		const void *m = item->mask; \
+		const void *s = item->spec; \
+		mask = m ? 
\ + ((const struct rte_flow_item_##type *)m)->fld : \ + rte_flow_item_##type##_mask.fld; \ + spec = ((const struct rte_flow_item_##type *)s)->fld; \ + } while (0) + + enum rte_flow_item_type ret; + uint16_t spec, mask; if (item == NULL || item->spec == NULL) - return ret; + return RTE_FLOW_ITEM_TYPE_VOID; switch (item->type) { case RTE_FLOW_ITEM_TYPE_ETH: - if (item->mask) - ether_type_m = ((const struct rte_flow_item_eth *) - (item->mask))->type; - else - ether_type_m = rte_flow_item_eth_mask.type; - if (ether_type_m != RTE_BE16(0xFFFF)) - break; - ether_type = ((const struct rte_flow_item_eth *) - (item->spec))->type; - if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) - ret = RTE_FLOW_ITEM_TYPE_VLAN; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(eth, type); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_ethertype_to_item_type(spec, mask, false); break; case RTE_FLOW_ITEM_TYPE_VLAN: - if (item->mask) - ether_type_m = ((const struct rte_flow_item_vlan *) - (item->mask))->inner_type; - else - ether_type_m = rte_flow_item_vlan_mask.inner_type; - if (ether_type_m != RTE_BE16(0xFFFF)) - break; - ether_type = ((const struct rte_flow_item_vlan *) - (item->spec))->inner_type; - if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) - ret = RTE_FLOW_ITEM_TYPE_VLAN; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_ethertype_to_item_type(spec, mask, false); break; case RTE_FLOW_ITEM_TYPE_IPV4: - if (item->mask) - ip_next_proto_m = ((const struct rte_flow_item_ipv4 *) - (item->mask))->hdr.next_proto_id; - else - ip_next_proto_m = - rte_flow_item_ipv4_mask.hdr.next_proto_id; - if (ip_next_proto_m != 0xFF) - break; - ip_next_proto = ((const struct rte_flow_item_ipv4 *) - (item->spec))->hdr.next_proto_id; - if (ip_next_proto == IPPROTO_UDP) - ret = RTE_FLOW_ITEM_TYPE_UDP; - else if (ip_next_proto == IPPROTO_TCP) - ret = RTE_FLOW_ITEM_TYPE_TCP; - else if (ip_next_proto == IPPROTO_IP) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (ip_next_proto == IPPROTO_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_inet_proto_to_item_type(spec, mask); break; case RTE_FLOW_ITEM_TYPE_IPV6: - if (item->mask) - ip_next_proto_m = ((const struct rte_flow_item_ipv6 *) - (item->mask))->hdr.proto; - else - ip_next_proto_m = - rte_flow_item_ipv6_mask.hdr.proto; - if (ip_next_proto_m != 0xFF) - break; - ip_next_proto = ((const struct rte_flow_item_ipv6 *) - (item->spec))->hdr.proto; - if (ip_next_proto == IPPROTO_UDP) - ret = RTE_FLOW_ITEM_TYPE_UDP; - else if (ip_next_proto == IPPROTO_TCP) - ret = RTE_FLOW_ITEM_TYPE_TCP; - else if (ip_next_proto == IPPROTO_IP) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (ip_next_proto == IPPROTO_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_inet_proto_to_item_type(spec, mask); + break; + case 
RTE_FLOW_ITEM_TYPE_GENEVE: + MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol); + ret = mlx5_ethertype_to_item_type(spec, mask, true); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + MLX5_XSET_ITEM_MASK_SPEC(gre, protocol); + ret = mlx5_ethertype_to_item_type(spec, mask, true); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol); + ret = mlx5_nsh_proto_to_item_type(spec, mask); break; default: ret = RTE_FLOW_ITEM_TYPE_VOID; break; } return ret; +#undef MLX5_XSET_ITEM_MASK_SPEC } static const int * @@ -530,7 +568,8 @@ enum mlx5_expansion { MLX5_EXPANSION_IPV6_UDP, MLX5_EXPANSION_IPV6_TCP, MLX5_EXPANSION_IPV6_FRAG_EXT, - MLX5_EXPANSION_GTP + MLX5_EXPANSION_GTP, + MLX5_EXPANSION_GENEVE, }; /** Supported expansion of items. */ @@ -574,6 +613,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, MLX5_EXPANSION_VXLAN_GPE, MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_GENEVE, MLX5_EXPANSION_GTP), .type = RTE_FLOW_ITEM_TYPE_UDP, .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP, @@ -598,6 +638,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, MLX5_EXPANSION_VXLAN_GPE, MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_GENEVE, MLX5_EXPANSION_GTP), .type = RTE_FLOW_ITEM_TYPE_UDP, .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP, @@ -628,7 +669,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, }, [MLX5_EXPANSION_GRE] = { - .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, MLX5_EXPANSION_GRE_KEY, MLX5_EXPANSION_MPLS), @@ -701,6 +743,12 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_GTP, }, + [MLX5_EXPANSION_GENEVE] = { + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_GENEVE, + }, }; static struct rte_flow_action_handle * @@ -748,6 +796,14 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, struct rte_mbuf *m, struct rte_flow_restore_info *info, struct rte_flow_error *err); +static struct rte_flow_item_flex_handle * +mlx5_flow_flex_item_create(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_conf *conf, + struct rte_flow_error *error); +static int +mlx5_flow_flex_item_release(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_handle *handle, + struct rte_flow_error *error); static const struct rte_flow_ops mlx5_flow_ops = { .validate = mlx5_flow_validate, @@ -767,6 +823,8 @@ static const struct rte_flow_ops mlx5_flow_ops = { .tunnel_action_decap_release = mlx5_flow_tunnel_action_release, .tunnel_item_release = mlx5_flow_tunnel_item_release, .get_restore_info = mlx5_flow_tunnel_get_restore_info, + .flex_item_create = mlx5_flow_flex_item_create, + .flex_item_release = mlx5_flow_flex_item_release, }; /* Tunnel information. */ @@ -897,6 +955,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, case MLX5_MTR_COLOR: case MLX5_ASO_FLOW_HIT: case MLX5_ASO_CONNTRACK: + case MLX5_SAMPLE_ID: /* All features use the same REG_C. 
*/ MLX5_ASSERT(priv->mtr_color_reg != REG_NON); return priv->mtr_color_reg; @@ -1162,7 +1221,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) } /** - * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device * flow. * * @param[in] dev @@ -1200,10 +1259,11 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, return; for (i = 0; i != ind_tbl->queues_n; ++i) { int idx = ind_tbl->queues[i]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, rxq); + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx); + MLX5_ASSERT(rxq_ctrl != NULL); + if (rxq_ctrl == NULL) + continue; /* * To support metadata register copy on Tx loopback, * this must be always enabled (metadata may arive @@ -1295,10 +1355,11 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, MLX5_ASSERT(dev->data->dev_started); for (i = 0; i != ind_tbl->queues_n; ++i) { int idx = ind_tbl->queues[i]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, rxq); + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx); + MLX5_ASSERT(rxq_ctrl != NULL); + if (rxq_ctrl == NULL) + continue; if (priv->config.dv_flow_en && priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && mlx5_flow_ext_mreg_supported(dev)) { @@ -1359,18 +1420,16 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev) unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl; + struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); unsigned int j; - if (!(*priv->rxqs)[i]) + if (rxq == NULL || rxq->ctrl == NULL) continue; - rxq_ctrl = container_of((*priv->rxqs)[i], - struct mlx5_rxq_ctrl, rxq); - rxq_ctrl->flow_mark_n = 0; - rxq_ctrl->rxq.mark = 0; + rxq->ctrl->flow_mark_n = 0; + rxq->ctrl->rxq.mark = 0; for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) - rxq_ctrl->flow_tunnels_n[j] = 0; - rxq_ctrl->rxq.tunnel = 0; + rxq->ctrl->flow_tunnels_n[j] = 0; + rxq->ctrl->rxq.tunnel = 0; } } @@ -1384,13 +1443,15 @@ void mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *data; unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) { - if (!(*priv->rxqs)[i]) + struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); + struct mlx5_rxq_data *data; + + if (rxq == NULL || rxq->ctrl == NULL) continue; - data = (*priv->rxqs)[i]; + data = &rxq->ctrl->rxq; if (!rte_flow_dynf_metadata_avail()) { data->dynf_meta = 0; data->flow_meta_mask = 0; @@ -1581,7 +1642,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &queue->index, "queue index out of range"); - if (!(*priv->rxqs)[queue->index]) + if (mlx5_rxq_get(dev, queue->index) == NULL) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &queue->index, @@ -1612,7 +1673,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, * 0 on success, a negative errno code on error. 
*/ static int -mlx5_validate_rss_queues(const struct rte_eth_dev *dev, +mlx5_validate_rss_queues(struct rte_eth_dev *dev, const uint16_t *queues, uint32_t queues_n, const char **error, uint32_t *queue_idx) { @@ -1621,20 +1682,19 @@ mlx5_validate_rss_queues(const struct rte_eth_dev *dev, uint32_t i; for (i = 0; i != queues_n; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl; + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, + queues[i]); if (queues[i] >= priv->rxqs_n) { *error = "queue index out of range"; *queue_idx = i; return -EINVAL; } - if (!(*priv->rxqs)[queues[i]]) { + if (rxq_ctrl == NULL) { *error = "queue is not configured"; *queue_idx = i; return -EINVAL; } - rxq_ctrl = container_of((*priv->rxqs)[queues[i]], - struct mlx5_rxq_ctrl, rxq); if (i == 0) rxq_type = rxq_ctrl->type; if (rxq_type != rxq_ctrl->type) { @@ -2963,7 +3023,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "Geneve TLV opt length exceeeds the limit (31)"); + "Geneve TLV opt length exceeds the limit (31)"); /* Check if class type and length masks are full. */ if (full_mask.option_class != mask->option_class || full_mask.option_type != mask->option_type || @@ -3912,7 +3972,7 @@ find_graph_root(uint32_t rss_level) * subflow. * * @param[in] dev_flow - * Pointer the created preifx subflow. + * Pointer the created prefix subflow. * * @return * The layers get from prefix subflow. @@ -4239,7 +4299,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, }; - /* Fill the register fileds in the flow. */ + /* Fill the register fields in the flow. */ ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); if (ret < 0) return NULL; @@ -4308,7 +4368,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) /* * The copy Flows are not included in any list. There * ones are referenced from other Flows and can not - * be applied, removed, deleted in ardbitrary order + * be applied, removed, deleted in arbitrary order * by list traversing. */ mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP, @@ -4765,7 +4825,7 @@ flow_create_split_inner(struct rte_eth_dev *dev, /* * If dev_flow is as one of the suffix flow, some actions in suffix * flow may need some user defined item layer flags, and pass the - * Metadate rxq mark flag to suffix flow as well. + * Metadata rxq mark flag to suffix flow as well. */ if (flow_split_info->prefix_layers) dev_flow->handle->layers = flow_split_info->prefix_layers; @@ -5206,6 +5266,8 @@ exit: * Pointer to the Q/RSS action. * @param[in] actions_n * Number of original actions. + * @param[in] mtr_sfx + * Check if it is in meter suffix table. * @param[out] error * Perform verbose error reporting if not NULL. * @@ -5218,7 +5280,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, struct rte_flow_action *split_actions, const struct rte_flow_action *actions, const struct rte_flow_action *qrss, - int actions_n, struct rte_flow_error *error) + int actions_n, int mtr_sfx, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rte_flow_action_set_tag *set_tag; @@ -5233,15 +5296,15 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * - Add jump to mreg CP_TBL. * As a result, there will be one more action. */ - ++actions_n; memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); + /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. 
*/ + ++actions_n; set_tag = (void *)(split_actions + actions_n); /* - * If tag action is not set to void(it means we are not the meter - * suffix flow), add the tag action. Since meter suffix flow already - * has the tag added. + * If we are not the meter suffix flow, add the tag action. + * Since meter suffix flow already has the tag added. */ - if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) { + if (!mtr_sfx) { /* * Allocate the new subflow ID. This one is unique within * device and not shared with representors. Otherwise, @@ -5274,6 +5337,12 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, MLX5_RTE_FLOW_ACTION_TYPE_TAG, .conf = set_tag, }; + } else { + /* + * If we are the suffix flow of meter, tag already exist. + * Set the QUEUE/RSS action to void. + */ + split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID; } /* JUMP action to jump to mreg copy table (CP_TBL). */ jump = (void *)(set_tag + 1); @@ -5305,7 +5374,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * @param[out] error * Perform verbose error reporting if not NULL. * @param[in] encap_idx - * The encap action inndex. + * The encap action index. * * @return * 0 on success, negative value otherwise @@ -5570,7 +5639,7 @@ flow_sample_split_prep(struct rte_eth_dev *dev, /* Prepare the prefix tag action. */ append_index++; set_tag = (void *)(actions_pre + actions_n + append_index); - ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error); + ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error); if (ret < 0) return ret; mlx5_ipool_malloc(priv->sh->ipool @@ -5728,17 +5797,6 @@ flow_create_split_metadata(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no memory to split " "metadata flow"); - /* - * If we are the suffix flow of meter, tag already exist. - * Set the tag action to void. - */ - if (mtr_sfx) - ext_actions[qrss - actions].type = - RTE_FLOW_ACTION_TYPE_VOID; - else - ext_actions[qrss - actions].type = - (enum rte_flow_action_type) - MLX5_RTE_FLOW_ACTION_TYPE_TAG; /* * Create the new actions list with removed Q/RSS action * and appended set tag and jump to register copy table @@ -5746,7 +5804,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev, * in advance, because it is needed for set tag action. */ qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, - qrss, actions_n, error); + qrss, actions_n, + mtr_sfx, error); if (!mtr_sfx && !qrss_id) { ret = -rte_errno; goto exit; @@ -5837,6 +5896,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, /* Add suffix subflow to execute Q/RSS. */ flow_split_info->prefix_layers = layers; flow_split_info->prefix_mark = 0; + flow_split_info->table_id = 0; ret = flow_create_split_inner(dev, flow, &dev_flow, &q_attr, mtr_sfx ? items : q_items, q_actions, @@ -6051,7 +6111,6 @@ flow_create_split_meter(struct rte_eth_dev *dev, goto exit; } /* Add the prefix subflow. */ - flow_split_info->prefix_mark = 0; skip_scale_restore = flow_split_info->skip_scale; flow_split_info->skip_scale |= 1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT; @@ -6084,7 +6143,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, MLX5_FLOW_TABLE_LEVEL_METER; flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); - flow_split_info->prefix_mark = dev_flow->handle->mark; + flow_split_info->prefix_mark |= dev_flow->handle->mark; flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX; } /* Add the prefix subflow. 
*/ @@ -6236,7 +6295,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, } flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); - flow_split_info->prefix_mark = dev_flow->handle->mark; + flow_split_info->prefix_mark |= dev_flow->handle->mark; /* Suffix group level already be scaled with factor, set * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale * again in translation. @@ -6840,7 +6899,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, * @param type * Flow type to be flushed. * @param active - * If flushing is called avtively. + * If flushing is called actively. */ void mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type, @@ -7764,7 +7823,6 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, static int mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) { - struct mlx5_devx_mkey_attr mkey_attr; struct mlx5_counter_stats_mem_mng *mem_mng; volatile struct flow_counter_stats *raw_data; int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES; @@ -7774,6 +7832,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) sizeof(struct mlx5_counter_stats_mem_mng); size_t pgsize = rte_mem_page_size(); uint8_t *mem; + int ret; int i; if (pgsize == (size_t)-1) { @@ -7788,23 +7847,10 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) } mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; - mem_mng->umem = mlx5_os_umem_reg(sh->cdev->ctx, mem, size, - IBV_ACCESS_LOCAL_WRITE); - if (!mem_mng->umem) { - rte_errno = errno; - mlx5_free(mem); - return -rte_errno; - } - memset(&mkey_attr, 0, sizeof(mkey_attr)); - mkey_attr.addr = (uintptr_t)mem; - mkey_attr.size = size; - mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem); - mkey_attr.pd = sh->cdev->pdn; - mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write; - mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read; - mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr); - if (!mem_mng->dm) { - mlx5_os_umem_dereg(mem_mng->umem); + ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd, + sh->cdev->pdn, mem, size, + &mem_mng->wm); + if (ret) { rte_errno = errno; mlx5_free(mem); return -rte_errno; @@ -7923,7 +7969,7 @@ mlx5_flow_query_alarm(void *arg) ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0, MLX5_COUNTERS_PER_POOL, NULL, NULL, - pool->raw_hw->mem_mng->dm->id, + pool->raw_hw->mem_mng->wm.lkey, (void *)(uintptr_t) pool->raw_hw->data, sh->devx_comp, @@ -8500,7 +8546,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. PMDs initialize this * structure in case of error only. * @return - * 0 on success, a nagative value otherwise. + * 0 on success, a negative value otherwise. 
*/ int mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx, @@ -8978,7 +9024,7 @@ mlx5_get_tof(const struct rte_flow_item *item, } /** - * tunnel offload functionalilty is defined for DV environment only + * tunnel offload functionality is defined for DV environment only */ #ifdef HAVE_IBV_FLOW_DV_SUPPORT __extension__ @@ -9491,33 +9537,37 @@ err: return err; } -static inline bool +static inline int mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, struct rte_flow_tunnel *tunnel, - const char *err_msg) + struct rte_flow_error *error) { - err_msg = NULL; - if (!is_tunnel_offload_active(dev)) { - err_msg = "tunnel offload was not activated"; - goto out; - } else if (!tunnel) { - err_msg = "no application tunnel"; - goto out; - } + struct mlx5_priv *priv = dev->data->dev_private; + if (!priv->config.dv_flow_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "flow DV interface is off"); + if (!is_tunnel_offload_active(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "tunnel offload was not activated"); + if (!tunnel) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "no application tunnel"); switch (tunnel->type) { default: - err_msg = "unsupported tunnel type"; - goto out; + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "unsupported tunnel type"); case RTE_FLOW_ITEM_TYPE_VXLAN: case RTE_FLOW_ITEM_TYPE_GRE: case RTE_FLOW_ITEM_TYPE_NVGRE: case RTE_FLOW_ITEM_TYPE_GENEVE: break; } - -out: - return !err_msg; + return 0; } static int @@ -9527,15 +9577,11 @@ mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev, uint32_t *num_of_actions, struct rte_flow_error *error) { - int ret; struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); + int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error); - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, - err_msg); + if (ret) + return ret; ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); if (ret < 0) { return rte_flow_error_set(error, ret, @@ -9554,15 +9600,11 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev, uint32_t *num_of_items, struct rte_flow_error *error) { - int ret; struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); + int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error); - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - err_msg); + if (ret) + return ret; ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); if (ret < 0) { return rte_flow_error_set(error, ret, @@ -9788,6 +9830,45 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh, } #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ +/* Flex flow item API */ +static struct rte_flow_item_flex_handle * +mlx5_flow_flex_item_create(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_conf *conf, + struct rte_flow_error *error) +{ + static const char err_msg[] = "flex item creation unsupported"; + struct rte_flow_attr attr = { .transfer = 0 }; + const struct mlx5_flow_driver_ops *fops = + flow_get_drv_ops(flow_get_drv_type(dev, &attr)); + + if (!fops->item_create) { + DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, err_msg); + return NULL; + } + return fops->item_create(dev, 
conf, error); +} + +static int +mlx5_flow_flex_item_release(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_handle *handle, + struct rte_flow_error *error) +{ + static const char err_msg[] = "flex item release unsupported"; + struct rte_flow_attr attr = { .transfer = 0 }; + const struct mlx5_flow_driver_ops *fops = + flow_get_drv_ops(flow_get_drv_type(dev, &attr)); + + if (!fops->item_release) { + DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, err_msg); + return -rte_errno; + } + return fops->item_release(dev, handle, error); +} + static void mlx5_dbg__print_pattern(const struct rte_flow_item *item) {