X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=2cadf615ec4197e7f67d1ccd0655bfc5f953d290;hb=f3f1f576f43804b30f81deae967bc09aff7000df;hp=d5ed673891f27372df26997ad633d3060e00b5e1;hpb=7cf2d15a399255eca96746948a715f678736eebf;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index d5ed673891..2cadf615ec 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -164,105 +164,143 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
 	return false;
 }
 
+/**
+ * Network Service Header (NSH) and its next protocol values
+ * are described in RFC-8393.
+ */
+static enum rte_flow_item_type
+mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+	enum rte_flow_item_type type;
+
+	switch (proto_mask & proto_spec) {
+	case RTE_VXLAN_GPE_TYPE_IPV4:
+		type = RTE_FLOW_ITEM_TYPE_IPV4;
+		break;
+	case RTE_VXLAN_GPE_TYPE_IPV6:
+		type = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	case RTE_VXLAN_GPE_TYPE_ETH:
+		type = RTE_FLOW_ITEM_TYPE_ETH;
+		break;
+	default:
+		type = RTE_FLOW_ITEM_TYPE_END;
+	}
+	return type;
+}
+
+static enum rte_flow_item_type
+mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+	enum rte_flow_item_type type;
+
+	switch (proto_mask & proto_spec) {
+	case IPPROTO_UDP:
+		type = RTE_FLOW_ITEM_TYPE_UDP;
+		break;
+	case IPPROTO_TCP:
+		type = RTE_FLOW_ITEM_TYPE_TCP;
+		break;
+	case IPPROTO_IP:
+		type = RTE_FLOW_ITEM_TYPE_IPV4;
+		break;
+	case IPPROTO_IPV6:
+		type = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	default:
+		type = RTE_FLOW_ITEM_TYPE_END;
+	}
+	return type;
+}
+
+static enum rte_flow_item_type
+mlx5_ethertype_to_item_type(rte_be16_t type_spec,
+			    rte_be16_t type_mask, bool is_tunnel)
+{
+	enum rte_flow_item_type type;
+
+	switch (rte_be_to_cpu_16(type_spec & type_mask)) {
+	case RTE_ETHER_TYPE_TEB:
+		type = is_tunnel ?
+		       RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
+		break;
+	case RTE_ETHER_TYPE_VLAN:
+		type = !is_tunnel ?
+		       RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
+		break;
+	case RTE_ETHER_TYPE_IPV4:
+		type = RTE_FLOW_ITEM_TYPE_IPV4;
+		break;
+	case RTE_ETHER_TYPE_IPV6:
+		type = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	default:
+		type = RTE_FLOW_ITEM_TYPE_END;
+	}
+	return type;
+}
+
 static enum rte_flow_item_type
 mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
 {
-	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
-	uint16_t ether_type = 0;
-	uint16_t ether_type_m;
-	uint8_t ip_next_proto = 0;
-	uint8_t ip_next_proto_m;
+#define MLX5_XSET_ITEM_MASK_SPEC(type, fld) \
+	do { \
+		const void *m = item->mask; \
+		const void *s = item->spec; \
+		mask = m ? 
\ + ((const struct rte_flow_item_##type *)m)->fld : \ + rte_flow_item_##type##_mask.fld; \ + spec = ((const struct rte_flow_item_##type *)s)->fld; \ + } while (0) + + enum rte_flow_item_type ret; + uint16_t spec, mask; if (item == NULL || item->spec == NULL) - return ret; + return RTE_FLOW_ITEM_TYPE_VOID; switch (item->type) { case RTE_FLOW_ITEM_TYPE_ETH: - if (item->mask) - ether_type_m = ((const struct rte_flow_item_eth *) - (item->mask))->type; - else - ether_type_m = rte_flow_item_eth_mask.type; - if (ether_type_m != RTE_BE16(0xFFFF)) - break; - ether_type = ((const struct rte_flow_item_eth *) - (item->spec))->type; - if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) - ret = RTE_FLOW_ITEM_TYPE_VLAN; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(eth, type); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_ethertype_to_item_type(spec, mask, false); break; case RTE_FLOW_ITEM_TYPE_VLAN: - if (item->mask) - ether_type_m = ((const struct rte_flow_item_vlan *) - (item->mask))->inner_type; - else - ether_type_m = rte_flow_item_vlan_mask.inner_type; - if (ether_type_m != RTE_BE16(0xFFFF)) - break; - ether_type = ((const struct rte_flow_item_vlan *) - (item->spec))->inner_type; - if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) - ret = RTE_FLOW_ITEM_TYPE_VLAN; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_ethertype_to_item_type(spec, mask, false); break; case RTE_FLOW_ITEM_TYPE_IPV4: - if (item->mask) - ip_next_proto_m = ((const struct rte_flow_item_ipv4 *) - (item->mask))->hdr.next_proto_id; - else - ip_next_proto_m = - rte_flow_item_ipv4_mask.hdr.next_proto_id; - if (ip_next_proto_m != 0xFF) - break; - ip_next_proto = ((const struct rte_flow_item_ipv4 *) - (item->spec))->hdr.next_proto_id; - if (ip_next_proto == IPPROTO_UDP) - ret = RTE_FLOW_ITEM_TYPE_UDP; - else if (ip_next_proto == IPPROTO_TCP) - ret = RTE_FLOW_ITEM_TYPE_TCP; - else if (ip_next_proto == IPPROTO_IP) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (ip_next_proto == IPPROTO_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_inet_proto_to_item_type(spec, mask); break; case RTE_FLOW_ITEM_TYPE_IPV6: - if (item->mask) - ip_next_proto_m = ((const struct rte_flow_item_ipv6 *) - (item->mask))->hdr.proto; - else - ip_next_proto_m = - rte_flow_item_ipv6_mask.hdr.proto; - if (ip_next_proto_m != 0xFF) - break; - ip_next_proto = ((const struct rte_flow_item_ipv6 *) - (item->spec))->hdr.proto; - if (ip_next_proto == IPPROTO_UDP) - ret = RTE_FLOW_ITEM_TYPE_UDP; - else if (ip_next_proto == IPPROTO_TCP) - ret = RTE_FLOW_ITEM_TYPE_TCP; - else if (ip_next_proto == IPPROTO_IP) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (ip_next_proto == IPPROTO_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_inet_proto_to_item_type(spec, mask); + break; + case 
RTE_FLOW_ITEM_TYPE_GENEVE: + MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol); + ret = mlx5_ethertype_to_item_type(spec, mask, true); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + MLX5_XSET_ITEM_MASK_SPEC(gre, protocol); + ret = mlx5_ethertype_to_item_type(spec, mask, true); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol); + ret = mlx5_nsh_proto_to_item_type(spec, mask); break; default: ret = RTE_FLOW_ITEM_TYPE_VOID; break; } return ret; +#undef MLX5_XSET_ITEM_MASK_SPEC } static const int * @@ -391,13 +429,20 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) { next = NULL; missed = 1; - for (i = 0; node->next && node->next[i]; ++i) { + i = 0; + while (node->next && node->next[i]) { next = &graph[node->next[i]]; if (next->type == missed_item.type) { flow_items[0].type = missed_item.type; flow_items[1].type = RTE_FLOW_ITEM_TYPE_END; break; } + if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) { + node = next; + i = 0; + } else { + ++i; + } next = NULL; } } @@ -523,7 +568,8 @@ enum mlx5_expansion { MLX5_EXPANSION_IPV6_UDP, MLX5_EXPANSION_IPV6_TCP, MLX5_EXPANSION_IPV6_FRAG_EXT, - MLX5_EXPANSION_GTP + MLX5_EXPANSION_GTP, + MLX5_EXPANSION_GENEVE, }; /** Supported expansion of items. */ @@ -567,6 +613,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, MLX5_EXPANSION_VXLAN_GPE, MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_GENEVE, MLX5_EXPANSION_GTP), .type = RTE_FLOW_ITEM_TYPE_UDP, .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP, @@ -591,6 +638,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, MLX5_EXPANSION_VXLAN_GPE, MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_GENEVE, MLX5_EXPANSION_GTP), .type = RTE_FLOW_ITEM_TYPE_UDP, .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP, @@ -621,7 +669,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, }, [MLX5_EXPANSION_GRE] = { - .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, MLX5_EXPANSION_GRE_KEY, MLX5_EXPANSION_MPLS), @@ -694,6 +743,12 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_GTP, }, + [MLX5_EXPANSION_GENEVE] = { + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_GENEVE, + }, }; static struct rte_flow_action_handle * @@ -741,6 +796,14 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, struct rte_mbuf *m, struct rte_flow_restore_info *info, struct rte_flow_error *err); +static struct rte_flow_item_flex_handle * +mlx5_flow_flex_item_create(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_conf *conf, + struct rte_flow_error *error); +static int +mlx5_flow_flex_item_release(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_handle *handle, + struct rte_flow_error *error); static const struct rte_flow_ops mlx5_flow_ops = { .validate = mlx5_flow_validate, @@ -760,6 +823,8 @@ static const struct rte_flow_ops mlx5_flow_ops = { .tunnel_action_decap_release = mlx5_flow_tunnel_action_release, .tunnel_item_release = mlx5_flow_tunnel_item_release, .get_restore_info = mlx5_flow_tunnel_get_restore_info, + .flex_item_create = mlx5_flow_flex_item_create, + .flex_item_release = 
mlx5_flow_flex_item_release, }; /* Tunnel information. */ @@ -890,6 +955,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, case MLX5_MTR_COLOR: case MLX5_ASO_FLOW_HIT: case MLX5_ASO_CONNTRACK: + case MLX5_SAMPLE_ID: /* All features use the same REG_C. */ MLX5_ASSERT(priv->mtr_color_reg != REG_NON); return priv->mtr_color_reg; @@ -1155,7 +1221,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) } /** - * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device * flow. * * @param[in] dev @@ -1193,10 +1259,11 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, return; for (i = 0; i != ind_tbl->queues_n; ++i) { int idx = ind_tbl->queues[i]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, rxq); + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx); + MLX5_ASSERT(rxq_ctrl != NULL); + if (rxq_ctrl == NULL) + continue; /* * To support metadata register copy on Tx loopback, * this must be always enabled (metadata may arive @@ -1288,10 +1355,11 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, MLX5_ASSERT(dev->data->dev_started); for (i = 0; i != ind_tbl->queues_n; ++i) { int idx = ind_tbl->queues[i]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, rxq); + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx); + MLX5_ASSERT(rxq_ctrl != NULL); + if (rxq_ctrl == NULL) + continue; if (priv->config.dv_flow_en && priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && mlx5_flow_ext_mreg_supported(dev)) { @@ -1352,18 +1420,16 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev) unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl; + struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); unsigned int j; - if (!(*priv->rxqs)[i]) + if (rxq == NULL || rxq->ctrl == NULL) continue; - rxq_ctrl = container_of((*priv->rxqs)[i], - struct mlx5_rxq_ctrl, rxq); - rxq_ctrl->flow_mark_n = 0; - rxq_ctrl->rxq.mark = 0; + rxq->ctrl->flow_mark_n = 0; + rxq->ctrl->rxq.mark = 0; for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) - rxq_ctrl->flow_tunnels_n[j] = 0; - rxq_ctrl->rxq.tunnel = 0; + rxq->ctrl->flow_tunnels_n[j] = 0; + rxq->ctrl->rxq.tunnel = 0; } } @@ -1377,13 +1443,15 @@ void mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *data; unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) { - if (!(*priv->rxqs)[i]) + struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); + struct mlx5_rxq_data *data; + + if (rxq == NULL || rxq->ctrl == NULL) continue; - data = (*priv->rxqs)[i]; + data = &rxq->ctrl->rxq; if (!rte_flow_dynf_metadata_avail()) { data->dynf_meta = 0; data->flow_meta_mask = 0; @@ -1574,7 +1642,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &queue->index, "queue index out of range"); - if (!(*priv->rxqs)[queue->index]) + if (mlx5_rxq_get(dev, queue->index) == NULL) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &queue->index, @@ -1587,6 +1655,57 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, return 0; } +/** + * Validate queue numbers for device RSS. + * + * @param[in] dev + * Configured device. + * @param[in] queues + * Array of queue numbers. + * @param[in] queues_n + * Size of the @p queues array. + * @param[out] error + * On error, filled with a textual error description. 
+ * @param[out] queue + * On error, filled with an offending queue index in @p queues array. + * + * @return + * 0 on success, a negative errno code on error. + */ +static int +mlx5_validate_rss_queues(struct rte_eth_dev *dev, + const uint16_t *queues, uint32_t queues_n, + const char **error, uint32_t *queue_idx) +{ + const struct mlx5_priv *priv = dev->data->dev_private; + enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED; + uint32_t i; + + for (i = 0; i != queues_n; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, + queues[i]); + + if (queues[i] >= priv->rxqs_n) { + *error = "queue index out of range"; + *queue_idx = i; + return -EINVAL; + } + if (rxq_ctrl == NULL) { + *error = "queue is not configured"; + *queue_idx = i; + return -EINVAL; + } + if (i == 0) + rxq_type = rxq_ctrl->type; + if (rxq_type != rxq_ctrl->type) { + *error = "combining hairpin and regular RSS queues is not supported"; + *queue_idx = i; + return -ENOTSUP; + } + } + return 0; +} + /* * Validate the rss action. * @@ -1607,8 +1726,9 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_rss *rss = action->conf; - enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED; - unsigned int i; + int ret; + const char *message; + uint32_t queue_idx; if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) @@ -1672,27 +1792,12 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "No queues configured"); - for (i = 0; i != rss->queue_num; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl; - - if (rss->queue[i] >= priv->rxqs_n) - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->queue[i], "queue index out of range"); - if (!(*priv->rxqs)[rss->queue[i]]) - return rte_flow_error_set - (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->queue[i], "queue is not configured"); - rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]], - struct mlx5_rxq_ctrl, rxq); - if (i == 0) - rxq_type = rxq_ctrl->type; - if (rxq_type != rxq_ctrl->type) - return rte_flow_error_set - (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->queue[i], - "combining hairpin and regular RSS queues is not supported"); + ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num, + &message, &queue_idx); + if (ret != 0) { + return rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue[queue_idx], message); } return 0; } @@ -2918,7 +3023,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "Geneve TLV opt length exceeeds the limit (31)"); + "Geneve TLV opt length exceeds the limit (31)"); /* Check if class type and length masks are full. */ if (full_mask.option_class != mask->option_class || full_mask.option_type != mask->option_type || @@ -3867,7 +3972,7 @@ find_graph_root(uint32_t rss_level) * subflow. * * @param[in] dev_flow - * Pointer the created preifx subflow. + * Pointer the created prefix subflow. * * @return * The layers get from prefix subflow. @@ -4194,7 +4299,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, }; - /* Fill the register fileds in the flow. */ + /* Fill the register fields in the flow. 
*/ ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); if (ret < 0) return NULL; @@ -4263,7 +4368,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) /* * The copy Flows are not included in any list. There * ones are referenced from other Flows and can not - * be applied, removed, deleted in ardbitrary order + * be applied, removed, deleted in arbitrary order * by list traversing. */ mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP, @@ -4720,7 +4825,7 @@ flow_create_split_inner(struct rte_eth_dev *dev, /* * If dev_flow is as one of the suffix flow, some actions in suffix * flow may need some user defined item layer flags, and pass the - * Metadate rxq mark flag to suffix flow as well. + * Metadata rxq mark flag to suffix flow as well. */ if (flow_split_info->prefix_layers) dev_flow->handle->layers = flow_split_info->prefix_layers; @@ -5161,6 +5266,8 @@ exit: * Pointer to the Q/RSS action. * @param[in] actions_n * Number of original actions. + * @param[in] mtr_sfx + * Check if it is in meter suffix table. * @param[out] error * Perform verbose error reporting if not NULL. * @@ -5173,7 +5280,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, struct rte_flow_action *split_actions, const struct rte_flow_action *actions, const struct rte_flow_action *qrss, - int actions_n, struct rte_flow_error *error) + int actions_n, int mtr_sfx, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rte_flow_action_set_tag *set_tag; @@ -5188,15 +5296,15 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * - Add jump to mreg CP_TBL. * As a result, there will be one more action. */ - ++actions_n; memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); + /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */ + ++actions_n; set_tag = (void *)(split_actions + actions_n); /* - * If tag action is not set to void(it means we are not the meter - * suffix flow), add the tag action. Since meter suffix flow already - * has the tag added. + * If we are not the meter suffix flow, add the tag action. + * Since meter suffix flow already has the tag added. */ - if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) { + if (!mtr_sfx) { /* * Allocate the new subflow ID. This one is unique within * device and not shared with representors. Otherwise, @@ -5229,6 +5337,12 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, MLX5_RTE_FLOW_ACTION_TYPE_TAG, .conf = set_tag, }; + } else { + /* + * If we are the suffix flow of meter, tag already exist. + * Set the QUEUE/RSS action to void. + */ + split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID; } /* JUMP action to jump to mreg copy table (CP_TBL). */ jump = (void *)(set_tag + 1); @@ -5260,7 +5374,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * @param[out] error * Perform verbose error reporting if not NULL. * @param[in] encap_idx - * The encap action inndex. + * The encap action index. * * @return * 0 on success, negative value otherwise @@ -5525,7 +5639,7 @@ flow_sample_split_prep(struct rte_eth_dev *dev, /* Prepare the prefix tag action. 
*/ append_index++; set_tag = (void *)(actions_pre + actions_n + append_index); - ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error); + ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error); if (ret < 0) return ret; mlx5_ipool_malloc(priv->sh->ipool @@ -5683,17 +5797,6 @@ flow_create_split_metadata(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no memory to split " "metadata flow"); - /* - * If we are the suffix flow of meter, tag already exist. - * Set the tag action to void. - */ - if (mtr_sfx) - ext_actions[qrss - actions].type = - RTE_FLOW_ACTION_TYPE_VOID; - else - ext_actions[qrss - actions].type = - (enum rte_flow_action_type) - MLX5_RTE_FLOW_ACTION_TYPE_TAG; /* * Create the new actions list with removed Q/RSS action * and appended set tag and jump to register copy table @@ -5701,7 +5804,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev, * in advance, because it is needed for set tag action. */ qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, - qrss, actions_n, error); + qrss, actions_n, + mtr_sfx, error); if (!mtr_sfx && !qrss_id) { ret = -rte_errno; goto exit; @@ -5792,6 +5896,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, /* Add suffix subflow to execute Q/RSS. */ flow_split_info->prefix_layers = layers; flow_split_info->prefix_mark = 0; + flow_split_info->table_id = 0; ret = flow_create_split_inner(dev, flow, &dev_flow, &q_attr, mtr_sfx ? items : q_items, q_actions, @@ -6006,7 +6111,6 @@ flow_create_split_meter(struct rte_eth_dev *dev, goto exit; } /* Add the prefix subflow. */ - flow_split_info->prefix_mark = 0; skip_scale_restore = flow_split_info->skip_scale; flow_split_info->skip_scale |= 1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT; @@ -6039,7 +6143,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, MLX5_FLOW_TABLE_LEVEL_METER; flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); - flow_split_info->prefix_mark = dev_flow->handle->mark; + flow_split_info->prefix_mark |= dev_flow->handle->mark; flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX; } /* Add the prefix subflow. */ @@ -6191,7 +6295,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, } flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); - flow_split_info->prefix_mark = dev_flow->handle->mark; + flow_split_info->prefix_mark |= dev_flow->handle->mark; /* Suffix group level already be scaled with factor, set * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale * again in translation. @@ -6795,7 +6899,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, * @param type * Flow type to be flushed. * @param active - * If flushing is called avtively. + * If flushing is called actively. 
*/ void mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type, @@ -7719,7 +7823,6 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, static int mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) { - struct mlx5_devx_mkey_attr mkey_attr; struct mlx5_counter_stats_mem_mng *mem_mng; volatile struct flow_counter_stats *raw_data; int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES; @@ -7729,6 +7832,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) sizeof(struct mlx5_counter_stats_mem_mng); size_t pgsize = rte_mem_page_size(); uint8_t *mem; + int ret; int i; if (pgsize == (size_t)-1) { @@ -7743,23 +7847,10 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) } mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; - mem_mng->umem = mlx5_os_umem_reg(sh->cdev->ctx, mem, size, - IBV_ACCESS_LOCAL_WRITE); - if (!mem_mng->umem) { - rte_errno = errno; - mlx5_free(mem); - return -rte_errno; - } - memset(&mkey_attr, 0, sizeof(mkey_attr)); - mkey_attr.addr = (uintptr_t)mem; - mkey_attr.size = size; - mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem); - mkey_attr.pd = sh->cdev->pdn; - mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write; - mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read; - mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr); - if (!mem_mng->dm) { - mlx5_os_umem_dereg(mem_mng->umem); + ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd, + sh->cdev->pdn, mem, size, + &mem_mng->wm); + if (ret) { rte_errno = errno; mlx5_free(mem); return -rte_errno; @@ -7878,7 +7969,7 @@ mlx5_flow_query_alarm(void *arg) ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0, MLX5_COUNTERS_PER_POOL, NULL, NULL, - pool->raw_hw->mem_mng->dm->id, + pool->raw_hw->mem_mng->wm.lkey, (void *)(uintptr_t) pool->raw_hw->data, sh->devx_comp, @@ -8455,7 +8546,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. PMDs initialize this * structure in case of error only. * @return - * 0 on success, a nagative value otherwise. + * 0 on success, a negative value otherwise. */ int mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx, @@ -8779,6 +8870,116 @@ mlx5_action_handle_flush(struct rte_eth_dev *dev) return ret; } +/** + * Validate existing indirect actions against current device configuration + * and attach them to device resources. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx5_action_handle_attach(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_indexed_pool *ipool = + priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS]; + struct mlx5_shared_action_rss *shared_rss, *shared_rss_last; + int ret = 0; + uint32_t idx; + + ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { + struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + const char *message; + uint32_t queue_idx; + + ret = mlx5_validate_rss_queues(dev, ind_tbl->queues, + ind_tbl->queues_n, + &message, &queue_idx); + if (ret != 0) { + DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s", + dev->data->port_id, ind_tbl->queues[queue_idx], + message); + break; + } + } + if (ret != 0) + return ret; + ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { + struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + + ret = mlx5_ind_table_obj_attach(dev, ind_tbl); + if (ret != 0) { + DRV_LOG(ERR, "Port %u could not attach " + "indirection table obj %p", + dev->data->port_id, (void *)ind_tbl); + goto error; + } + } + return 0; +error: + shared_rss_last = shared_rss; + ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { + struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + + if (shared_rss == shared_rss_last) + break; + if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0) + DRV_LOG(CRIT, "Port %u could not detach " + "indirection table obj %p on rollback", + dev->data->port_id, (void *)ind_tbl); + } + return ret; +} + +/** + * Detach indirect actions of the device from its resources. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_action_handle_detach(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_indexed_pool *ipool = + priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS]; + struct mlx5_shared_action_rss *shared_rss, *shared_rss_last; + int ret = 0; + uint32_t idx; + + ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { + struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + + ret = mlx5_ind_table_obj_detach(dev, ind_tbl); + if (ret != 0) { + DRV_LOG(ERR, "Port %u could not detach " + "indirection table obj %p", + dev->data->port_id, (void *)ind_tbl); + goto error; + } + } + return 0; +error: + shared_rss_last = shared_rss; + ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { + struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + + if (shared_rss == shared_rss_last) + break; + if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0) + DRV_LOG(CRIT, "Port %u could not attach " + "indirection table obj %p on rollback", + dev->data->port_id, (void *)ind_tbl); + } + return ret; +} + #ifndef HAVE_MLX5DV_DR #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1)) #else @@ -8823,7 +9024,7 @@ mlx5_get_tof(const struct rte_flow_item *item, } /** - * tunnel offload functionalilty is defined for DV environment only + * tunnel offload functionality is defined for DV environment only */ #ifdef HAVE_IBV_FLOW_DV_SUPPORT __extension__ @@ -9336,33 +9537,37 @@ err: return err; } -static inline bool +static inline int mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, struct rte_flow_tunnel *tunnel, - const char *err_msg) + struct rte_flow_error *error) { - err_msg = NULL; - if (!is_tunnel_offload_active(dev)) { - err_msg = "tunnel offload was not activated"; - goto out; - } else if (!tunnel) { - err_msg = "no application 
tunnel"; - goto out; - } + struct mlx5_priv *priv = dev->data->dev_private; + if (!priv->config.dv_flow_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "flow DV interface is off"); + if (!is_tunnel_offload_active(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "tunnel offload was not activated"); + if (!tunnel) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "no application tunnel"); switch (tunnel->type) { default: - err_msg = "unsupported tunnel type"; - goto out; + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "unsupported tunnel type"); case RTE_FLOW_ITEM_TYPE_VXLAN: case RTE_FLOW_ITEM_TYPE_GRE: case RTE_FLOW_ITEM_TYPE_NVGRE: case RTE_FLOW_ITEM_TYPE_GENEVE: break; } - -out: - return !err_msg; + return 0; } static int @@ -9372,15 +9577,11 @@ mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev, uint32_t *num_of_actions, struct rte_flow_error *error) { - int ret; struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); + int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error); - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, - err_msg); + if (ret) + return ret; ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); if (ret < 0) { return rte_flow_error_set(error, ret, @@ -9399,15 +9600,11 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev, uint32_t *num_of_items, struct rte_flow_error *error) { - int ret; struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); + int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error); - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - err_msg); + if (ret) + return ret; ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); if (ret < 0) { return rte_flow_error_set(error, ret, @@ -9633,6 +9830,45 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh, } #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ +/* Flex flow item API */ +static struct rte_flow_item_flex_handle * +mlx5_flow_flex_item_create(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_conf *conf, + struct rte_flow_error *error) +{ + static const char err_msg[] = "flex item creation unsupported"; + struct rte_flow_attr attr = { .transfer = 0 }; + const struct mlx5_flow_driver_ops *fops = + flow_get_drv_ops(flow_get_drv_type(dev, &attr)); + + if (!fops->item_create) { + DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, err_msg); + return NULL; + } + return fops->item_create(dev, conf, error); +} + +static int +mlx5_flow_flex_item_release(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_handle *handle, + struct rte_flow_error *error) +{ + static const char err_msg[] = "flex item release unsupported"; + struct rte_flow_attr attr = { .transfer = 0 }; + const struct mlx5_flow_driver_ops *fops = + flow_get_drv_ops(flow_get_drv_type(dev, &attr)); + + if (!fops->item_release) { + DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, err_msg); + return -rte_errno; + } + return fops->item_release(dev, handle, error); +} + static void mlx5_dbg__print_pattern(const struct rte_flow_item *item) { @@ -9675,17 +9911,119 @@ 
mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern, const struct mlx5_flow_expand_node *node) { const struct rte_flow_item *item = pattern + item_idx, *prev_item; - switch (item->type) { - case RTE_FLOW_ITEM_TYPE_VXLAN: + + if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN && + node != NULL && + node->type == RTE_FLOW_ITEM_TYPE_VXLAN) { + /* + * The expansion node is VXLAN and it is also the last + * expandable item in the pattern, so need to continue + * expansion of the inner tunnel. + */ MLX5_ASSERT(item_idx > 0); prev_item = pattern + item_idx - 1; MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP); if (mlx5_flow_is_std_vxlan_port(prev_item)) return &graph[MLX5_EXPANSION_STD_VXLAN]; - else - return &graph[MLX5_EXPANSION_L3_VXLAN]; + return &graph[MLX5_EXPANSION_L3_VXLAN]; + } + return node; +} + +/* Map of Verbs to Flow priority with 8 Verbs priorities. */ +static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, +}; + +/* Map of Verbs to Flow priority with 16 Verbs priorities. */ +static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, + { 9, 10, 11 }, { 12, 13, 14 }, +}; + +/** + * Discover the number of available flow priorities. + * + * @param dev + * Ethernet device. + * + * @return + * On success, number of available flow priorities. + * On failure, a negative errno-style code and rte_errno is set. + */ +int +mlx5_flow_discover_priorities(struct rte_eth_dev *dev) +{ + static const uint16_t vprio[] = {8, 16}; + const struct mlx5_priv *priv = dev->data->dev_private; + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type; + int ret; + + type = mlx5_flow_os_get_type(); + if (type == MLX5_FLOW_TYPE_MAX) { + type = MLX5_FLOW_TYPE_VERBS; + if (priv->sh->devx && priv->config.dv_flow_en) + type = MLX5_FLOW_TYPE_DV; + } + fops = flow_get_drv_ops(type); + if (fops->discover_priorities == NULL) { + DRV_LOG(ERR, "Priority discovery not supported"); + rte_errno = ENOTSUP; + return -rte_errno; + } + ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio)); + if (ret < 0) + return ret; + switch (ret) { + case 8: + ret = RTE_DIM(priority_map_3); + break; + case 16: + ret = RTE_DIM(priority_map_5); break; default: - return node; + rte_errno = ENOTSUP; + DRV_LOG(ERR, + "port %u maximum priority: %d expected 8/16", + dev->data->port_id, ret); + return -rte_errno; + } + DRV_LOG(INFO, "port %u supported flow priorities:" + " 0-%d for ingress or egress root table," + " 0-%d for non-root table or transfer root table.", + dev->data->port_id, ret - 2, + MLX5_NON_ROOT_FLOW_MAX_PRIO - 1); + return ret; +} + +/** + * Adjust flow priority based on the highest layer and the request priority. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] priority + * The rule base priority. + * @param[in] subpriority + * The priority based on the items. + * + * @return + * The new priority. + */ +uint32_t +mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, + uint32_t subpriority) +{ + uint32_t res = 0; + struct mlx5_priv *priv = dev->data->dev_private; + + switch (priv->sh->flow_max_priority) { + case RTE_DIM(priority_map_3): + res = priority_map_3[priority][subpriority]; + break; + case RTE_DIM(priority_map_5): + res = priority_map_5[priority][subpriority]; + break; } + return res; }
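
Note on the item-completion refactor at the top of this patch: every next-protocol decision in the new helpers reduces to a switch on (spec & mask), so a field can only select the follower item type for the bits the application actually matched; a zero mask degenerates to IPPROTO_IP (0), which is why the ETH/VLAN/IPV4/IPV6 cases in mlx5_flow_expand_rss_item_complete() bail out with VOID when the mask is empty. A minimal standalone sketch of that rule follows — the enum and helper name are made up for illustration, not driver code:

#include <stdint.h>
#include <stdio.h>
#include <netinet/in.h> /* IPPROTO_UDP, IPPROTO_TCP, IPPROTO_IP, IPPROTO_IPV6 */

/* Illustrative stand-in for enum rte_flow_item_type. */
enum item_type { ITEM_END, ITEM_UDP, ITEM_TCP, ITEM_IPV4, ITEM_IPV6 };

/* Mirrors mlx5_inet_proto_to_item_type(): only the masked bits count. */
static enum item_type
inet_proto_to_item(uint8_t proto_spec, uint8_t proto_mask)
{
	switch (proto_mask & proto_spec) {
	case IPPROTO_UDP:
		return ITEM_UDP;
	case IPPROTO_TCP:
		return ITEM_TCP;
	case IPPROTO_IP: /* 0 - also what any zero mask degenerates to */
		return ITEM_IPV4;
	case IPPROTO_IPV6:
		return ITEM_IPV6;
	default:
		return ITEM_END;
	}
}

int
main(void)
{
	/* Fully masked TCP next-protocol selects the TCP item: prints 2. */
	printf("%d\n", inet_proto_to_item(IPPROTO_TCP, 0xff));
	/*
	 * Masking off the low bits hides TCP (6 & 0xf0 == 0 == IPPROTO_IP),
	 * so this prints 3 (ITEM_IPV4) - the surprise the caller's
	 * "if (!mask)" guard exists to avoid for the empty-mask case.
	 */
	printf("%d\n", inet_proto_to_item(IPPROTO_TCP, 0xf0));
	return 0;
}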
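
Note on mlx5_action_handle_attach(): its error path records the failing entry in shared_rss_last, restarts the same ILIST_FOREACH walk from the head, and detaches only the entries that precede it — exactly those already attached. The same rollback idiom, reduced to arrays with hypothetical attach()/detach() helpers:

#include <stdio.h>

#define N 4

static int attach(int i)  { return i == 2 ? -1 : 0; } /* element 2 fails */
static void detach(int i) { printf("detach %d\n", i); }

static int
attach_all(void)
{
	int i, failed = 0;

	for (i = 0; i < N; i++) {
		if (attach(i) != 0) {
			failed = i;
			goto error;
		}
	}
	return 0;
error:
	/* Walk from the start again and undo only what was attached. */
	for (i = 0; i < failed; i++)
		detach(i);
	return -1;
}

int
main(void)
{
	return attach_all() ? 1 : 0; /* prints "detach 0" and "detach 1" */
}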
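
Note on the priority discovery added at the end of the patch: fops->discover_priorities() probes whether the device accepts 8 or 16 flow priorities, mlx5_flow_discover_priorities() folds that into 3 or 5 usable rule priorities (the RTE_DIM of the matching map), and mlx5_flow_adjust_priority() later expands a (priority, subpriority) pair through the chosen table. A self-contained replay of that lookup, with hypothetical names standing in for the driver's and MLX5_PRIORITY_MAP_MAX assumed to be 3:

#include <stdint.h>
#include <stdio.h>

#define PRIO_SUBLEVELS 3 /* assumption: mirrors MLX5_PRIORITY_MAP_MAX */

/* Copies of the driver's tables: row = rule priority, col = sub-priority. */
static const uint32_t map3[][PRIO_SUBLEVELS] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};
static const uint32_t map5[][PRIO_SUBLEVELS] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, { 9, 10, 11 }, { 12, 13, 14 },
};

/*
 * flow_max_priority is what discovery stored: 3 when the device exposed
 * 8 Verbs priorities, 5 when it exposed 16.
 */
static uint32_t
adjust_priority(uint32_t flow_max_priority, int32_t priority,
		uint32_t subpriority)
{
	if (flow_max_priority == 3)
		return map3[priority][subpriority];
	if (flow_max_priority == 5)
		return map5[priority][subpriority];
	return 0;
}

int
main(void)
{
	/* Rule priority 1, L4-layer sub-priority 2, 16-priority device. */
	printf("%u\n", adjust_priority(5, 1, 2)); /* prints 5 */
	/* The same rule on an 8-priority device lands on priority 4. */
	printf("%u\n", adjust_priority(3, 1, 2)); /* prints 4 */
	return 0;
}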