X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=98870184d06cf1022f5c1d314d33fe0f3876c9ab;hb=e73e3547ce54d7ae48dff82d87efac0b7a30692a;hp=a8fca11bfcd64ce4b98a0daeaf80cd4bd27e336f;hpb=0c76d1c9a18d855c7d7a38db66ed7b21923c814f;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a8fca11bfc..98870184d0 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -21,7 +21,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
@@ -30,9 +29,10 @@
 #include "mlx5.h"
 #include "mlx5_defs.h"
-#include "mlx5_prm.h"
-#include "mlx5_glue.h"
 #include "mlx5_flow.h"
+#include "mlx5_glue.h"
+#include "mlx5_prm.h"
+#include "mlx5_rxtx.h"
 /* Dev ops structure defined in mlx5.c */
 extern const struct eth_dev_ops mlx5_dev_ops;
@@ -42,6 +42,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
 #endif
+extern const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops;
 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
@@ -51,6 +52,7 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
 #endif
+	[MLX5_FLOW_TYPE_TCF] = &mlx5_flow_tcf_drv_ops,
 	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
 	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
 };
@@ -237,7 +239,6 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 /* Convert FDIR request to Generic flow. */
 struct mlx5_fdir {
 	struct rte_flow_attr attr;
-	struct rte_flow_action actions[2];
 	struct rte_flow_item items[4];
 	struct rte_flow_item_eth l2;
 	struct rte_flow_item_eth l2_mask;
@@ -257,6 +258,7 @@ struct mlx5_fdir {
 		struct rte_flow_item_udp udp;
 		struct rte_flow_item_tcp tcp;
 	} l4_mask;
+	struct rte_flow_action actions[2];
 	struct rte_flow_action_queue queue;
 };
@@ -273,7 +275,7 @@ static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
 /* Tunnel information. */
 struct mlx5_flow_tunnel_info {
-	uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
+	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
 	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*).
 */
 };
@@ -292,7 +294,7 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
 	},
 	{
 		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
-		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
 	},
 	{
 		.tunnel = MLX5_FLOW_LAYER_MPLS,
@@ -313,6 +315,7 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
 int
 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct {
 		struct ibv_flow_attr attr;
 		struct ibv_flow_spec_eth eth;
 	} flow_attr = {
 		.attr = {
 			.num_of_specs = 2,
+			.port = (uint8_t)priv->ibv_port,
 		},
 		.eth = {
 			.type = IBV_FLOW_SPEC_ETH,
@@ -348,6 +352,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 		claim_zero(mlx5_glue->destroy_flow(flow));
 		priority = vprio[i];
 	}
+	mlx5_hrxq_drop_release(dev);
 	switch (priority) {
 	case 8:
 		priority = RTE_DIM(priority_map_3);
@@ -359,10 +364,9 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 		rte_errno = ENOTSUP;
 		DRV_LOG(ERR,
 			"port %u verbs maximum priority: %d expected 8/16",
-			dev->data->port_id, vprio[i]);
+			dev->data->port_id, priority);
 		return -rte_errno;
 	}
-	mlx5_hrxq_drop_release(dev);
 	DRV_LOG(INFO, "port %u flow maximum priority: %d",
 		dev->data->port_id, priority);
 	return priority;
@@ -385,7 +389,7 @@ uint32_t
 mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			  uint32_t subpriority)
 {
 	uint32_t res = 0;
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	switch (priv->config.flow_prio) {
 	case RTE_DIM(priority_map_3):
@@ -416,7 +420,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
-static int
+int
 mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
@@ -450,10 +454,10 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
 		}
 		ret = memcmp(spec, last, size);
 		if (ret != 0)
-			return rte_flow_error_set(error, ENOTSUP,
+			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
-						  "range is not supported");
+						  "range is not valid");
 	}
 	return 0;
 }
@@ -475,7 +479,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
 */
 uint64_t
 mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
-			    int tunnel __rte_unused, uint32_t layer_types,
+			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
 {
 	struct rte_flow *flow = dev_flow->flow;
@@ -503,7 +507,7 @@ mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
 *   Rx queue to update.
 */
 static void
-mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
+flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	unsigned int i;
 	uint32_t tunnel_ptype = 0;
@@ -523,20 +527,22 @@ mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
 }
 /**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive
+ * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
- * @param[in] flow
- *   Pointer to flow structure.
+ * @param[in] dev_flow
+ *   Pointer to device flow structure.
*/ static void -mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) +flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = dev_flow->flow; const int mark = !!(flow->actions & (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); unsigned int i; for (i = 0; i != flow->rss.queue_num; ++i) { @@ -554,33 +560,52 @@ mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) /* Increase the counter matching the flow. */ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { - if ((tunnels_info[j].tunnel & flow->layers) == + if ((tunnels_info[j].tunnel & + dev_flow->layers) == tunnels_info[j].tunnel) { rxq_ctrl->flow_tunnels_n[j]++; break; } } - mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl); + flow_rxq_tunnel_ptype_update(rxq_ctrl); } } } +/** + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] flow + * Pointer to flow structure. + */ +static void +flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_flow *dev_flow; + + LIST_FOREACH(dev_flow, &flow->dev_flows, next) + flow_drv_rxq_flags_set(dev, dev_flow); +} + /** * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the - * @p flow if no other flow uses it with the same kind of request. + * device flow if no other flow uses it with the same kind of request. * * @param dev * Pointer to Ethernet device. - * @param[in] flow - * Pointer to the flow. + * @param[in] dev_flow + * Pointer to the device flow. */ static void -mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) +flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = dev_flow->flow; const int mark = !!(flow->actions & (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); unsigned int i; assert(dev->data->dev_started); @@ -599,17 +624,36 @@ mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) /* Decrease the counter matching the flow. */ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { - if ((tunnels_info[j].tunnel & flow->layers) == + if ((tunnels_info[j].tunnel & + dev_flow->layers) == tunnels_info[j].tunnel) { rxq_ctrl->flow_tunnels_n[j]--; break; } } - mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl); + flow_rxq_tunnel_ptype_update(rxq_ctrl); } } } +/** + * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the + * @p flow if no other flow uses it with the same kind of request. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] flow + * Pointer to the flow. + */ +static void +flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_flow *dev_flow; + + LIST_FOREACH(dev_flow, &flow->dev_flows, next) + flow_drv_rxq_flags_trim(dev, dev_flow); +} + /** * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. * @@ -617,9 +661,9 @@ mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) * Pointer to Ethernet device. 
*/ static void -mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev) +flow_rxq_flags_clear(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) { @@ -643,6 +687,8 @@ mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev) * * @param[in] action_flags * Bit-fields that holds the actions detected until now. + * @param[in] attr + * Attributes of flow that includes this action. * @param[out] error * Pointer to error structure. * @@ -651,6 +697,7 @@ mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev) */ int mlx5_flow_validate_action_flag(uint64_t action_flags, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { @@ -667,6 +714,11 @@ mlx5_flow_validate_action_flag(uint64_t action_flags, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't have 2 flag" " actions in same flow"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "flag action not supported for " + "egress"); return 0; } @@ -677,6 +729,8 @@ mlx5_flow_validate_action_flag(uint64_t action_flags, * Pointer to the queue action. * @param[in] action_flags * Bit-fields that holds the actions detected until now. + * @param[in] attr + * Attributes of flow that includes this action. * @param[out] error * Pointer to error structure. * @@ -686,6 +740,7 @@ mlx5_flow_validate_action_flag(uint64_t action_flags, int mlx5_flow_validate_action_mark(const struct rte_flow_action *action, uint64_t action_flags, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { const struct rte_flow_action_mark *mark = action->conf; @@ -714,6 +769,11 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't have 2 mark actions in same" " flow"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "mark action not supported for " + "egress"); return 0; } @@ -722,14 +782,17 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action, * * @param[in] action_flags * Bit-fields that holds the actions detected until now. + * @param[in] attr + * Attributes of flow that includes this action. * @param[out] error * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_flow_validate_action_drop(uint64_t action_flags, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { if (action_flags & MLX5_FLOW_ACTION_FLAG) @@ -745,6 +808,11 @@ mlx5_flow_validate_action_drop(uint64_t action_flags, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't have 2 fate actions in" " same flow"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "drop action not supported for " + "egress"); return 0; } @@ -757,19 +825,22 @@ mlx5_flow_validate_action_drop(uint64_t action_flags, * Bit-fields that holds the actions detected until now. * @param[in] dev * Pointer to the Ethernet device structure. + * @param[in] attr + * Attributes of flow that includes this action. * @param[out] error * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_flow_validate_action_queue(const struct rte_flow_action *action, uint64_t action_flags, struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_queue *queue = action->conf; if (action_flags & MLX5_FLOW_FATE_ACTIONS) @@ -777,6 +848,10 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't have 2 fate actions in" " same flow"); + if (!priv->rxqs_n) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No Rx queues configured"); if (queue->index >= priv->rxqs_n) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, @@ -787,6 +862,11 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &queue->index, "queue is not configured"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "queue action not supported for " + "egress"); return 0; } @@ -799,20 +879,27 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, * Bit-fields that holds the actions detected until now. * @param[in] dev * Pointer to the Ethernet device structure. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[in] item_flags + * Items that were detected. * @param[out] error * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_flow_validate_action_rss(const struct rte_flow_action *action, uint64_t action_flags, struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + uint64_t item_flags, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_rss *rss = action->conf; + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); unsigned int i; if (action_flags & MLX5_FLOW_FATE_ACTIONS) @@ -835,7 +922,13 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &rss->level, "tunnel RSS is not supported"); - if (rss->key_len < MLX5_RSS_HASH_KEY_LEN) + /* allow RSS key_len 0 in case of NULL (default) RSS key. 
*/ + if (rss->key_len == 0 && rss->key != NULL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->key_len, + "RSS hash key length 0"); + if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &rss->key_len, @@ -856,12 +949,30 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, &rss->types, "some RSS protocols are not" " supported"); + if (!priv->rxqs_n) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No Rx queues configured"); + if (!rss->queue_num) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No queues configured"); for (i = 0; i != rss->queue_num; ++i) { if (!(*priv->rxqs)[rss->queue[i]]) return rte_flow_error_set (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &rss->queue[i], "queue is not configured"); } + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "rss action not supported for " + "egress"); + if (rss->level > 1 && !tunnel) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "inner RSS is not supported for " + "non-tunnel flows"); return 0; } @@ -870,22 +981,24 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, * * @param[in] dev * Pointer to the Ethernet device structure. + * @param[in] attr + * Attributes of flow that includes this action. * @param[out] error * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -mlx5_flow_validate_action_count(struct rte_eth_dev *dev, +mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; - - if (!priv->config.flow_counter_en) + if (attr->egress) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "flow counters are not supported."); + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "count action not supported for " + "egress"); return 0; } @@ -908,7 +1021,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev, const struct rte_flow_attr *attributes, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; uint32_t priority_max = priv->config.flow_prio - 1; if (attributes->group) @@ -962,15 +1075,13 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item, }; int ret; int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t ethm = tunnel ? 
MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; - if (item_flags & MLX5_FLOW_LAYER_OUTER_L2) + if (item_flags & ethm) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "3 levels of l2 are not supported"); - if ((item_flags & MLX5_FLOW_LAYER_INNER_L2) && !tunnel) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "2 L2 without tunnel are not supported"); + "multiple L2 layers not supported"); if (!mask) mask = &rte_flow_item_eth_mask; ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, @@ -995,7 +1106,7 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item, */ int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, - int64_t item_flags, + uint64_t item_flags, struct rte_flow_error *error) { const struct rte_flow_item_vlan *spec = item->spec; @@ -1007,17 +1118,17 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, uint16_t vlan_tag = 0; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int ret; - const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | + const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | MLX5_FLOW_LAYER_INNER_L4) : (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4); - const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : + const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : MLX5_FLOW_LAYER_OUTER_VLAN; if (item_flags & vlanm) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "VLAN layer already configured"); + "multiple VLAN layers not supported"); else if ((item_flags & l34m) != 0) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -1053,6 +1164,9 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, * Item specification. * @param[in] item_flags * Bit-fields that holds the items detected until now. + * @param[in] acc_mask + * Acceptable mask, if NULL default internal default mask + * will be used to check whether item fields are supported. * @param[out] error * Pointer to error structure. * @@ -1061,7 +1175,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, */ int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, - int64_t item_flags, + uint64_t item_flags, + const struct rte_flow_item_ipv4 *acc_mask, struct rte_flow_error *error) { const struct rte_flow_item_ipv4 *mask = item->mask; @@ -1074,22 +1189,31 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, }, }; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; int ret; - if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : - MLX5_FLOW_LAYER_OUTER_L3)) + if (item_flags & l3m) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "multiple L3 layers not supported"); - else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : - MLX5_FLOW_LAYER_OUTER_L4)) + else if (item_flags & l4m) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 cannot follow an L4 layer."); if (!mask) mask = &rte_flow_item_ipv4_mask; + else if (mask->hdr.next_proto_id != 0 && + mask->hdr.next_proto_id != 0xff) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, + "partial mask is not supported" + " for protocol"); ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, - (const uint8_t *)&nic_mask, + acc_mask ? 
(const uint8_t *)acc_mask + : (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_ipv4), error); if (ret < 0) @@ -1104,6 +1228,9 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, * Item specification. * @param[in] item_flags * Bit-fields that holds the items detected until now. + * @param[in] acc_mask + * Acceptable mask, if NULL default internal default mask + * will be used to check whether item fields are supported. * @param[out] error * Pointer to error structure. * @@ -1113,6 +1240,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, uint64_t item_flags, + const struct rte_flow_item_ipv6 *acc_mask, struct rte_flow_error *error) { const struct rte_flow_item_ipv6 *mask = item->mask; @@ -1130,33 +1258,25 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, }, }; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; int ret; - if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : - MLX5_FLOW_LAYER_OUTER_L3)) + if (item_flags & l3m) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "multiple L3 layers not supported"); - else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : - MLX5_FLOW_LAYER_OUTER_L4)) + else if (item_flags & l4m) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 cannot follow an L4 layer."); - /* - * IPv6 is not recognised by the NIC inside a GRE tunnel. - * Such support has to be disabled as the rule will be - * accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and - * Mellanox OFED 4.4-1.0.0.0. - */ - if (tunnel && item_flags & MLX5_FLOW_LAYER_GRE) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "IPv6 inside a GRE tunnel is" - " not recognised."); if (!mask) mask = &rte_flow_item_ipv6_mask; ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, - (const uint8_t *)&nic_mask, + acc_mask ? (const uint8_t *)acc_mask + : (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_ipv6), error); if (ret < 0) @@ -1173,6 +1293,8 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, * Bit-fields that holds the items detected until now. * @param[in] target_protocol * The next protocol in the previous item. + * @param[in] flow_mask + * mlx5 flow-specific (TCF, DV, verbs, etc.) supported header fields mask. * @param[out] error * Pointer to error structure. * @@ -1187,6 +1309,10 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item, { const struct rte_flow_item_udp *mask = item->mask; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; int ret; if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) @@ -1194,16 +1320,14 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item, RTE_FLOW_ERROR_TYPE_ITEM, item, "protocol filtering not compatible" " with UDP layer"); - if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : - MLX5_FLOW_LAYER_OUTER_L3))) + if (!(item_flags & l3m)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 is mandatory to filter on L4"); - if (item_flags & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : - MLX5_FLOW_LAYER_OUTER_L4)) + if (item_flags & l4m) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "L4 layer is already present"); + "multiple L4 layers not supported"); if (!mask) mask = &rte_flow_item_udp_mask; ret = mlx5_flow_item_acceptable @@ -1234,32 +1358,36 @@ int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, uint64_t item_flags, uint8_t target_protocol, + const struct rte_flow_item_tcp *flow_mask, struct rte_flow_error *error) { const struct rte_flow_item_tcp *mask = item->mask; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; int ret; + assert(flow_mask); if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "protocol filtering not compatible" " with TCP layer"); - if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : - MLX5_FLOW_LAYER_OUTER_L3))) + if (!(item_flags & l3m)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 is mandatory to filter on L4"); - if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : - MLX5_FLOW_LAYER_OUTER_L4)) + if (item_flags & l4m) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "L4 layer is already present"); + "multiple L4 layers not supported"); if (!mask) mask = &rte_flow_item_tcp_mask; ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, - (const uint8_t *)&rte_flow_item_tcp_mask, + (const uint8_t *)flow_mask, sizeof(struct rte_flow_item_tcp), error); if (ret < 0) return ret; @@ -1299,7 +1427,8 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, if (item_flags & MLX5_FLOW_LAYER_TUNNEL) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "a tunnel is already present"); + "multiple tunnel layers not" + " supported"); /* * Verify only UDPv4 is present as defined in * https://tools.ietf.org/html/rfc7348 @@ -1366,7 +1495,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, struct rte_eth_dev *dev, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_item_vxlan_gpe *spec = item->spec; const struct rte_flow_item_vxlan_gpe *mask = item->mask; int ret; @@ -1385,7 +1514,8 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, if (item_flags & MLX5_FLOW_LAYER_TUNNEL) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "a tunnel is already present"); + "multiple tunnel layers not" + " supported"); /* * Verify only UDPv4 is present as defined in * https://tools.ietf.org/html/rfc7348 @@ -1468,7 +1598,8 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item, if (item_flags & MLX5_FLOW_LAYER_TUNNEL) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "a tunnel is already present"); + "multiple tunnel layers not" + " supported"); if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -1495,12 +1626,14 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item, /** * Validate MPLS item. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] item * Item specification. * @param[in] item_flags * Bit-fields that holds the items detected until now. 
- * @param[in] target_protocol - * The next protocol in the previous item. + * @param[in] prev_layer + * The protocol layer indicated in previous item. * @param[out] error * Pointer to error structure. * @@ -1508,25 +1641,38 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item, * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused, +mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_item *item __rte_unused, uint64_t item_flags __rte_unused, - uint8_t target_protocol __rte_unused, + uint64_t prev_layer __rte_unused, struct rte_flow_error *error) { #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT const struct rte_flow_item_mpls *mask = item->mask; + struct mlx5_priv *priv = dev->data->dev_private; int ret; - if (target_protocol != 0xff && target_protocol != IPPROTO_MPLS) + if (!priv->config.mpls_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "MPLS not supported or" + " disabled in firmware" + " configuration."); + /* MPLS over IP, UDP, GRE is allowed */ + if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | + MLX5_FLOW_LAYER_OUTER_L4_UDP | + MLX5_FLOW_LAYER_GRE))) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "protocol filtering not compatible" " with MPLS layer"); - if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */ + if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && + !(item_flags & MLX5_FLOW_LAYER_GRE)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "a tunnel is already" - " present"); + "multiple tunnel layers not" + " supported"); if (!mask) mask = &rte_flow_item_mpls_mask; ret = mlx5_flow_item_acceptable @@ -1558,8 +1704,6 @@ static struct mlx5_flow * flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, - uint64_t *item_flags __rte_unused, - uint64_t *action_flags __rte_unused, struct rte_flow_error *error __rte_unused) { rte_errno = ENOTSUP; @@ -1599,6 +1743,17 @@ flow_null_destroy(struct rte_eth_dev *dev __rte_unused, { } +static int +flow_null_query(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + const struct rte_flow_action *actions __rte_unused, + void *data __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + rte_errno = ENOTSUP; + return -rte_errno; +} + /* Void driver to protect from null pointer reference. */ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { .validate = flow_null_validate, @@ -1607,6 +1762,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { .apply = flow_null_apply, .remove = flow_null_remove, .destroy = flow_null_destroy, + .query = flow_null_query, }; /** @@ -1619,23 +1775,19 @@ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { * Pointer to the flow attributes. * * @return - * flow driver type if supported, MLX5_FLOW_TYPE_MAX otherwise. + * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 
*/ static enum mlx5_flow_drv_type -flow_get_drv_type(struct rte_eth_dev *dev __rte_unused, - const struct rte_flow_attr *attr) +flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) { - struct priv *priv __rte_unused = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; - if (!attr->transfer) { -#ifdef HAVE_IBV_FLOW_DV_SUPPORT - type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : - MLX5_FLOW_TYPE_VERBS; -#else - type = MLX5_FLOW_TYPE_VERBS; -#endif - } + if (attr->transfer && !priv->config.dv_esw_en) + type = MLX5_FLOW_TYPE_TCF; + else + type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : + MLX5_FLOW_TYPE_VERBS; return type; } @@ -1657,7 +1809,7 @@ flow_get_drv_type(struct rte_eth_dev *dev __rte_unused, * Pointer to the error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static inline int flow_drv_validate(struct rte_eth_dev *dev, @@ -1679,29 +1831,30 @@ flow_drv_validate(struct rte_eth_dev *dev, * calculates the size of memory required for device flow, allocates the memory, * initializes the device flow and returns the pointer. * + * @note + * This function initializes device flow structure such as dv, tcf or verbs in + * struct mlx5_flow. However, it is caller's responsibility to initialize the + * rest. For example, adding returning device flow to flow->dev_flow list and + * setting backward reference to the flow should be done out of this function. + * layers field is not filled either. + * * @param[in] attr * Pointer to the flow attributes. * @param[in] items * Pointer to the list of items. * @param[in] actions * Pointer to the list of actions. - * @param[out] item_flags - * Pointer to bit mask of all items detected. - * @param[out] action_flags - * Pointer to bit mask of all actions detected. * @param[out] error * Pointer to the error structure. * * @return - * Pointer to device flow on success, otherwise NULL and rte_ernno is set. + * Pointer to device flow on success, otherwise NULL and rte_errno is set. */ static inline struct mlx5_flow * -flow_drv_prepare(struct rte_flow *flow, +flow_drv_prepare(const struct rte_flow *flow, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - uint64_t *item_flags, - uint64_t *action_flags, struct rte_flow_error *error) { const struct mlx5_flow_driver_ops *fops; @@ -1709,8 +1862,7 @@ flow_drv_prepare(struct rte_flow *flow, assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(type); - return fops->prepare(attr, items, actions, item_flags, action_flags, - error); + return fops->prepare(attr, items, actions, error); } /** @@ -1719,6 +1871,12 @@ flow_drv_prepare(struct rte_flow *flow, * translates a generic flow into a driver flow. flow_drv_prepare() must * precede. * + * @note + * dev_flow->layers could be filled as a result of parsing during translation + * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled + * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, + * flow->actions could be overwritten even though all the expanded dev_flows + * have the same actions. * * @param[in] dev * Pointer to the rte dev structure. @@ -1734,7 +1892,7 @@ flow_drv_prepare(struct rte_flow *flow, * Pointer to the error structure. 
* * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static inline int flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, @@ -1782,7 +1940,7 @@ flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, * Flow driver remove API. This abstracts calling driver specific functions. * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow * on device. All the resources of the flow should be freed by calling - * flow_dv_destroy(). + * flow_drv_destroy(). * * @param[in] dev * Pointer to Ethernet device. @@ -1852,7 +2010,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev, * Pointer to the RSS action if exist, else return NULL. */ static const struct rte_flow_action_rss* -mlx5_flow_get_rss_action(const struct rte_flow_action actions[]) +flow_get_rss_action(const struct rte_flow_action actions[]) { for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { @@ -1867,7 +2025,7 @@ mlx5_flow_get_rss_action(const struct rte_flow_action actions[]) } static unsigned int -mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) +find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) { const struct rte_flow_item *item; unsigned int has_vlan = 0; @@ -1905,17 +2063,14 @@ mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) * A flow on success, NULL otherwise and rte_errno is set. */ static struct rte_flow * -mlx5_flow_list_create(struct rte_eth_dev *dev, - struct mlx5_flows *list, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { struct rte_flow *flow = NULL; struct mlx5_flow *dev_flow; - uint64_t action_flags = 0; - uint64_t item_flags = 0; const struct rte_flow_action_rss *rss; union { struct rte_flow_expand_rss buf; @@ -1930,7 +2085,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, if (ret < 0) return NULL; flow_size = sizeof(struct rte_flow); - rss = mlx5_flow_get_rss_action(actions); + rss = flow_get_rss_action(actions); if (rss) flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), sizeof(void *)); @@ -1938,6 +2093,8 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); flow = rte_calloc(__func__, 1, flow_size, 0); flow->drv_type = flow_get_drv_type(dev, attr); + flow->ingress = attr->ingress; + flow->transfer = attr->transfer; assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && flow->drv_type < MLX5_FLOW_TYPE_MAX); flow->queue = (void *)(flow + 1); @@ -1945,7 +2102,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, if (rss && rss->types) { unsigned int graph_root; - graph_root = mlx5_find_graph_root(items, rss->level); + graph_root = find_graph_root(items, rss->level); ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), items, rss->types, mlx5_support_expansion, @@ -1958,8 +2115,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, } for (i = 0; i < buf->entries; ++i) { dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern, - actions, &item_flags, &action_flags, - error); + actions, error); if (!dev_flow) goto error; dev_flow->flow = flow; @@ -1976,7 +2132,7 @@ 
mlx5_flow_list_create(struct rte_eth_dev *dev, goto error; } TAILQ_INSERT_TAIL(list, flow, next); - mlx5_flow_rxq_flags_set(dev, flow); + flow_rxq_flags_set(dev, flow); return flow; error: ret = rte_errno; /* Save rte_errno before cleanup. */ @@ -2000,9 +2156,10 @@ mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - return mlx5_flow_list_create - (dev, &((struct priv *)dev->data->dev_private)->flows, - attr, items, actions, error); + struct mlx5_priv *priv = (struct mlx5_priv *)dev->data->dev_private; + + return flow_list_create(dev, &priv->flows, + attr, items, actions, error); } /** @@ -2016,17 +2173,18 @@ mlx5_flow_create(struct rte_eth_dev *dev, * Flow to destroy. */ static void -mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, - struct rte_flow *flow) +flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, + struct rte_flow *flow) { - flow_drv_destroy(dev, flow); - TAILQ_REMOVE(list, flow, next); /* * Update RX queue flags only if port is started, otherwise it is * already clean. */ if (dev->data->dev_started) - mlx5_flow_rxq_flags_trim(dev, flow); + flow_rxq_flags_trim(dev, flow); + flow_drv_destroy(dev, flow); + TAILQ_REMOVE(list, flow, next); + rte_free(flow->fdir); rte_free(flow); } @@ -2045,7 +2203,7 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) struct rte_flow *flow; flow = TAILQ_FIRST(list); - mlx5_flow_list_destroy(dev, list, flow); + flow_list_destroy(dev, list, flow); } } @@ -2064,7 +2222,7 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) flow_drv_remove(dev, flow); - mlx5_flow_rxq_flags_clear(dev); + flow_rxq_flags_clear(dev); } /** @@ -2089,7 +2247,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) ret = flow_drv_apply(dev, flow, &error); if (ret < 0) goto error; - mlx5_flow_rxq_flags_set(dev, flow); + flow_rxq_flags_set(dev, flow); } return 0; error: @@ -2110,7 +2268,7 @@ error: int mlx5_flow_verify(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow; int ret = 0; @@ -2146,7 +2304,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct rte_flow_item_vlan *vlan_spec, struct rte_flow_item_vlan *vlan_mask) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_attr attr = { .ingress = 1, .priority = MLX5_FLOW_PRIO_RSVD, @@ -2192,14 +2350,13 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct rte_flow_error error; unsigned int i; - if (!priv->reta_idx_n) { - rte_errno = EINVAL; - return -rte_errno; + if (!priv->reta_idx_n || !priv->rxqs_n) { + return 0; } for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; - flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items, - actions, &error); + flow = flow_list_create(dev, &priv->ctrl_flows, + &attr, items, actions, &error); if (!flow) return -rte_errno; return 0; @@ -2237,9 +2394,9 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error __rte_unused) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; - mlx5_flow_list_destroy(dev, &priv->flows, flow); + flow_list_destroy(dev, &priv->flows, flow); return 0; } @@ -2253,7 +2410,7 @@ int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error __rte_unused) { - struct priv *priv = 
dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; mlx5_flow_list_flush(dev, &priv->flows); return 0; @@ -2270,7 +2427,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, int enable, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; if (dev->data->dev_started) { rte_flow_error_set(error, EBUSY, @@ -2288,92 +2445,45 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, } /** - * Query flow counter. - * - * @param flow - * Pointer to the flow. + * Query a flow. * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * @see rte_flow_query() + * @see rte_flow_ops */ static int -mlx5_flow_query_count(struct rte_flow *flow __rte_unused, - void *data __rte_unused, - struct rte_flow_error *error) -{ -#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - if (flow->actions & MLX5_FLOW_ACTION_COUNT) { - struct rte_flow_query_count *qc = data; - uint64_t counters[2] = {0, 0}; - struct ibv_query_counter_set_attr query_cs_attr = { - .cs = flow->counter->cs, - .query_flags = IBV_COUNTER_SET_FORCE_UPDATE, - }; - struct ibv_counter_set_data query_out = { - .out = counters, - .outlen = 2 * sizeof(uint64_t), - }; - int err = mlx5_glue->query_counter_set(&query_cs_attr, - &query_out); +flow_drv_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type ftype = flow->drv_type; - if (err) - return rte_flow_error_set - (error, err, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot read counter"); - qc->hits_set = 1; - qc->bytes_set = 1; - qc->hits = counters[0] - flow->counter->hits; - qc->bytes = counters[1] - flow->counter->bytes; - if (qc->reset) { - flow->counter->hits = counters[0]; - flow->counter->bytes = counters[1]; - } - return 0; - } - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "flow does not have counter"); -#endif - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "counters are not available"); + assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(ftype); + + return fops->query(dev, flow, actions, data, error); } /** - * Query a flows. + * Query a flow. * * @see rte_flow_query() * @see rte_flow_ops */ int -mlx5_flow_query(struct rte_eth_dev *dev __rte_unused, +mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, const struct rte_flow_action *actions, void *data, struct rte_flow_error *error) { - int ret = 0; + int ret; - for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { - switch (actions->type) { - case RTE_FLOW_ACTION_TYPE_VOID: - break; - case RTE_FLOW_ACTION_TYPE_COUNT: - ret = mlx5_flow_query_count(flow, data, error); - break; - default: - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "action not supported"); - } - if (ret < 0) - return ret; - } + ret = flow_drv_query(dev, flow, actions, data, error); + if (ret < 0) + return ret; return 0; } @@ -2391,11 +2501,11 @@ mlx5_flow_query(struct rte_eth_dev *dev __rte_unused, * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -mlx5_fdir_filter_convert(struct rte_eth_dev *dev, +flow_fdir_filter_convert(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter, struct mlx5_fdir *attributes) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_eth_fdir_input *input = &fdir_filter->input; const struct rte_eth_fdir_masks *mask = &dev->data->dev_conf.fdir_conf.mask; @@ -2438,13 +2548,13 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: - attributes->l3.ipv4.hdr = (struct ipv4_hdr){ + attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ .src_addr = input->flow.ip4_flow.src_ip, .dst_addr = input->flow.ip4_flow.dst_ip, .time_to_live = input->flow.ip4_flow.ttl, .type_of_service = input->flow.ip4_flow.tos, }; - attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){ + attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ .src_addr = mask->ipv4_mask.src_ip, .dst_addr = mask->ipv4_mask.dst_ip, .time_to_live = mask->ipv4_mask.ttl, @@ -2460,7 +2570,7 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: - attributes->l3.ipv6.hdr = (struct ipv6_hdr){ + attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ .hop_limits = input->flow.ipv6_flow.hop_limits, .proto = input->flow.ipv6_flow.proto, }; @@ -2492,11 +2602,11 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, /* Handle L4. */ switch (fdir_filter->input.flow_type) { case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: - attributes->l4.udp.hdr = (struct udp_hdr){ + attributes->l4.udp.hdr = (struct rte_udp_hdr){ .src_port = input->flow.udp4_flow.src_port, .dst_port = input->flow.udp4_flow.dst_port, }; - attributes->l4_mask.udp.hdr = (struct udp_hdr){ + attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2507,11 +2617,11 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, }; break; case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: - attributes->l4.tcp.hdr = (struct tcp_hdr){ + attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ .src_port = input->flow.tcp4_flow.src_port, .dst_port = input->flow.tcp4_flow.dst_port, }; - attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2522,11 +2632,11 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, }; break; case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: - attributes->l4.udp.hdr = (struct udp_hdr){ + attributes->l4.udp.hdr = (struct rte_udp_hdr){ .src_port = input->flow.udp6_flow.src_port, .dst_port = input->flow.udp6_flow.dst_port, }; - attributes->l4_mask.udp.hdr = (struct udp_hdr){ + attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2537,11 +2647,11 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, }; break; case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: - attributes->l4.tcp.hdr = (struct tcp_hdr){ + attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ .src_port = input->flow.tcp6_flow.src_port, .dst_port = input->flow.tcp6_flow.dst_port, }; - attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2563,6 +2673,69 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, return 0; } +#define FLOW_FDIR_CMP(f1, f2, fld) \ + 
memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) + +/** + * Compare two FDIR flows. If items and actions are identical, the two flows are + * regarded as same. + * + * @param dev + * Pointer to Ethernet device. + * @param f1 + * FDIR flow to compare. + * @param f2 + * FDIR flow to compare. + * + * @return + * Zero on match, 1 otherwise. + */ +static int +flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) +{ + if (FLOW_FDIR_CMP(f1, f2, attr) || + FLOW_FDIR_CMP(f1, f2, l2) || + FLOW_FDIR_CMP(f1, f2, l2_mask) || + FLOW_FDIR_CMP(f1, f2, l3) || + FLOW_FDIR_CMP(f1, f2, l3_mask) || + FLOW_FDIR_CMP(f1, f2, l4) || + FLOW_FDIR_CMP(f1, f2, l4_mask) || + FLOW_FDIR_CMP(f1, f2, actions[0].type)) + return 1; + if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && + FLOW_FDIR_CMP(f1, f2, queue)) + return 1; + return 0; +} + +/** + * Search device flow list to find out a matched FDIR flow. + * + * @param dev + * Pointer to Ethernet device. + * @param fdir_flow + * FDIR flow to lookup. + * + * @return + * Pointer of flow if found, NULL otherwise. + */ +static struct rte_flow * +flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = NULL; + + assert(fdir_flow); + TAILQ_FOREACH(flow, &priv->flows, next) { + if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { + DRV_LOG(DEBUG, "port %u found FDIR flow %p", + dev->data->port_id, (void *)flow); + break; + } + } + return flow; +} + /** * Add new flow director filter and store it in list. * @@ -2575,33 +2748,38 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_fdir_filter_add(struct rte_eth_dev *dev, +flow_fdir_filter_add(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { - struct priv *priv = dev->data->dev_private; - struct mlx5_fdir attributes = { - .attr.group = 0, - .l2_mask = { - .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00", - .src.addr_bytes = "\x00\x00\x00\x00\x00\x00", - .type = 0, - }, - }; - struct rte_flow_error error; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_fdir *fdir_flow; struct rte_flow *flow; int ret; - ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); + fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); + if (!fdir_flow) { + rte_errno = ENOMEM; + return -rte_errno; + } + ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); if (ret) - return ret; - flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr, - attributes.items, attributes.actions, - &error); + goto error; + flow = flow_fdir_filter_lookup(dev, fdir_flow); if (flow) { - DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id, - (void *)flow); - return 0; + rte_errno = EEXIST; + goto error; } + flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, + fdir_flow->items, fdir_flow->actions, NULL); + if (!flow) + goto error; + assert(!flow->fdir); + flow->fdir = fdir_flow; + DRV_LOG(DEBUG, "port %u created FDIR flow %p", + dev->data->port_id, (void *)flow); + return 0; +error: + rte_free(fdir_flow); return -rte_errno; } @@ -2617,12 +2795,28 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused, - const struct rte_eth_fdir_filter *fdir_filter - __rte_unused) +flow_fdir_filter_delete(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *fdir_filter) { - rte_errno = ENOTSUP; - return -rte_errno; + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow; + struct mlx5_fdir fdir_flow = { + .attr.group = 0, + }; + int ret; + + ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); + if (ret) + return -rte_errno; + flow = flow_fdir_filter_lookup(dev, &fdir_flow); + if (!flow) { + rte_errno = ENOENT; + return -rte_errno; + } + flow_list_destroy(dev, &priv->flows, flow); + DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", + dev->data->port_id, (void *)flow); + return 0; } /** @@ -2637,15 +2831,15 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_fdir_filter_update(struct rte_eth_dev *dev, +flow_fdir_filter_update(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { int ret; - ret = mlx5_fdir_filter_delete(dev, fdir_filter); + ret = flow_fdir_filter_delete(dev, fdir_filter); if (ret) return ret; - return mlx5_fdir_filter_add(dev, fdir_filter); + return flow_fdir_filter_add(dev, fdir_filter); } /** @@ -2655,9 +2849,9 @@ mlx5_fdir_filter_update(struct rte_eth_dev *dev, * Pointer to Ethernet device. */ static void -mlx5_fdir_filter_flush(struct rte_eth_dev *dev) +flow_fdir_filter_flush(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; mlx5_flow_list_flush(dev, &priv->flows); } @@ -2671,7 +2865,7 @@ mlx5_fdir_filter_flush(struct rte_eth_dev *dev) * Resulting flow director information. */ static void -mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) +flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) { struct rte_eth_fdir_masks *mask = &dev->data->dev_conf.fdir_conf.mask; @@ -2701,7 +2895,7 @@ mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, +flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { enum rte_fdir_mode fdir_mode = @@ -2718,16 +2912,16 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, } switch (filter_op) { case RTE_ETH_FILTER_ADD: - return mlx5_fdir_filter_add(dev, arg); + return flow_fdir_filter_add(dev, arg); case RTE_ETH_FILTER_UPDATE: - return mlx5_fdir_filter_update(dev, arg); + return flow_fdir_filter_update(dev, arg); case RTE_ETH_FILTER_DELETE: - return mlx5_fdir_filter_delete(dev, arg); + return flow_fdir_filter_delete(dev, arg); case RTE_ETH_FILTER_FLUSH: - mlx5_fdir_filter_flush(dev); + flow_fdir_filter_flush(dev); break; case RTE_ETH_FILTER_INFO: - mlx5_fdir_info_get(dev, arg); + flow_fdir_info_get(dev, arg); break; default: DRV_LOG(DEBUG, "port %u unknown operation %u", @@ -2768,7 +2962,7 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, *(const void **)arg = &mlx5_flow_ops; return 0; case RTE_ETH_FILTER_FDIR: - return mlx5_fdir_ctrl_func(dev, filter_op, arg); + return flow_fdir_ctrl_func(dev, filter_op, arg); default: DRV_LOG(ERR, "port %u filter type (%d) not supported", dev->data->port_id, filter_type);