net/enic: avoid error message when no advanced filtering
[dpdk.git] / drivers / net / mlx5 / mlx5_flow.c
index d222914..9904bc5 100644
@@ -391,13 +391,20 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
                next = NULL;
                missed = 1;
-               for (i = 0; node->next && node->next[i]; ++i) {
+               i = 0;
+               while (node->next && node->next[i]) {
                        next = &graph[node->next[i]];
                        if (next->type == missed_item.type) {
                                flow_items[0].type = missed_item.type;
                                flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
                                break;
                        }
+                       if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
+                               node = next;
+                               i = 0;
+                       } else {
+                               ++i;
+                       }
                        next = NULL;
                }
        }
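With the for-loop turned into a while-loop, the walk can restart from an explicit node's children (i reset to 0 after descending) instead of only scanning the current node's immediate successors; explicit nodes represent items that never appear implicitly in an expanded pattern, so the missed item has to be searched for behind them as well. A minimal standalone sketch of the same traversal, with simplified stand-in types (expand_node and NODE_EXPLICIT are illustrative, not the driver's definitions):

#include <stddef.h>

#define NODE_EXPLICIT (1u << 0) /* stand-in for MLX5_EXPANSION_NODE_EXPLICIT */

struct expand_node {
	int type;           /* pattern item type this node matches */
	unsigned int flags; /* NODE_EXPLICIT, ... */
	const int *next;    /* zero-terminated successor indices into the graph */
};

/* Return the first reachable node matching missed_type, descending
 * through explicit nodes; NULL when no successor matches. */
static const struct expand_node *
find_missed_type(const struct expand_node *graph,
		 const struct expand_node *node, int missed_type)
{
	unsigned int i = 0;

	while (node->next && node->next[i]) {
		const struct expand_node *next = &graph[node->next[i]];

		if (next->type == missed_type)
			return next;
		if (next->flags & NODE_EXPLICIT) {
			/* Restart the scan from the explicit node's children. */
			node = next;
			i = 0;
		} else {
			++i;
		}
	}
	return NULL;
}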
@@ -1587,6 +1594,58 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
        return 0;
 }
 
+/**
+ * Validate queue numbers for device RSS.
+ *
+ * @param[in] dev
+ *   Configured device.
+ * @param[in] queues
+ *   Array of queue numbers.
+ * @param[in] queues_n
+ *   Size of the @p queues array.
+ * @param[out] error
+ *   On error, filled with a textual error description.
+ * @param[out] queue_idx
+ *   On error, filled with the offending queue index in the @p queues array.
+ *
+ * @return
+ *   0 on success, a negative errno code on error.
+ */
+static int
+mlx5_validate_rss_queues(const struct rte_eth_dev *dev,
+                        const uint16_t *queues, uint32_t queues_n,
+                        const char **error, uint32_t *queue_idx)
+{
+       const struct mlx5_priv *priv = dev->data->dev_private;
+       enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
+       uint32_t i;
+
+       for (i = 0; i != queues_n; ++i) {
+               struct mlx5_rxq_ctrl *rxq_ctrl;
+
+               if (queues[i] >= priv->rxqs_n) {
+                       *error = "queue index out of range";
+                       *queue_idx = i;
+                       return -EINVAL;
+               }
+               if (!(*priv->rxqs)[queues[i]]) {
+               *error = "queue is not configured";
+                       *queue_idx = i;
+                       return -EINVAL;
+               }
+               rxq_ctrl = container_of((*priv->rxqs)[queues[i]],
+                                       struct mlx5_rxq_ctrl, rxq);
+               if (i == 0)
+                       rxq_type = rxq_ctrl->type;
+               if (rxq_type != rxq_ctrl->type) {
+                       *error = "combining hairpin and regular RSS queues is not supported";
+                       *queue_idx = i;
+                       return -ENOTSUP;
+               }
+       }
+       return 0;
+}
+
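A hypothetical caller, just to illustrate the helper's contract: it returns a negative errno and reports the failing queue through message/queue_idx, leaving it to the caller to decide how to surface the error (rte_flow_error_set() in the validation path below, DRV_LOG() in the attach path further down). This example is not driver code:

/* Hypothetical caller, illustrating the contract. */
static int
check_rss_queues_example(struct rte_eth_dev *dev,
			 const uint16_t *queues, uint32_t queues_n)
{
	const char *message;
	uint32_t queue_idx;
	int ret;

	ret = mlx5_validate_rss_queues(dev, queues, queues_n,
				       &message, &queue_idx);
	if (ret != 0)
		DRV_LOG(ERR, "queue %u rejected: %s (errno %d)",
			queues[queue_idx], message, -ret);
	return ret;
}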
 /*
  * Validate the rss action.
  *
@@ -1607,8 +1666,9 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = action->conf;
-       enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
-       unsigned int i;
+       int ret;
+       const char *message;
+       uint32_t queue_idx;
 
        if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
            rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
@@ -1672,27 +1732,12 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "No queues configured");
-       for (i = 0; i != rss->queue_num; ++i) {
-               struct mlx5_rxq_ctrl *rxq_ctrl;
-
-               if (rss->queue[i] >= priv->rxqs_n)
-                       return rte_flow_error_set
-                               (error, EINVAL,
-                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i], "queue index out of range");
-               if (!(*priv->rxqs)[rss->queue[i]])
-                       return rte_flow_error_set
-                               (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i], "queue is not configured");
-               rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
-                                       struct mlx5_rxq_ctrl, rxq);
-               if (i == 0)
-                       rxq_type = rxq_ctrl->type;
-               if (rxq_type != rxq_ctrl->type)
-                       return rte_flow_error_set
-                               (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i],
-                                "combining hairpin and regular RSS queues is not supported");
+       ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num,
+                                      &message, &queue_idx);
+       if (ret != 0) {
+               return rte_flow_error_set(error, -ret,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->queue[queue_idx], message);
        }
        return 0;
 }
@@ -3675,8 +3720,11 @@ flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
        uint16_t offset = (age_idx >> 16) & UINT16_MAX;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
-       struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
+       struct mlx5_aso_age_pool *pool;
 
+       rte_rwlock_read_lock(&mng->resize_rwl);
+       pool = mng->pools[pool_idx];
+       rte_rwlock_read_unlock(&mng->resize_rwl);
        return &pool->actions[offset - 1];
 }
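flow_aso_age_get_by_idx() now reads mng->pools under resize_rwl because a concurrent pool resize may reallocate that array; only the array is protected, while the pool object itself stays valid, which is why the returned action can still be dereferenced after unlocking. A hedged sketch of the writer side this read lock pairs with (the real resize logic lives elsewhere in the driver; the pool-count field name is an assumption):

/* Hedged sketch: a resize must publish the new pools array under the
 * write lock so readers above never see a stale or freed pointer. */
static int
aso_age_pools_resize_sketch(struct mlx5_aso_age_mng *mng, uint16_t new_n)
{
	struct mlx5_aso_age_pool **pools;

	pools = mlx5_malloc(MLX5_MEM_ZERO, new_n * sizeof(*pools),
			    0, SOCKET_ID_ANY);
	if (pools == NULL)
		return -ENOMEM;
	rte_rwlock_write_lock(&mng->resize_rwl);
	/* Copy existing entries, publish the new array, free the old one. */
	memcpy(pools, mng->pools, mng->n * sizeof(*pools));
	mlx5_free(mng->pools);
	mng->pools = pools;
	mng->n = new_n;
	rte_rwlock_write_unlock(&mng->resize_rwl);
	return 0;
}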
 
@@ -8776,6 +8824,116 @@ mlx5_action_handle_flush(struct rte_eth_dev *dev)
        return ret;
 }
 
+/**
+ * Validate existing indirect actions against current device configuration
+ * and attach them to device resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_attach(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_indexed_pool *ipool =
+                       priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+       struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+       int ret = 0;
+       uint32_t idx;
+
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+               const char *message;
+               uint32_t queue_idx;
+
+               ret = mlx5_validate_rss_queues(dev, ind_tbl->queues,
+                                              ind_tbl->queues_n,
+                                              &message, &queue_idx);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s",
+                               dev->data->port_id, ind_tbl->queues[queue_idx],
+                               message);
+                       break;
+               }
+       }
+       if (ret != 0)
+               return ret;
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u could not attach "
+                               "indirection table obj %p",
+                               dev->data->port_id, (void *)ind_tbl);
+                       goto error;
+               }
+       }
+       return 0;
+error:
+       shared_rss_last = shared_rss;
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               if (shared_rss == shared_rss_last)
+                       break;
+               if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
+                       DRV_LOG(CRIT, "Port %u could not detach "
+                               "indirection table obj %p on rollback",
+                               dev->data->port_id, (void *)ind_tbl);
+       }
+       return ret;
+}
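The error path relies on ILIST_FOREACH visiting entries in the same order on both passes, so every indirection table before shared_rss_last is known to be attached and gets detached on rollback. The same commit-or-roll-back idiom over a plain array, as a compact illustration (struct obj, obj_attach() and obj_detach() are hypothetical):

struct obj;
int obj_attach(struct obj *o);  /* hypothetical */
int obj_detach(struct obj *o);  /* hypothetical */

static int
attach_all_sketch(struct obj **objs, unsigned int n)
{
	unsigned int i, j;
	int ret = 0;

	for (i = 0; i < n; i++) {
		ret = obj_attach(objs[i]);
		if (ret != 0)
			break;
	}
	if (ret == 0)
		return 0;
	for (j = 0; j < i; j++) /* roll back only what was attached */
		obj_detach(objs[j]);
	return ret;
}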
+
+/**
+ * Detach indirect actions of the device from its resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_detach(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_indexed_pool *ipool =
+                       priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+       struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+       int ret = 0;
+       uint32_t idx;
+
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u could not detach "
+                               "indirection table obj %p",
+                               dev->data->port_id, (void *)ind_tbl);
+                       goto error;
+               }
+       }
+       return 0;
+error:
+       shared_rss_last = shared_rss;
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               if (shared_rss == shared_rss_last)
+                       break;
+               if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
+                       DRV_LOG(CRIT, "Port %u could not attach "
+                               "indirection table obj %p on rollback",
+                               dev->data->port_id, (void *)ind_tbl);
+       }
+       return ret;
+}
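Attach and detach are intended to be symmetric across a port restart; in the companion changes (not part of this file's diff) one would expect them wired into the start and stop paths roughly as below. Sketch only; the function names here are illustrative:

/* Illustrative wiring; real call sites live in the start/stop paths. */
static int
dev_start_sketch(struct rte_eth_dev *dev)
{
	int ret = mlx5_action_handle_attach(dev);

	if (ret != 0)
		return ret; /* attach already rolled itself back */
	/* ... start queues, replay flows ... */
	return 0;
}

static void
dev_stop_sketch(struct rte_eth_dev *dev)
{
	/* ... stop traffic first ... */
	if (mlx5_action_handle_detach(dev) != 0)
		DRV_LOG(ERR, "port %u indirect action detach failed",
			dev->data->port_id);
}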
+
 #ifndef HAVE_MLX5DV_DR
 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
 #else
@@ -9672,17 +9830,119 @@ mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
                const struct mlx5_flow_expand_node *node)
 {
        const struct rte_flow_item *item = pattern + item_idx, *prev_item;
-       switch (item->type) {
-       case RTE_FLOW_ITEM_TYPE_VXLAN:
+
+       if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN &&
+                       node != NULL &&
+                       node->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+               /*
+                * The expansion node is VXLAN and it is also the last
+                * expandable item in the pattern, so we need to continue
+                * expansion of the inner tunnel.
+                */
                MLX5_ASSERT(item_idx > 0);
                prev_item = pattern + item_idx - 1;
                MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
                if (mlx5_flow_is_std_vxlan_port(prev_item))
                        return &graph[MLX5_EXPANSION_STD_VXLAN];
-               else
-                       return &graph[MLX5_EXPANSION_L3_VXLAN];
+               return &graph[MLX5_EXPANSION_L3_VXLAN];
+       }
+       return node;
+}
+
+/* Map of Verbs to Flow priority with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+       { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
+};
+
+/* Map of Verbs to Flow priority with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+       { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+       { 9, 10, 11 }, { 12, 13, 14 },
+};
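Each row of these tables is a flow rule priority and each column a per-layer subpriority (MLX5_PRIORITY_MAP_MAX columns, 3 in these tables). A self-contained check of the arithmetic, using a local copy of the 16-priority table:

#include <assert.h>
#include <stdint.h>

/* Local copy of priority_map_5 above, for an arithmetic check. */
static const uint32_t map5[][3] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

static void
priority_map_check(void)
{
	assert(map5[1][2] == 5);  /* rule priority 1, deepest subpriority */
	assert(map5[4][0] == 12); /* rule priority 4, shallowest subpriority */
}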
+
+/**
+ * Discover the number of available flow priorities.
+ *
+ * @param dev
+ *   Ethernet device.
+ *
+ * @return
+ *   On success, number of available flow priorities.
+ *   On failure, a negative errno-style code and rte_errno is set.
+ */
+int
+mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
+{
+       static const uint16_t vprio[] = {8, 16};
+       const struct mlx5_priv *priv = dev->data->dev_private;
+       const struct mlx5_flow_driver_ops *fops;
+       enum mlx5_flow_drv_type type;
+       int ret;
+
+       type = mlx5_flow_os_get_type();
+       if (type == MLX5_FLOW_TYPE_MAX) {
+               type = MLX5_FLOW_TYPE_VERBS;
+               if (priv->sh->devx && priv->config.dv_flow_en)
+                       type = MLX5_FLOW_TYPE_DV;
+       }
+       fops = flow_get_drv_ops(type);
+       if (fops->discover_priorities == NULL) {
+               DRV_LOG(ERR, "Priority discovery not supported");
+               rte_errno = ENOTSUP;
+               return -rte_errno;
+       }
+       ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));
+       if (ret < 0)
+               return ret;
+       switch (ret) {
+       case 8:
+               ret = RTE_DIM(priority_map_3);
+               break;
+       case 16:
+               ret = RTE_DIM(priority_map_5);
                break;
        default:
-               return node;
+               rte_errno = ENOTSUP;
+               DRV_LOG(ERR,
+                       "port %u maximum priority: %d expected 8/16",
+                       dev->data->port_id, ret);
+               return -rte_errno;
+       }
+       DRV_LOG(INFO, "port %u supported flow priorities:"
+               " 0-%d for ingress or egress root table,"
+               " 0-%d for non-root table or transfer root table.",
+               dev->data->port_id, ret - 2,
+               MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
+       return ret;
+}
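The discovered count is expected to be cached once at device spawn so that mlx5_flow_adjust_priority() below can consult it through priv->sh->flow_max_priority. A hedged sketch of such a call site (the actual wiring belongs to the companion probe-path changes, not this diff):

/* Hedged sketch: discover once at spawn, cache in the shared context. */
static int
probe_discover_sketch(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int prio = mlx5_flow_discover_priorities(dev);

	if (prio < 0)
		return prio; /* rte_errno already set */
	priv->sh->flow_max_priority = prio;
	return 0;
}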
+
+/**
+ * Adjust flow priority based on the highest layer and the requested priority.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] priority
+ *   The rule base priority.
+ * @param[in] subpriority
+ *   The priority based on the items.
+ *
+ * @return
+ *   The new priority.
+ */
+uint32_t
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+                         uint32_t subpriority)
+{
+       uint32_t res = 0;
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       switch (priv->sh->flow_max_priority) {
+       case RTE_DIM(priority_map_3):
+               res = priority_map_3[priority][subpriority];
+               break;
+       case RTE_DIM(priority_map_5):
+               res = priority_map_5[priority][subpriority];
+               break;
        }
+       return res;
 }
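For example, on a device with 16 Verbs priorities (priority_map_5), a rule with priority 1 whose matcher reached the deepest layer (subpriority 2) maps to Verbs priority 5. A hypothetical call, as a fragment:

/* Hypothetical call; values follow the priority_map_5 table above. */
uint32_t prio = mlx5_flow_adjust_priority(dev, /*priority=*/1,
					  /*subpriority=*/2);
/* prio == priority_map_5[1][2] == 5 */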