diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5d19ef1..9904bc5 100644
@@ -391,13 +391,20 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
                next = NULL;
                missed = 1;
-               for (i = 0; node->next && node->next[i]; ++i) {
+               i = 0;
+               while (node->next && node->next[i]) {
                        next = &graph[node->next[i]];
                        if (next->type == missed_item.type) {
                                flow_items[0].type = missed_item.type;
                                flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
                                break;
                        }
+                       if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
+                               node = next;
+                               i = 0;
+                       } else {
+                               ++i;
+                       }
                        next = NULL;
                }
        }
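
The loop is reworked from a for() into a while() so the scan can descend into expansion nodes flagged MLX5_EXPANSION_NODE_EXPLICIT instead of merely stepping over them. A minimal standalone sketch of the same traversal, using a hypothetical node type in place of the mlx5 expansion graph:

    /* Hypothetical, simplified mirror of the loop above: an explicit
     * node is entered and the scan restarts at its first child;
     * ordinary siblings are stepped over with ++i. */
    #define NODE_EXPLICIT 0x1

    struct xnode {
            int type;
            int node_flags;
            const int *next;        /* zero-terminated child index list */
    };

    static const struct xnode *
    find_missed_type(const struct xnode *graph, const struct xnode *node,
                     int missed_type)
    {
            int i = 0;

            while (node->next && node->next[i]) {
                    const struct xnode *next = &graph[node->next[i]];

                    if (next->type == missed_type)
                            return next;
                    if (next->node_flags & NODE_EXPLICIT) {
                            node = next;    /* descend */
                            i = 0;          /* rescan from first child */
                    } else {
                            ++i;
                    }
            }
            return NULL;
    }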
@@ -914,7 +921,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "invalid tag id");
-               if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
+               if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
@@ -924,21 +931,21 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                 * If the available index REG_C_y >= REG_C_x, skip the
                 * color register.
                 */
-               if (skip_mtr_reg && config->flow_mreg_c
+               if (skip_mtr_reg && priv->sh->flow_mreg_c
                    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
                        if (id >= (uint32_t)(REG_C_7 - start_reg))
                                return rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                        NULL, "invalid tag id");
-                       if (config->flow_mreg_c
+                       if (priv->sh->flow_mreg_c
                            [id + 1 + start_reg - REG_C_0] != REG_NON)
-                               return config->flow_mreg_c
+                               return priv->sh->flow_mreg_c
                                               [id + 1 + start_reg - REG_C_0];
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                }
-               return config->flow_mreg_c[id + start_reg - REG_C_0];
+               return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
        }
        MLX5_ASSERT(false);
        return rte_flow_error_set(error, EINVAL,
@@ -959,7 +966,6 @@ bool
 mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
 
        /*
         * Having available reg_c can be regarded inclusively as supporting
@@ -969,7 +975,7 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
         * - reg_c's are preserved across different domain (FDB and NIC) on
         *   packet loopback by flow lookup miss.
         */
-       return config->flow_mreg_c[2] != REG_NON;
+       return priv->sh->flow_mreg_c[2] != REG_NON;
 }
 
 /**
@@ -990,7 +996,7 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
 
        if (!attr->group && !attr->transfer)
-               return priv->config.flow_prio - 2;
+               return priv->sh->flow_max_priority - 2;
        return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
 }
 
@@ -1018,11 +1024,11 @@ mlx5_get_matcher_priority(struct rte_eth_dev *dev,
 
        if (!attr->group && !attr->transfer) {
                if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
-                       priority = priv->config.flow_prio - 1;
+                       priority = priv->sh->flow_max_priority - 1;
                return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
        } else if (!external && attr->transfer && attr->group == 0 &&
                   attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
-               return (priv->config.flow_prio - 1) * 3;
+               return (priv->sh->flow_max_priority - 1) * 3;
        }
        if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
                priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
@@ -1588,6 +1594,58 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
        return 0;
 }
 
+/**
+ * Validate queue numbers for device RSS.
+ *
+ * @param[in] dev
+ *   Configured device.
+ * @param[in] queues
+ *   Array of queue numbers.
+ * @param[in] queues_n
+ *   Size of the @p queues array.
+ * @param[out] error
+ *   On error, filled with a textual error description.
+ * @param[out] queue_idx
+ *   On error, filled with the index of the offending queue in @p queues.
+ *
+ * @return
+ *   0 on success, a negative errno code on error.
+ */
+static int
+mlx5_validate_rss_queues(const struct rte_eth_dev *dev,
+                        const uint16_t *queues, uint32_t queues_n,
+                        const char **error, uint32_t *queue_idx)
+{
+       const struct mlx5_priv *priv = dev->data->dev_private;
+       enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
+       uint32_t i;
+
+       for (i = 0; i != queues_n; ++i) {
+               struct mlx5_rxq_ctrl *rxq_ctrl;
+
+               if (queues[i] >= priv->rxqs_n) {
+                       *error = "queue index out of range";
+                       *queue_idx = i;
+                       return -EINVAL;
+               }
+               if (!(*priv->rxqs)[queues[i]]) {
+                       *error = "queue is not configured";
+                       *queue_idx = i;
+                       return -EINVAL;
+               }
+               rxq_ctrl = container_of((*priv->rxqs)[queues[i]],
+                                       struct mlx5_rxq_ctrl, rxq);
+               if (i == 0)
+                       rxq_type = rxq_ctrl->type;
+               if (rxq_type != rxq_ctrl->type) {
+                       *error = "combining hairpin and regular RSS queues is not supported";
+                       *queue_idx = i;
+                       return -ENOTSUP;
+               }
+       }
+       return 0;
+}
+
 /*
  * Validate the rss action.
  *
@@ -1608,8 +1666,9 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = action->conf;
-       enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
-       unsigned int i;
+       int ret;
+       const char *message;
+       uint32_t queue_idx;
 
        if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
            rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
@@ -1673,27 +1732,12 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "No queues configured");
-       for (i = 0; i != rss->queue_num; ++i) {
-               struct mlx5_rxq_ctrl *rxq_ctrl;
-
-               if (rss->queue[i] >= priv->rxqs_n)
-                       return rte_flow_error_set
-                               (error, EINVAL,
-                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i], "queue index out of range");
-               if (!(*priv->rxqs)[rss->queue[i]])
-                       return rte_flow_error_set
-                               (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i], "queue is not configured");
-               rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
-                                       struct mlx5_rxq_ctrl, rxq);
-               if (i == 0)
-                       rxq_type = rxq_ctrl->type;
-               if (rxq_type != rxq_ctrl->type)
-                       return rte_flow_error_set
-                               (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i],
-                                "combining hairpin and regular RSS queues is not supported");
+       ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num,
+                                      &message, &queue_idx);
+       if (ret != 0) {
+               return rte_flow_error_set(error, -ret,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->queue[queue_idx], message);
        }
        return 0;
 }
@@ -1877,7 +1921,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                              struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t priority_max = priv->config.flow_prio - 1;
+       uint32_t priority_max = priv->sh->flow_max_priority - 1;
 
        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
@@ -3676,8 +3720,11 @@ flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
        uint16_t offset = (age_idx >> 16) & UINT16_MAX;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
-       struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
+       struct mlx5_aso_age_pool *pool;
 
+       rte_rwlock_read_lock(&mng->resize_rwl);
+       pool = mng->pools[pool_idx];
+       rte_rwlock_read_unlock(&mng->resize_rwl);
        return &pool->actions[offset - 1];
 }
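
The read lock protects the pools array lookup against concurrent resizing. A minimal sketch of the assumed writer side (not part of this patch; the struct mirrors only the fields the sketch needs):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>
    #include <rte_rwlock.h>

    struct pool_mng {                       /* hypothetical mirror */
            rte_rwlock_t resize_rwl;
            void **pools;
            uint16_t n;                     /* allocated pool slots */
    };

    static int
    pool_mng_resize(struct pool_mng *mng, uint16_t new_n)
    {
            void **pools = calloc(new_n, sizeof(*pools));

            if (pools == NULL)
                    return -ENOMEM;
            /* Writers take the same lock exclusively, so readers never
             * observe a half-swapped pools array. */
            rte_rwlock_write_lock(&mng->resize_rwl);
            memcpy(pools, mng->pools, mng->n * sizeof(*pools));
            free(mng->pools);
            mng->pools = pools;
            mng->n = new_n;
            rte_rwlock_write_unlock(&mng->resize_rwl);
            return 0;
    }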
 
@@ -8112,13 +8159,12 @@ int
 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
        enum modify_reg idx;
        int n = 0;
 
        /* reg_c[0] and reg_c[1] are reserved. */
-       config->flow_mreg_c[n++] = REG_C_0;
-       config->flow_mreg_c[n++] = REG_C_1;
+       priv->sh->flow_mreg_c[n++] = REG_C_0;
+       priv->sh->flow_mreg_c[n++] = REG_C_1;
        /* Discover availability of other reg_c's. */
        for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
                struct rte_flow_attr attr = {
@@ -8154,7 +8200,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
                struct rte_flow *flow;
                struct rte_flow_error error;
 
-               if (!config->dv_flow_en)
+               if (!priv->config.dv_flow_en)
                        break;
                /* Create internal flow, validation skips copy action. */
                flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
@@ -8163,17 +8209,18 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
                                      flow_idx);
                if (!flow)
                        continue;
-               config->flow_mreg_c[n++] = idx;
+               priv->sh->flow_mreg_c[n++] = idx;
                flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
        }
        for (; n < MLX5_MREG_C_NUM; ++n)
-               config->flow_mreg_c[n] = REG_NON;
+               priv->sh->flow_mreg_c[n] = REG_NON;
+       priv->sh->metadata_regc_check_flag = 1;
        return 0;
 }
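
Discovery fills flow_mreg_c contiguously and pads the tail with REG_NON, so the number of usable registers can be read straight from the shared context. A small hedged helper sketch (not part of this patch; assumes the mlx5 internal headers):

    /* Count the reg_c registers the probe above recorded as usable. */
    static unsigned int
    count_avail_mreg_c(const struct mlx5_priv *priv)
    {
            unsigned int n = 0;

            while (n < MLX5_MREG_C_NUM &&
                   priv->sh->flow_mreg_c[n] != REG_NON)
                    ++n;
            return n;
    }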
 
 int
 save_dump_file(const uint8_t *data, uint32_t size,
-       uint32_t type, uint32_t id, void *arg, FILE *file)
+       uint32_t type, uint64_t id, void *arg, FILE *file)
 {
        char line[BUF_SIZE];
        uint32_t out = 0;
@@ -8185,17 +8232,18 @@ save_dump_file(const uint8_t *data, uint32_t size,
        switch (type) {
        case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR:
                actions_num = *(uint32_t *)(arg);
-               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,%d,",
+               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,",
                                type, id, actions_num);
                break;
        case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT:
-               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,",
+               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",",
                                type, id);
                break;
        case DR_DUMP_REC_TYPE_PMD_COUNTER:
                count = (struct rte_flow_query_count *)arg;
-               fprintf(file, "%d,0x%x,%" PRIu64 ",%" PRIu64 "\n", type,
-                               id, count->hits, count->bytes);
+               fprintf(file,
+                       "%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n",
+                       type, id, count->hits, count->bytes);
                return 0;
        default:
                return -1;
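
The id argument widens from uint32_t to uint64_t because it now carries pointer-derived values (see the dump hunks below), which a plain %x would truncate on LP64 targets. A minimal standalone illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            int obj = 0;
            uint64_t id = (uint64_t)(uintptr_t)&obj; /* pointer-derived id */

            /* PRIx64 prints all 64 bits; "%x" would truncate on LP64. */
            printf("0x%" PRIx64 "\n", id);
            return 0;
    }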
@@ -8269,30 +8317,34 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
        uint32_t actions_num;
        const uint8_t *data;
        size_t size;
-       uint32_t id;
+       uint64_t id;
        uint32_t type;
+       void *action = NULL;
 
        if (!flow) {
                return rte_flow_error_set(error, ENOENT,
-                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                       NULL,
-                       "invalid flow handle");
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "invalid flow handle");
        }
        handle_idx = flow->dev_handles;
        while (handle_idx) {
                dh = mlx5_ipool_get(priv->sh->ipool
-                       [MLX5_IPOOL_MLX5_FLOW], handle_idx);
+                               [MLX5_IPOOL_MLX5_FLOW], handle_idx);
                if (!dh)
                        continue;
                handle_idx = dh->next.next;
-               id = (uint32_t)(uintptr_t)dh->drv_flow;
 
                /* query counter */
                type = DR_DUMP_REC_TYPE_PMD_COUNTER;
-               if (!mlx5_flow_query_counter(dev, flow, &count, error))
-                       save_dump_file(NULL, 0, type,
-                                       id, (void *)&count, file);
-
+               flow_dv_query_count_ptr(dev, flow->counter,
+                                               &action, error);
+               if (action) {
+                       id = (uint64_t)(uintptr_t)action;
+                       if (!mlx5_flow_query_counter(dev, flow, &count, error))
+                               save_dump_file(NULL, 0, type,
+                                               id, (void *)&count, file);
+               }
                /* Get modify_hdr and encap_decap buf from ipools. */
                encap_decap = NULL;
                modify_hdr = dh->dvh.modify_hdr;
@@ -8305,14 +8357,16 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
                if (modify_hdr) {
                        data = (const uint8_t *)modify_hdr->actions;
                        size = (size_t)(modify_hdr->actions_num) * 8;
+                       id = (uint64_t)(uintptr_t)modify_hdr->action;
                        actions_num = modify_hdr->actions_num;
                        type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
                        save_dump_file(data, size, type, id,
-                                       (void *)(&actions_num), file);
+                                               (void *)(&actions_num), file);
                }
                if (encap_decap) {
                        data = encap_decap->buf;
                        size = encap_decap->size;
+                       id = (uint64_t)(uintptr_t)encap_decap->action;
                        type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
                        save_dump_file(data, size, type,
                                                id, NULL, file);
@@ -8320,6 +8374,119 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
        }
        return 0;
 }
+
+/**
+ * Dump all flows' encap_decap/modify_hdr/counter data to a file.
+ *
+ * @param[in] dev
+ *   The pointer to Ethernet device.
+ * @param[in] file
+ *   A pointer to a file for output.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+static int
+mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
+       FILE *file, struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_hlist *h;
+       struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
+       struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+       struct rte_flow_query_count count;
+       uint32_t actions_num;
+       const uint8_t *data;
+       size_t size;
+       uint64_t id;
+       uint32_t type;
+       uint32_t i;
+       uint32_t j;
+       struct mlx5_list_inconst *l_inconst;
+       struct mlx5_list_entry *e;
+       int lcore_index;
+       struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+       uint32_t max;
+       void *action;
+
+       /* The encap_decap hlist is lcore-shared; use the global core cache. */
+       i = MLX5_LIST_GLOBAL;
+       h = sh->encaps_decaps;
+       if (h) {
+               for (j = 0; j <= h->mask; j++) {
+                       l_inconst = &h->buckets[j].l;
+                       if (!l_inconst || !l_inconst->cache[i])
+                               continue;
+
+                       e = LIST_FIRST(&l_inconst->cache[i]->h);
+                       while (e) {
+                               encap_decap =
+                               (struct mlx5_flow_dv_encap_decap_resource *)e;
+                               data = encap_decap->buf;
+                               size = encap_decap->size;
+                               id = (uint64_t)(uintptr_t)encap_decap->action;
+                               type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
+                               save_dump_file(data, size, type,
+                                       id, NULL, file);
+                               e = LIST_NEXT(e, next);
+                       }
+               }
+       }
+
+       /* get modify_hdr */
+       h = sh->modify_cmds;
+       if (h) {
+               lcore_index = rte_lcore_index(rte_lcore_id());
+               if (unlikely(lcore_index == -1)) {
+                       lcore_index = MLX5_LIST_NLCORE;
+                       rte_spinlock_lock(&h->l_const.lcore_lock);
+               }
+               i = lcore_index;
+
+               for (j = 0; j <= h->mask; j++) {
+                       l_inconst = &h->buckets[j].l;
+                       if (!l_inconst || !l_inconst->cache[i])
+                               continue;
+
+                       e = LIST_FIRST(&l_inconst->cache[i]->h);
+                       while (e) {
+                               modify_hdr =
+                               (struct mlx5_flow_dv_modify_hdr_resource *)e;
+                               data = (const uint8_t *)modify_hdr->actions;
+                               size = (size_t)(modify_hdr->actions_num) * 8;
+                               actions_num = modify_hdr->actions_num;
+                               id = (uint64_t)(uintptr_t)modify_hdr->action;
+                               type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
+                               save_dump_file(data, size, type, id,
+                                               (void *)(&actions_num), file);
+                               e = LIST_NEXT(e, next);
+                       }
+               }
+
+               if (unlikely(lcore_index == MLX5_LIST_NLCORE))
+                       rte_spinlock_unlock(&h->l_const.lcore_lock);
+       }
+
+       /* get counter */
+       MLX5_ASSERT(cmng->n_valid <= cmng->n);
+       max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
+       for (j = 1; j <= max; j++) {
+               action = NULL;
+               flow_dv_query_count_ptr(dev, j, &action, error);
+               if (action) {
+                       if (!flow_dv_query_count(dev, j, &count, error)) {
+                               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+                               id = (uint64_t)(uintptr_t)action;
+                               save_dump_file(NULL, 0, type,
+                                               id, (void *)&count, file);
+                       }
+               }
+       }
+       return 0;
+}
 #endif
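
With this change the dump-all path walks the shared-context caches instead of iterating every flow (see the mlx5_flow_dev_dump() hunk below). A usage sketch from the application side; the helper name and file handling are illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <rte_flow.h>

    /* Passing a NULL flow pointer requests the dump-all path. */
    static int
    dump_all_flows(uint16_t port_id, const char *path)
    {
            struct rte_flow_error error;
            FILE *f = fopen(path, "w");
            int ret;

            if (f == NULL)
                    return -errno;
            ret = rte_flow_dev_dump(port_id, NULL, f, &error);
            fclose(f);
            return ret;
    }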
 
 /**
@@ -8346,9 +8513,6 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        int ret;
        struct mlx5_flow_handle *dh;
        struct rte_flow *flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-       uint32_t idx;
-#endif
 
        if (!priv->config.dv_flow_en) {
                if (fputs("device dv flow disabled\n", file) <= 0)
@@ -8359,8 +8523,8 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        /* dump all */
        if (!flow_idx) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-               MLX5_IPOOL_FOREACH(priv->flows[MLX5_FLOW_TYPE_GEN], idx, flow)
-                       mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+               if (mlx5_flow_dev_dump_sh_all(dev, file, error))
+                       return -EINVAL;
 #endif
                return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
                                        sh->rx_domain,
@@ -8370,7 +8534,7 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
                        (uintptr_t)(void *)flow_idx);
        if (!flow)
-               return -ENOENT;
+               return -EINVAL;
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        mlx5_flow_dev_dump_ipool(dev, flow, file, error);
@@ -8660,6 +8824,116 @@ mlx5_action_handle_flush(struct rte_eth_dev *dev)
        return ret;
 }
 
+/**
+ * Validate existing indirect actions against current device configuration
+ * and attach them to device resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_attach(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_indexed_pool *ipool =
+                       priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+       struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+       int ret = 0;
+       uint32_t idx;
+
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+               const char *message;
+               uint32_t queue_idx;
+
+               ret = mlx5_validate_rss_queues(dev, ind_tbl->queues,
+                                              ind_tbl->queues_n,
+                                              &message, &queue_idx);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s",
+                               dev->data->port_id, ind_tbl->queues[queue_idx],
+                               message);
+                       break;
+               }
+       }
+       if (ret != 0)
+               return ret;
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u could not attach "
+                               "indirection table obj %p",
+                               dev->data->port_id, (void *)ind_tbl);
+                       goto error;
+               }
+       }
+       return 0;
+error:
+       shared_rss_last = shared_rss;
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               if (shared_rss == shared_rss_last)
+                       break;
+               if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
+                       DRV_LOG(CRIT, "Port %u could not detach "
+                               "indirection table obj %p on rollback",
+                               dev->data->port_id, (void *)ind_tbl);
+       }
+       return ret;
+}
+
+/**
+ * Detach indirect actions of the device from its resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_detach(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_indexed_pool *ipool =
+                       priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+       struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+       int ret = 0;
+       uint32_t idx;
+
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u could not detach "
+                               "indirection table obj %p",
+                               dev->data->port_id, (void *)ind_tbl);
+                       goto error;
+               }
+       }
+       return 0;
+error:
+       shared_rss_last = shared_rss;
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               if (shared_rss == shared_rss_last)
+                       break;
+               if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
+                       DRV_LOG(CRIT, "Port %u could not attach "
+                               "indirection table obj %p on rollback",
+                               dev->data->port_id, (void *)ind_tbl);
+       }
+       return ret;
+}
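
These two entry points are meant to bracket a port restart; the wiring into the start/stop paths lives outside this file. A hedged sketch of the intended sequence (the helper name is hypothetical):

    /* Sketch only: detach on port stop so queue objects can be
     * released, then re-validate and attach on port start against the
     * (possibly changed) queue configuration. */
    static int
    restart_indirect_actions(struct rte_eth_dev *dev)
    {
            int ret;

            mlx5_action_handle_detach(dev);         /* on port stop */
            /* ... Rx queues may be reconfigured here ... */
            ret = mlx5_action_handle_attach(dev);   /* on port start */
            if (ret != 0)
                    DRV_LOG(ERR,
                            "port %u failed to re-attach indirect actions",
                            dev->data->port_id);
            return ret;
    }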
+
 #ifndef HAVE_MLX5DV_DR
 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
 #else
@@ -9556,17 +9830,119 @@ mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
                const struct mlx5_flow_expand_node *node)
 {
        const struct rte_flow_item *item = pattern + item_idx, *prev_item;
-       switch (item->type) {
-       case RTE_FLOW_ITEM_TYPE_VXLAN:
+
+       if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN &&
+                       node != NULL &&
+                       node->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+               /*
+                * The expansion node is VXLAN and it is also the last
+                * expandable item in the pattern, so expansion must continue
+                * into the inner tunnel.
+                */
                MLX5_ASSERT(item_idx > 0);
                prev_item = pattern + item_idx - 1;
                MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
                if (mlx5_flow_is_std_vxlan_port(prev_item))
                        return &graph[MLX5_EXPANSION_STD_VXLAN];
-               else
-                       return &graph[MLX5_EXPANSION_L3_VXLAN];
+               return &graph[MLX5_EXPANSION_L3_VXLAN];
+       }
+       return node;
+}
+
+/* Map of Verbs to Flow priority with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+       { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
+};
+
+/* Map of Verbs to Flow priority with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+       { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+       { 9, 10, 11 }, { 12, 13, 14 },
+};
+
+/**
+ * Discover the number of available flow priorities.
+ *
+ * @param dev
+ *   Ethernet device.
+ *
+ * @return
+ *   On success, number of available flow priorities.
+ *   On failure, a negative errno-style code and rte_errno is set.
+ */
+int
+mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
+{
+       static const uint16_t vprio[] = {8, 16};
+       const struct mlx5_priv *priv = dev->data->dev_private;
+       const struct mlx5_flow_driver_ops *fops;
+       enum mlx5_flow_drv_type type;
+       int ret;
+
+       type = mlx5_flow_os_get_type();
+       if (type == MLX5_FLOW_TYPE_MAX) {
+               type = MLX5_FLOW_TYPE_VERBS;
+               if (priv->sh->devx && priv->config.dv_flow_en)
+                       type = MLX5_FLOW_TYPE_DV;
+       }
+       fops = flow_get_drv_ops(type);
+       if (fops->discover_priorities == NULL) {
+               DRV_LOG(ERR, "Priority discovery not supported");
+               rte_errno = ENOTSUP;
+               return -rte_errno;
+       }
+       ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));
+       if (ret < 0)
+               return ret;
+       switch (ret) {
+       case 8:
+               ret = RTE_DIM(priority_map_3);
+               break;
+       case 16:
+               ret = RTE_DIM(priority_map_5);
                break;
        default:
-               return node;
+               rte_errno = ENOTSUP;
+               DRV_LOG(ERR,
+                       "port %u maximum priority: %d expected 8/16",
+                       dev->data->port_id, ret);
+               return -rte_errno;
+       }
+       DRV_LOG(INFO, "port %u supported flow priorities:"
+               " 0-%d for ingress or egress root table,"
+               " 0-%d for non-root table or transfer root table.",
+               dev->data->port_id, ret - 2,
+               MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
+       return ret;
+}
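
The discovered count backs every priv->sh->flow_max_priority read introduced above. A hedged sketch of the presumed probe-time caller (the helper name is hypothetical; the real call site is in the shared-context setup):

    static int
    cache_flow_max_priority(struct rte_eth_dev *dev,
                            struct mlx5_dev_ctx_shared *sh)
    {
            int prio = mlx5_flow_discover_priorities(dev);

            if (prio < 0)
                    return prio;            /* rte_errno already set */
            sh->flow_max_priority = prio;   /* 3 or 5, per the maps above */
            return 0;
    }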
+
+/**
+ * Adjust flow priority based on the highest layer and the request priority.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] priority
+ *   The rule base priority.
+ * @param[in] subpriority
+ *   The priority based on the items.
+ *
+ * @return
+ *   The new priority.
+ */
+uint32_t
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+                         uint32_t subpriority)
+{
+       uint32_t res = 0;
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       switch (priv->sh->flow_max_priority) {
+       case RTE_DIM(priority_map_3):
+               res = priority_map_3[priority][subpriority];
+               break;
+       case RTE_DIM(priority_map_5):
+               res = priority_map_5[priority][subpriority];
+               break;
        }
+       return res;
 }
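
Worked example: with 16 Verbs priorities flow_max_priority is RTE_DIM(priority_map_5) = 5, so priority 1 with subpriority 2 maps to Verbs priority 5; with 8 Verbs priorities (priority_map_3) the same pair maps to 4. An out-of-range flow_max_priority leaves res at 0.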