net/mlx5: add wire vport hint
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 58fc628..3d69957 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -81,6 +81,8 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
 static int
 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
 
 /**
  * Initialize flow attributes structure according to flow items' types.
@@ -3933,7 +3935,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
        target_group =
                ((const struct rte_flow_action_jump *)action->conf)->group;
        ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
-                                      grp_info, error);
+                                      &grp_info, error);
        if (ret)
                return ret;
        if (attributes->group == target_group &&
@@ -5101,7 +5103,7 @@ static int
 flow_dv_validate_attributes(struct rte_eth_dev *dev,
                            const struct mlx5_flow_tunnel *tunnel,
                            const struct rte_flow_attr *attributes,
-                           struct flow_grp_info grp_info,
+                           const struct flow_grp_info *grp_info,
                            struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
@@ -5256,7 +5258,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        }
        grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
                                (dev, tunnel, attr, items, actions);
-       ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
+       ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
        if (ret < 0)
                return ret;
        is_root = (uint64_t)ret;
@@ -7594,13 +7596,16 @@ flow_dv_translate_item_source_vport(void *matcher, void *key,
  *   Flow matcher value.
  * @param[in] item
  *   Flow pattern to translate.
+ * @param[in] attr
+ *   Flow attributes.
  *
  * @return
  *   0 on success, a negative errno value otherwise.
  */
 static int
 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
-                              void *key, const struct rte_flow_item *item)
+                              void *key, const struct rte_flow_item *item,
+                              const struct rte_flow_attr *attr)
 {
        const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
        const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
@@ -7612,14 +7617,30 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
        priv = mlx5_port_to_eswitch_info(id, item == NULL);
        if (!priv)
                return -rte_errno;
-       /* Translate to vport field or to metadata, depending on mode. */
-       if (priv->vport_meta_mask)
-               flow_dv_translate_item_meta_vport(matcher, key,
-                                                 priv->vport_meta_tag,
-                                                 priv->vport_meta_mask);
-       else
+       /*
+        * Translate to vport field or to metadata, depending on mode.
+        * The kernel can use either misc.source_port or half of the C0
+        * metadata register.
+        */
+       if (priv->vport_meta_mask) {
+               /*
+                * Provide the hint to the SW steering library
+                * to insert the flow into the ingress domain and
+                * save the extra vport match.
+                */
+               if (mask == 0xffff && priv->vport_id == 0xffff &&
+                   priv->pf_bond < 0 && attr->transfer)
+                       flow_dv_translate_item_source_vport
+                               (matcher, key, priv->vport_id, mask);
+               else
+                       flow_dv_translate_item_meta_vport
+                               (matcher, key,
+                                priv->vport_meta_tag,
+                                priv->vport_meta_mask);
+       } else {
                flow_dv_translate_item_source_vport(matcher, key,
                                                    priv->vport_id, mask);
+       }
        return 0;
 }
 
@@ -8040,6 +8061,8 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
                                   "cannot get table");
                return NULL;
        }
+       DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
+               table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
        tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
        return &tbl_data->tbl;
 }
@@ -8078,7 +8101,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
                if (he)
                        mlx5_hlist_unregister(tunnel_grp_hash, he);
                DRV_LOG(DEBUG,
-                       "Table_id %#x tunnel %u group %u released.",
+                       "Table_id %u tunnel %u group %u released.",
                        table_id,
                        tbl_data->tunnel ?
                        tbl_data->tunnel->tunnel_id : 0,
@@ -8190,6 +8213,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_matcher *ref,
                         union mlx5_flow_tbl_key *key,
                         struct mlx5_flow *dev_flow,
+                        const struct mlx5_flow_tunnel *tunnel,
+                        uint32_t group_id,
                         struct rte_flow_error *error)
 {
        struct mlx5_cache_entry *entry;
@@ -8201,8 +8226,14 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
                .data = ref,
        };
 
-       tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
-                                      key->domain, false, NULL, 0, 0, error);
+       /*
+        * The tunnel offload API requires this registration when a tunnel
+        * match rule was inserted before the tunnel set rule.
+        */
+       tbl = flow_dv_tbl_resource_get(dev, key->table_id,
+                                      key->direction, key->domain,
+                                      dev_flow->external, tunnel,
+                                      group_id, 0, error);
        if (!tbl)
                return -rte_errno;      /* No need to refill the error info */
        tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
@@ -8558,7 +8589,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
        rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
        rss_desc->hash_fields = dev_flow->hash_fields;
        rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
-       rss_desc->standalone = false;
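+       /* Zero means the flow does not reference a shared RSS action. */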
+       rss_desc->shared_rss = 0;
        *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
        if (!*hrxq_idx)
                return NULL;
@@ -8981,7 +9012,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
        uint64_t action_flags = 0;
 
        MLX5_ASSERT(wks);
-       rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+       rss_desc = &wks->rss_desc;
        sample_act = &res->sample_act;
        sample_idx = &res->sample_idx;
        sample_action = (const struct rte_flow_action_sample *)action->conf;
@@ -9193,7 +9224,7 @@ flow_dv_create_action_sample(struct rte_eth_dev *dev,
        uint32_t hrxq_idx;
 
        MLX5_ASSERT(wks);
-       rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+       rss_desc = &wks->rss_desc;
        if (num_of_dest > 1) {
                if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
                        /* Handle QP action for mirroring */
@@ -9345,8 +9376,10 @@ flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
                /* First ASO flow hit allocation - starting ASO data-path. */
                int ret = mlx5_aso_queue_start(priv->sh);
 
-               if (ret)
+               if (ret) {
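+                       /* Free the newly resized pools array to avoid a leak. */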
+                       mlx5_free(pools);
                        return ret;
+               }
        }
        mng->n = resize;
        mng->pools = pools;
@@ -9571,8 +9604,12 @@ flow_dv_translate(struct rte_eth_dev *dev,
                .skip_scale = !!dev_flow->skip_scale,
        };
 
-       MLX5_ASSERT(wks);
-       rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+       if (!wks)
+               return rte_flow_error_set(error, ENOMEM,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "failed to push flow workspace");
+       rss_desc = &wks->rss_desc;
        memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
        memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
        mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
@@ -9589,7 +9626,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
        grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
                                (dev, tunnel, attr, items, actions);
        ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
-                                      grp_info, error);
+                                      &grp_info, error);
        if (ret)
                return ret;
        dev_flow->dv.group = table;
@@ -9603,10 +9640,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
                /*
                 * do not add decap action if match rule drops packet
                 * HW rejects rules with decap & drop
+                *
+                * If a tunnel match rule was inserted before the matching
+                * tunnel set rule, the flow table used in the match rule must
+                * be registered. The current implementation handles that in
+                * flow_dv_matcher_register() at the function end.
                 */
                bool add_decap = true;
                const struct rte_flow_action *ptr = actions;
-               struct mlx5_flow_tbl_resource *tbl;
 
                for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
                        if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
@@ -9623,20 +9664,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                        dev_flow->dv.encap_decap->action;
                        action_flags |= MLX5_FLOW_ACTION_DECAP;
                }
-               /*
-                * bind table_id with <group, table> for tunnel match rule.
-                * Tunnel set rule establishes that bind in JUMP action handler.
-                * Required for scenario when application creates tunnel match
-                * rule before tunnel set rule.
-                */
-               tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-                                              attr->transfer,
-                                              !!dev_flow->external, tunnel,
-                                              attr->group, 0, error);
-               if (!tbl)
-                       return rte_flow_error_set
-                              (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
-                              actions, "cannot register tunnel group");
        }
        for (; !actions_end ; actions++) {
                const struct rte_flow_action_queue *queue;
@@ -9780,7 +9807,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
                         * when expanding items for RSS.
                         */
                        action_flags |= MLX5_FLOW_ACTION_RSS;
-                       dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
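+                       /* A non-zero shared_rss selects the shared RSS fate. */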
+                       dev_flow->handle->fate_action = rss_desc->shared_rss ?
+                               MLX5_FLOW_FATE_SHARED_RSS :
+                               MLX5_FLOW_FATE_QUEUE;
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
                        flow->age = (uint32_t)(uintptr_t)(action->conf);
@@ -9934,7 +9963,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        ret = mlx5_flow_group_to_table(dev, tunnel,
                                                       jump_group,
                                                       &table,
-                                                      grp_info, error);
+                                                      &grp_info, error);
                        if (ret)
                                return ret;
                        tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
@@ -10214,8 +10243,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                                  NULL, "item not supported");
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_PORT_ID:
-                       flow_dv_translate_item_port_id(dev, match_mask,
-                                                      match_value, items);
+                       flow_dv_translate_item_port_id
+                               (dev, match_mask, match_value, items, attr);
                        last_item = MLX5_FLOW_ITEM_PORT_ID;
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
@@ -10441,7 +10470,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
        if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
            (priv->representor || priv->master)) {
                if (flow_dv_translate_item_port_id(dev, match_mask,
-                                                  match_value, NULL))
+                                                  match_value, NULL, attr))
                        return -rte_errno;
        }
 #ifdef RTE_LIBRTE_MLX5_DEBUG
@@ -10464,7 +10493,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
        tbl_key.domain = attr->transfer;
        tbl_key.direction = attr->egress;
        tbl_key.table_id = dev_flow->dv.group;
-       if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
+       if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+                                    tunnel, attr->group, error))
                return -rte_errno;
        return 0;
 }
@@ -10574,10 +10604,10 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] flow
- *   Shred RSS action holding hash RX queue objects.
  * @param[in] dev_flow
  *   Pointer to the sub flow.
+ * @param[in] rss_desc
+ *   Pointer to the RSS descriptor.
  * @param[out] hrxq
  *   Pointer to retrieved hash RX queue object.
  *
@@ -10585,29 +10615,23 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
  */
 static uint32_t
-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
-                          struct mlx5_flow *dev_flow,
-                          struct mlx5_hrxq **hrxq)
+__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
+                      struct mlx5_flow_rss_desc *rss_desc,
+                      struct mlx5_hrxq **hrxq)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
        uint32_t hrxq_idx;
 
-       if (flow->shared_rss) {
+       if (rss_desc->shared_rss) {
                hrxq_idx = __flow_dv_action_rss_hrxq_lookup
-                               (dev, flow->shared_rss, dev_flow->hash_fields,
+                               (dev, rss_desc->shared_rss,
+                                dev_flow->hash_fields,
                                 !!(dev_flow->handle->layers &
                                    MLX5_FLOW_LAYER_TUNNEL));
-               if (hrxq_idx) {
+               if (hrxq_idx)
                        *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                                               hrxq_idx);
-                       __atomic_fetch_add(&(*hrxq)->refcnt, 1,
-                                          __ATOMIC_RELAXED);
-               }
        } else {
-               struct mlx5_flow_rss_desc *rss_desc =
-                               &wks->rss_desc[!!wks->flow_nested_idx];
-
                *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
                                             &hrxq_idx);
        }
@@ -10642,9 +10666,15 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        int err;
        int idx;
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+       struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
 
        MLX5_ASSERT(wks);
-       for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
+       if (rss_desc->shared_rss) {
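+               /* Record the shared RSS action index on the last sub-flow handle. */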
+               dh = wks->flows[wks->flow_idx - 1].handle;
+               MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
+               dh->rix_srss = rss_desc->shared_rss;
+       }
+       for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
                dev_flow = &wks->flows[idx];
                dv = &dev_flow->dv;
                dh = dev_flow->handle;
@@ -10658,11 +10688,12 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                dv->actions[n++] =
                                                priv->drop_queue.hrxq->action;
                        }
-               } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
-                          !dv_h->rix_sample && !dv_h->rix_dest_array) {
+               } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
+                           !dv_h->rix_sample && !dv_h->rix_dest_array) ||
+                          dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
                        struct mlx5_hrxq *hrxq = NULL;
                        uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
-                                               (dev, flow, dev_flow, &hrxq);
+                                       (dev, dev_flow, rss_desc, &hrxq);
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
@@ -10670,7 +10701,8 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                         "cannot get hash queue");
                                goto error;
                        }
-                       dh->rix_hrxq = hrxq_idx;
+                       if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
+                               dh->rix_hrxq = hrxq_idx;
                        dv->actions[n++] = hrxq->action;
                } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
                        if (!priv->sh->default_miss_action) {
@@ -10716,6 +10748,8 @@ error:
                if (dh->vf_vlan.tag && dh->vf_vlan.created)
                        mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
        }
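+       /* On error, detach the shared RSS index from the handle. */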
+       if (rss_desc->shared_rss)
+               wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
 }
@@ -10900,6 +10934,25 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                     &cache->entry);
 }
 
+/**
+ * Release shared RSS action resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param srss
+ *   Shared RSS action index.
+ */
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_shared_action_rss *shared_rss;
+
+       shared_rss = mlx5_ipool_get
+                       (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
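+       /* Drop one reference; the action itself is destroyed via the shared action API. */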
+       __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+}
+
 void
 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
                            struct mlx5_cache_entry *entry)
@@ -10964,6 +11017,9 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
                flow_dv_port_id_action_resource_release(dev,
                                handle->rix_port_id_action);
                break;
+       case MLX5_FLOW_FATE_SHARED_RSS:
+               flow_dv_shared_rss_action_release(dev, handle->rix_srss);
+               break;
        default:
                DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
                break;
@@ -11130,13 +11186,6 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
        if (!flow)
                return;
        flow_dv_remove(dev, flow);
-       if (flow->shared_rss) {
-               struct mlx5_shared_action_rss *shared_rss = mlx5_ipool_get
-                               (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
-                                                             flow->shared_rss);
-
-               __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
-       }
        if (flow->counter) {
                flow_dv_counter_free(dev, flow->counter);
                flow->counter = 0;
@@ -11241,6 +11290,8 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
+ * @param[in] action_idx
+ *   Shared RSS action ipool index.
  * @param[in, out] action
  *   Partially initialized shared RSS action.
  * @param[out] error
@@ -11252,6 +11303,7 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
  */
 static int
 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
+                       uint32_t action_idx,
                        struct mlx5_shared_action_rss *action,
                        struct rte_flow_error *error)
 {
@@ -11263,7 +11315,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
        rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
        rss_desc.const_q = action->origin.queue;
        rss_desc.queue_num = action->origin.queue_num;
-       rss_desc.standalone = true;
+       /* Set non-zero value to indicate a shared RSS. */
+       rss_desc.shared_rss = action_idx;
        for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
                uint32_t hrxq_idx;
                uint64_t hash_fields = mlx5_rss_hash_fields[i];
@@ -11355,7 +11408,7 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
        memcpy(shared_action->queue, rss->queue, queue_size);
        origin->queue = shared_action->queue;
        origin->queue_num = rss->queue_num;
-       if (__flow_dv_action_rss_setup(dev, shared_action, error))
+       if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
                goto error_rss_init;
        __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
        rte_spinlock_lock(&priv->shared_act_sl);
@@ -12266,19 +12319,15 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
                .match_mask = (void *)&mask,
        };
        void *actions[2] = { 0 };
-       struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
+       struct mlx5_flow_tbl_resource *tbl = NULL;
        struct mlx5_devx_obj *dcs = NULL;
        void *matcher = NULL;
        void *flow = NULL;
-       int i, ret = -1;
+       int ret = -1;
 
        tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
        if (!tbl)
                goto err;
-       dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
-                                           NULL, 0, 0, NULL);
-       if (!dest_tbl)
-               goto err;
        dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
        if (!dcs)
                goto err;
@@ -12286,10 +12335,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
                                                    &actions[0]);
        if (ret)
                goto err;
-       ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
-                               (dest_tbl->obj, &actions[1]);
-       if (ret)
-               goto err;
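+       /* Reuse the pre-created drop queue action instead of a dedicated destination table. */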
+       actions[1] = priv->drop_queue.hrxq->action;
        dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
        ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
                                               &matcher);
@@ -12318,17 +12364,12 @@ err:
                                     "support detection");
                ret = 0;
        }
-       for (i = 0; i < 2; i++) {
-               if (actions[i])
-                       claim_zero(mlx5_flow_os_destroy_flow_action
-                                  (actions[i]));
-       }
+       if (actions[0])
+               claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
        if (matcher)
                claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
        if (tbl)
                flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
-       if (dest_tbl)
-               flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl);
        if (dcs)
                claim_zero(mlx5_devx_cmd_destroy(dcs));
        return ret;