net/ice/base: fix build with GCC 12
[dpdk.git] drivers/net/ice/ice_switch_filter.c
index eeed386..36c9bff 100644
@@ -31,6 +31,7 @@
 #define ICE_PPP_IPV4_PROTO     0x0021
 #define ICE_PPP_IPV6_PROTO     0x0057
 #define ICE_IPV4_PROTO_NVGRE   0x002F
+#define ICE_SW_PRI_BASE 6
 
 #define ICE_SW_INSET_ETHER ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
        ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
        ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
+#define ICE_SW_INSET_MAC_QINQ_IPV4_TCP ( \
+       ICE_SW_INSET_MAC_QINQ_IPV4 | \
+       ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
+#define ICE_SW_INSET_MAC_QINQ_IPV4_UDP ( \
+       ICE_SW_INSET_MAC_QINQ_IPV4 | \
+       ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV6_NEXT_HDR)
 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
        ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
+#define ICE_SW_INSET_MAC_QINQ_IPV6_TCP ( \
+       ICE_SW_INSET_MAC_QINQ_IPV6 | \
+       ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
+#define ICE_SW_INSET_MAC_QINQ_IPV6_UDP ( \
+       ICE_SW_INSET_MAC_QINQ_IPV6 | \
+       ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
@@ -167,11 +180,33 @@ struct sw_meta {
        struct ice_adv_rule_info rule_info;
 };
 
+enum ice_sw_fltr_status {
+       ICE_SW_FLTR_ADDED,
+       ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT,
+       ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT,
+};
+
+struct ice_switch_filter_conf {
+       enum ice_sw_fltr_status fltr_status;
+
+       struct ice_rule_query_data sw_query_data;
+
+       /*
+        * The VSI number, lookup list and rule info are saved here when
+        * filter creation succeeds, so that a failed redirect can be
+        * recovered and the rule replayed later.
+        */
+       uint16_t vsi_num;
+       uint16_t lkups_num;
+       struct ice_adv_lkup_elem *lkups;
+       struct ice_adv_rule_info rule_info;
+};
+
 static struct ice_flow_parser ice_switch_dist_parser;
 static struct ice_flow_parser ice_switch_perm_parser;
 
 static struct
 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
+       {pattern_any,                                   ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_ethertype,                             ICE_SW_INSET_ETHER,                     ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_ethertype_vlan,                        ICE_SW_INSET_MAC_VLAN,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_ethertype_qinq,                        ICE_SW_INSET_MAC_QINQ,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
@@ -216,7 +251,11 @@ ice_pattern_match_item ice_switch_pattern_dist_list[] = {
        {pattern_eth_ipv4_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_ipv6_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_qinq_ipv4,                         ICE_SW_INSET_MAC_QINQ_IPV4,             ICE_INSET_NONE,                         ICE_INSET_NONE},
+       {pattern_eth_qinq_ipv4_tcp,                     ICE_SW_INSET_MAC_QINQ_IPV4_TCP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
+       {pattern_eth_qinq_ipv4_udp,                     ICE_SW_INSET_MAC_QINQ_IPV4_UDP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_qinq_ipv6,                         ICE_SW_INSET_MAC_QINQ_IPV6,             ICE_INSET_NONE,                         ICE_INSET_NONE},
+       {pattern_eth_qinq_ipv6_tcp,                     ICE_SW_INSET_MAC_QINQ_IPV6_TCP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
+       {pattern_eth_qinq_ipv6_udp,                     ICE_SW_INSET_MAC_QINQ_IPV6_UDP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
@@ -251,6 +290,7 @@ ice_pattern_match_item ice_switch_pattern_dist_list[] = {
 
 static struct
 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
+       {pattern_any,                                   ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_ethertype,                             ICE_SW_INSET_ETHER,                     ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_ethertype_vlan,                        ICE_SW_INSET_MAC_VLAN,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_ethertype_qinq,                        ICE_SW_INSET_MAC_QINQ,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
@@ -295,7 +335,11 @@ ice_pattern_match_item ice_switch_pattern_perm_list[] = {
        {pattern_eth_ipv4_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_ipv6_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_qinq_ipv4,                         ICE_SW_INSET_MAC_QINQ_IPV4,             ICE_INSET_NONE,                         ICE_INSET_NONE},
+       {pattern_eth_qinq_ipv4_tcp,                     ICE_SW_INSET_MAC_QINQ_IPV4_TCP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
+       {pattern_eth_qinq_ipv4_udp,                     ICE_SW_INSET_MAC_QINQ_IPV4_UDP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_qinq_ipv6,                         ICE_SW_INSET_MAC_QINQ_IPV6,             ICE_INSET_NONE,                         ICE_INSET_NONE},
+       {pattern_eth_qinq_ipv6_tcp,                     ICE_SW_INSET_MAC_QINQ_IPV6_TCP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
+       {pattern_eth_qinq_ipv6_udp,                     ICE_SW_INSET_MAC_QINQ_IPV6_UDP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
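
The two hunks above register QinQ + TCP/UDP entries in both the distributor and permission pattern lists, so a double-VLAN IPv4 flow with L4 ports can now be offloaded through the switch filter. A minimal, hypothetical rte_flow setup that would map to pattern_eth_qinq_ipv4_tcp (port id, queue index and the TCP port are made up for illustration):

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Illustrative only: steer double-VLAN IPv4/TCP traffic with TCP dst
 * port 80 to queue 1; all values are examples, not taken from the patch.
 */
static struct rte_flow *
add_qinq_tcp_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_tcp tcp_spec = { .hdr.dst_port = RTE_BE16(80) };
	struct rte_flow_item_tcp tcp_mask = { .hdr.dst_port = RTE_BE16(0xffff) };
	struct rte_flow_action_queue queue = { .index = 1 };

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer MAC     */
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },	/* outer VLAN    */
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },	/* inner VLAN    */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* IPv4 header   */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
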
@@ -338,7 +382,7 @@ ice_switch_create(struct ice_adapter *ad,
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_rule_query_data rule_added = {0};
-       struct ice_rule_query_data *filter_ptr;
+       struct ice_switch_filter_conf *filter_conf_ptr;
        struct ice_adv_lkup_elem *list =
                ((struct sw_meta *)meta)->list;
        uint16_t lkups_cnt =
@@ -358,28 +402,48 @@ ice_switch_create(struct ice_adapter *ad,
                        "lookup list should not be NULL");
                goto error;
        }
+
+       if (ice_dcf_adminq_need_retry(ad)) {
+               rte_flow_error_set(error, EAGAIN,
+                       RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                       "DCF is not on");
+               goto error;
+       }
+
        ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
        if (!ret) {
-               filter_ptr = rte_zmalloc("ice_switch_filter",
-                       sizeof(struct ice_rule_query_data), 0);
-               if (!filter_ptr) {
+               filter_conf_ptr = rte_zmalloc("ice_switch_filter",
+                       sizeof(struct ice_switch_filter_conf), 0);
+               if (!filter_conf_ptr) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "No memory for ice_switch_filter");
                        goto error;
                }
-               flow->rule = filter_ptr;
-               rte_memcpy(filter_ptr,
-                       &rule_added,
-                       sizeof(struct ice_rule_query_data));
+
+               filter_conf_ptr->sw_query_data = rule_added;
+
+               filter_conf_ptr->vsi_num =
+                       ice_get_hw_vsi_num(hw, rule_info->sw_act.vsi_handle);
+               filter_conf_ptr->lkups = list;
+               filter_conf_ptr->lkups_num = lkups_cnt;
+               filter_conf_ptr->rule_info = *rule_info;
+
+               filter_conf_ptr->fltr_status = ICE_SW_FLTR_ADDED;
+
+               flow->rule = filter_conf_ptr;
        } else {
-               rte_flow_error_set(error, EINVAL,
+               if (ice_dcf_adminq_need_retry(ad))
+                       ret = -EAGAIN;
+               else
+                       ret = -EINVAL;
+
+               rte_flow_error_set(error, -ret,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "switch filter create flow fail");
                goto error;
        }
 
-       rte_free(list);
        rte_free(meta);
        return 0;
 
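ice_switch_create() now distinguishes a transient failure, reported as EAGAIN while the DCF admin queue is unavailable, from a permanent EINVAL. A hypothetical caller-side retry loop under that assumption (the retry count and delay are made up):

#include <errno.h>
#include <unistd.h>
#include <rte_errno.h>
#include <rte_flow.h>

/* Illustrative only: retry rte_flow_create() while the PMD reports
 * EAGAIN (DCF admin queue temporarily down).
 */
static struct rte_flow *
create_flow_with_retry(uint16_t port_id,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *err)
{
	struct rte_flow *flow;
	int tries = 5;

	do {
		flow = rte_flow_create(port_id, attr, pattern, actions, err);
		if (flow != NULL || rte_errno != EAGAIN)
			return flow;
		usleep(10 * 1000);	/* give the DCF time to come back */
	} while (--tries > 0);

	return NULL;
}
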
@@ -390,6 +454,18 @@ error:
        return -rte_errno;
 }
 
+static inline void
+ice_switch_filter_rule_free(struct rte_flow *flow)
+{
+       struct ice_switch_filter_conf *filter_conf_ptr =
+               (struct ice_switch_filter_conf *)flow->rule;
+
+       if (filter_conf_ptr)
+               rte_free(filter_conf_ptr->lkups);
+
+       rte_free(filter_conf_ptr);
+}
+
 static int
 ice_switch_destroy(struct ice_adapter *ad,
                struct rte_flow *flow,
@@ -397,37 +473,47 @@ ice_switch_destroy(struct ice_adapter *ad,
 {
        struct ice_hw *hw = &ad->hw;
        int ret;
-       struct ice_rule_query_data *filter_ptr;
+       struct ice_switch_filter_conf *filter_conf_ptr;
 
-       filter_ptr = (struct ice_rule_query_data *)
+       filter_conf_ptr = (struct ice_switch_filter_conf *)
                flow->rule;
 
-       if (!filter_ptr) {
+       if (!filter_conf_ptr ||
+           filter_conf_ptr->fltr_status == ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "no such flow"
                        " create by switch filter");
+
+               ice_switch_filter_rule_free(flow);
+
                return -rte_errno;
        }
 
-       ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
+       if (ice_dcf_adminq_need_retry(ad)) {
+               rte_flow_error_set(error, EAGAIN,
+                       RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                       "DCF is not on");
+               return -rte_errno;
+       }
+
+       ret = ice_rem_adv_rule_by_id(hw, &filter_conf_ptr->sw_query_data);
        if (ret) {
-               rte_flow_error_set(error, EINVAL,
+               if (ice_dcf_adminq_need_retry(ad))
+                       ret = -EAGAIN;
+               else
+                       ret = -EINVAL;
+
+               rte_flow_error_set(error, -ret,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "fail to destroy switch filter rule");
                return -rte_errno;
        }
 
-       rte_free(filter_ptr);
+       ice_switch_filter_rule_free(flow);
        return ret;
 }
 
-static void
-ice_switch_filter_rule_free(struct rte_flow *flow)
-{
-       rte_free(flow->rule);
-}
-
 static bool
 ice_switch_parse_pattern(const struct rte_flow_item pattern[],
                struct rte_flow_error *error,
@@ -498,6 +584,10 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
                item_type = item->type;
 
                switch (item_type) {
+               case RTE_FLOW_ITEM_TYPE_ANY:
+                       *tun_type = ICE_SW_TUN_AND_NON_TUN;
+                       break;
+
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
@@ -1350,7 +1440,7 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
                                return false;
                        }
                        if (gtp_psc_spec && gtp_psc_mask) {
-                               if (gtp_psc_mask->pdu_type) {
+                               if (gtp_psc_mask->hdr.type) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
@@ -1358,13 +1448,13 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
                                        return false;
                                }
                                input = &outer_input_set;
-                               if (gtp_psc_mask->qfi)
+                               if (gtp_psc_mask->hdr.qfi)
                                        *input |= ICE_INSET_GTPU_QFI;
                                list[t].type = ICE_GTP;
                                list[t].h_u.gtp_hdr.qfi =
-                                       gtp_psc_spec->qfi;
+                                       gtp_psc_spec->hdr.qfi;
                                list[t].m_u.gtp_hdr.qfi =
-                                       gtp_psc_mask->qfi;
+                                       gtp_psc_mask->hdr.qfi;
                                input_set_byte += 1;
                                t++;
                        }
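
The hunk above moves from the deprecated flat pdu_type/qfi fields to the hdr sub-structure of struct rte_flow_item_gtp_psc. A minimal sketch of how an application fills the item under the new layout (the QFI value is made up):

#include <rte_flow.h>

/* Illustrative only: match GTP-U PSC packets with QFI 0x09 using the
 * hdr-based layout; only the QFI field is masked.
 */
static const struct rte_flow_item_gtp_psc psc_spec = {
	.hdr = { .qfi = 0x09 },
};
static const struct rte_flow_item_gtp_psc psc_mask = {
	.hdr = { .qfi = 0x3f },		/* QFI is a 6-bit field */
};
static const struct rte_flow_item psc_item = {
	.type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
	.spec = &psc_spec,
	.mask = &psc_mask,
};
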
@@ -1572,7 +1662,10 @@ ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
        rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
        rule_info->sw_act.flag = ICE_FLTR_RX;
        rule_info->rx = 1;
-       rule_info->priority = priority + 5;
+       /* Priority 0 is the lowest recipe priority but the highest
+        * rte_flow priority, so convert the rte_flow priority into a
+        * recipe priority here.
+        */
+       rule_info->priority = ICE_SW_PRI_BASE - priority;
 
        return 0;
 }
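
With ICE_SW_PRI_BASE defined as 6 in the first hunk, the conversion above simply mirrors the scale; a small sketch of the mapping (the accepted rte_flow priority range is enforced elsewhere and is not shown in this hunk):

/* Illustrative only: rte_flow priority 0 is the highest, recipe
 * priority ICE_SW_PRI_BASE (6) is the highest, so 0 -> 6, 1 -> 5, ...
 */
static inline uint32_t
ice_sw_recipe_priority(uint32_t rte_flow_priority)
{
	return ICE_SW_PRI_BASE - rte_flow_priority;
}
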
@@ -1651,7 +1744,10 @@ ice_switch_parse_action(struct ice_pf *pf,
        rule_info->sw_act.vsi_handle = vsi->idx;
        rule_info->rx = 1;
        rule_info->sw_act.src = vsi->idx;
-       rule_info->priority = priority + 5;
+       /* Priority 0 is the lowest recipe priority but the highest
+        * rte_flow priority, so convert the rte_flow priority into a
+        * recipe priority here.
+        */
+       rule_info->priority = ICE_SW_PRI_BASE - priority;
 
        return 0;
 
@@ -1861,8 +1957,12 @@ ice_switch_redirect(struct ice_adapter *ad,
                    struct rte_flow *flow,
                    struct ice_flow_redirect *rd)
 {
-       struct ice_rule_query_data *rdata = flow->rule;
+       struct ice_rule_query_data *rdata;
+       struct ice_switch_filter_conf *filter_conf_ptr =
+               (struct ice_switch_filter_conf *)flow->rule;
+       struct ice_rule_query_data added_rdata = { 0 };
        struct ice_adv_fltr_mgmt_list_entry *list_itr;
+       struct ice_adv_lkup_elem *lkups_ref = NULL;
        struct ice_adv_lkup_elem *lkups_dp = NULL;
        struct LIST_HEAD_TYPE *list_head;
        struct ice_adv_rule_info rinfo;
@@ -1871,6 +1971,8 @@ ice_switch_redirect(struct ice_adapter *ad,
        uint16_t lkups_cnt;
        int ret;
 
+       rdata = &filter_conf_ptr->sw_query_data;
+
        if (rdata->vsi_handle != rd->vsi_handle)
                return 0;
 
@@ -1881,59 +1983,117 @@ ice_switch_redirect(struct ice_adapter *ad,
        if (rd->type != ICE_FLOW_REDIRECT_VSI)
                return -ENOTSUP;
 
-       list_head = &sw->recp_list[rdata->rid].filt_rules;
-       LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
-                           list_entry) {
-               rinfo = list_itr->rule_info;
-               if ((rinfo.fltr_rule_id == rdata->rule_id &&
-                   rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
-                   rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
-                   (rinfo.fltr_rule_id == rdata->rule_id &&
-                   rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
-                       lkups_cnt = list_itr->lkups_cnt;
-                       lkups_dp = (struct ice_adv_lkup_elem *)
-                               ice_memdup(hw, list_itr->lkups,
-                                          sizeof(*list_itr->lkups) *
-                                          lkups_cnt, ICE_NONDMA_TO_NONDMA);
-
-                       if (!lkups_dp) {
-                               PMD_DRV_LOG(ERR, "Failed to allocate memory.");
-                               return -EINVAL;
-                       }
+       switch (filter_conf_ptr->fltr_status) {
+       case ICE_SW_FLTR_ADDED:
+               list_head = &sw->recp_list[rdata->rid].filt_rules;
+               LIST_FOR_EACH_ENTRY(list_itr, list_head,
+                                   ice_adv_fltr_mgmt_list_entry,
+                                   list_entry) {
+                       rinfo = list_itr->rule_info;
+                       if ((rinfo.fltr_rule_id == rdata->rule_id &&
+                           rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
+                           rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
+                           (rinfo.fltr_rule_id == rdata->rule_id &&
+                           rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
+                               lkups_cnt = list_itr->lkups_cnt;
+
+                               lkups_dp = (struct ice_adv_lkup_elem *)
+                                       ice_memdup(hw, list_itr->lkups,
+                                                  sizeof(*list_itr->lkups) *
+                                                  lkups_cnt,
+                                                  ICE_NONDMA_TO_NONDMA);
+                               if (!lkups_dp) {
+                                       PMD_DRV_LOG(ERR,
+                                                   "Failed to allocate memory.");
+                                       return -EINVAL;
+                               }
+                               lkups_ref = lkups_dp;
 
-                       if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
-                               rinfo.sw_act.vsi_handle = rd->vsi_handle;
-                               rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
+                               if (rinfo.sw_act.fltr_act ==
+                                   ICE_FWD_TO_VSI_LIST) {
+                                       rinfo.sw_act.vsi_handle =
+                                               rd->vsi_handle;
+                                       rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
+                               }
+                               break;
                        }
-                       break;
                }
-       }
 
-       if (!lkups_dp)
+               if (!lkups_ref)
+                       return -EINVAL;
+
+               goto rmv_rule;
+       case ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT:
+               /* Recover VSI context */
+               hw->vsi_ctx[rd->vsi_handle]->vsi_num = filter_conf_ptr->vsi_num;
+               rinfo = filter_conf_ptr->rule_info;
+               lkups_cnt = filter_conf_ptr->lkups_num;
+               lkups_ref = filter_conf_ptr->lkups;
+
+               if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
+                       rinfo.sw_act.vsi_handle = rd->vsi_handle;
+                       rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
+               }
+
+               goto rmv_rule;
+       case ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT:
+               rinfo = filter_conf_ptr->rule_info;
+               lkups_cnt = filter_conf_ptr->lkups_num;
+               lkups_ref = filter_conf_ptr->lkups;
+
+               goto add_rule;
+       default:
                return -EINVAL;
+       }
+
+rmv_rule:
+       if (ice_dcf_adminq_need_retry(ad)) {
+               PMD_DRV_LOG(WARNING, "DCF is not on");
+               ret = -EAGAIN;
+               goto out;
+       }
 
        /* Remove the old rule */
-       ret = ice_rem_adv_rule(hw, list_itr->lkups,
-                              lkups_cnt, &rinfo);
+       ret = ice_rem_adv_rule(hw, lkups_ref, lkups_cnt, &rinfo);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
                            rdata->rule_id);
+               filter_conf_ptr->fltr_status =
+                       ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT;
                ret = -EINVAL;
                goto out;
        }
 
+add_rule:
+       if (ice_dcf_adminq_need_retry(ad)) {
+               PMD_DRV_LOG(WARNING, "DCF is not on");
+               ret = -EAGAIN;
+               goto out;
+       }
+
        /* Update VSI context */
        hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
 
        /* Replay the rule */
-       ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
-                              &rinfo, rdata);
+       ret = ice_add_adv_rule(hw, lkups_ref, lkups_cnt,
+                              &rinfo, &added_rdata);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to replay the rule");
+               filter_conf_ptr->fltr_status =
+                       ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT;
                ret = -EINVAL;
+       } else {
+               filter_conf_ptr->sw_query_data = added_rdata;
+               /* Save VSI number for failure recovery */
+               filter_conf_ptr->vsi_num = rd->new_vsi_num;
+               filter_conf_ptr->fltr_status = ICE_SW_FLTR_ADDED;
        }
 
 out:
+       if (ret == -EINVAL)
+               if (ice_dcf_adminq_need_retry(ad))
+                       ret = -EAGAIN;
+
        ice_free(hw, lkups_dp);
        return ret;
 }
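
For reference, the redirect path above keys its recovery behaviour off fltr_status. A hedged summary of the three states, written as a hypothetical helper (the function itself is not part of the patch):

/* Illustrative only: what each state in the hunk above leads to on the
 * next ice_switch_redirect() call.
 */
static const char *
ice_sw_fltr_status_action(enum ice_sw_fltr_status status)
{
	switch (status) {
	case ICE_SW_FLTR_ADDED:
		/* normal path: look up the live rule, remove it, replay it */
		return "remove old rule, then replay it on the new VSI";
	case ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT:
		/* removal failed earlier: restore the saved vsi_num first */
		return "restore saved VSI number, retry remove, then replay";
	case ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT:
		/* removal already done: only the replay is outstanding */
		return "skip remove, retry the replay only";
	default:
		return "invalid state";
	}
}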