net/ice/base: preserve NVM capabilities in safe mode
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index ebf405f..1c07c60 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -535,6 +535,207 @@ static const u8 dummy_udp_gtp_packet[] = {
        0x00, 0x00, 0x00, 0x00,
 };
 
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_UDP_OF,           34 },
+       { ICE_GTP,              42 },
+       { ICE_IPV4_IL,          62 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x08, 0x00,
+
+       0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
+       0x00, 0x00, 0x40, 0x00,
+       0x40, 0x11, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
+       0x00, 0x00, 0x40, 0x00,
+       0x40, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00,
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_UDP_OF,           34 },
+       { ICE_GTP,              42 },
+       { ICE_IPV6_IL,          62 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x08, 0x00,
+
+       0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
+       0x00, 0x00, 0x40, 0x00,
+       0x40, 0x11, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
+       0x00, 0x00, 0x3b, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00,
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_IPV6_OFOS,        14 },
+       { ICE_UDP_OF,           54 },
+       { ICE_GTP,              62 },
+       { ICE_IPV4_IL,          82 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x86, 0xdd,
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+       0x00, 0x58, 0x11, 0x00, /* Next header UDP */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
+       0x00, 0x00, 0x40, 0x00,
+       0x40, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00,
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_IPV6_OFOS,        14 },
+       { ICE_UDP_OF,           54 },
+       { ICE_GTP,              62 },
+       { ICE_IPV6_IL,          82 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x86, 0xdd,
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+       0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
+       0x00, 0x00, 0x3b, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00,
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_UDP_OF,           34 },
+       { ICE_GTP_NO_PAY,       42 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_IPV6_OFOS,        14 },
+       { ICE_UDP_OF,           54 },
+       { ICE_GTP_NO_PAY,       62 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
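
A note on the GTP-U offset tables added above: each entry is simply the cumulative byte offset of that header within the matching dummy packet, so the values follow from the fixed header sizes (14-byte Ethernet, 20- or 40-byte IP, 8-byte UDP, and 20 bytes of GTP-U header counting the sequence/N-PDU/next-extension words plus the PDU session container). A standalone sketch of the arithmetic, purely illustrative and not part of the patch:

#include <stdio.h>

int main(void)
{
	const unsigned int eth = 14, ipv4 = 20, ipv6 = 40, udp = 8;
	/* 8B mandatory GTP-U header + 4B seq/N-PDU/next-ext + 8B PDU session
	 * container, matching the dummy packets above.
	 */
	const unsigned int gtpu = 8 + 4 + 8;

	printf("IPv4 outer: UDP %u, GTP %u, inner IP %u\n",
	       eth + ipv4, eth + ipv4 + udp, eth + ipv4 + udp + gtpu);
	printf("IPv6 outer: UDP %u, GTP %u, inner IP %u\n",
	       eth + ipv6, eth + ipv6 + udp, eth + ipv6 + udp + gtpu);
	/* Prints 34/42/62 and 54/62/82, i.e. the values in the tables. */
	return 0;
}
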
@@ -1070,11 +1271,13 @@ static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
                }
 
                if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
-                   j <= ICE_PROFID_IPV6_GTPU_IPV6_OTHER)
+                   j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
                        gtp_valid = true;
 
-               if (j >= ICE_PROFID_IPV4_ESP &&
-                   j <= ICE_PROFID_IPV6_PFCP_SESSION)
+               if ((j >= ICE_PROFID_IPV4_ESP &&
+                    j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
+                   (j >= ICE_PROFID_IPV4_GTPC_TEID &&
+                    j <= ICE_PROFID_IPV6_GTPU_TEID))
                        flag_valid = true;
        }
 
@@ -1092,6 +1295,8 @@ static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
        else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
                 !pppoe_valid)
                tun_type = ICE_NON_TUN;
+       else
+               tun_type = ICE_NON_TUN;
 
        if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
                i = ice_is_bit_set(recipe_to_profile[rid],
@@ -1104,6 +1309,21 @@ static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
                        tun_type = ICE_SW_TUN_PPPOE_IPV6;
        }
 
+       if (tun_type == ICE_SW_TUN_GTP) {
+               if (ice_is_bit_set(recipe_to_profile[rid],
+                                  ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
+                       tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
+               else if (ice_is_bit_set(recipe_to_profile[rid],
+                                       ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
+                       tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
+               else if (ice_is_bit_set(recipe_to_profile[rid],
+                                       ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
+                       tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
+               else if (ice_is_bit_set(recipe_to_profile[rid],
+                                       ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
+                       tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
+       }
+
        if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
                for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
                        if (ice_is_bit_set(recipe_to_profile[rid], j)) {
@@ -1181,6 +1401,12 @@ static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
                                case ICE_PROFID_MAC_IPV6_L2TPV3:
                                        tun_type = ICE_SW_TUN_IPV6_L2TPV3;
                                        break;
+                               case ICE_PROFID_IPV4_GTPU_TEID:
+                                       tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
+                                       break;
+                               case ICE_PROFID_IPV6_GTPU_TEID:
+                                       tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
+                                       break;
                                default:
                                        break;
                                }
@@ -1365,7 +1591,7 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
        ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
        u16 i;
 
-       for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
+       for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
                u16 j;
 
                ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
@@ -1374,9 +1600,8 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
                        continue;
                ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
                              ICE_MAX_NUM_RECIPES);
-               for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
-                       if (ice_is_bit_set(r_bitmap, j))
-                               ice_set_bit(i, recipe_to_profile[j]);
+               ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
+                       ice_set_bit(i, recipe_to_profile[j]);
        }
 }
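
The ice_for_each_set_bit() conversion here (per-bit ice_is_bit_set() tests in this hunk, ice_find_next_bit() walks in later ones) replaces open-coded scans with a single iterate-over-set-bits helper from the base bitops code. A self-contained illustration of the equivalent pattern on a plain 32-bit map, with names and values invented for the example:

#include <stdio.h>

int main(void)
{
	/* Stand-in for an ice_bitmap_t such as r_bitmap above. */
	unsigned int r_bitmap = (1u << 3) | (1u << 17) | (1u << 30);
	unsigned int j;

	/* Rough equivalent of: ice_for_each_set_bit(j, r_bitmap, 32) */
	for (j = 0; j < 32; j++) {
		if (!(r_bitmap & (1u << j)))
			continue;
		printf("recipe %u is mapped to this profile\n", j);
	}
	return 0;
}
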
 
@@ -1421,7 +1646,7 @@ ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
  * @num_elems: pointer to number of elements
  * @cd: pointer to command details structure or NULL
  *
- * Get switch configuration (0x0200) to be placed in 'buff'.
+ * Get switch configuration (0x0200) to be placed in buf.
  * This admin command returns information such as initial VSI/port number
  * and switch ID it belongs to.
  *
@@ -1438,13 +1663,13 @@ ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
  * parsing the response buffer.
  */
 static enum ice_status
-ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
+ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
                  u16 buf_size, u16 *req_desc, u16 *num_elems,
                  struct ice_sq_cd *cd)
 {
        struct ice_aqc_get_sw_cfg *cmd;
-       enum ice_status status;
        struct ice_aq_desc desc;
+       enum ice_status status;
 
        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
        cmd = &desc.params.get_sw_conf;
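
As the reworked comment above describes, the get-switch-config response may not fit in one buffer; the firmware reports a non-zero req_desc when more elements remain, and the caller feeds that value back into the next call. A toy model of that continuation pattern, with a fake paged source standing in for the admin queue call (all names here are illustrative, not driver API):

#include <stdio.h>

/* Fake paged source: reports the element count for the requested "page" and
 * a non-zero continuation token while more pages remain.
 */
static int fake_get_cfg(unsigned short *req_desc, unsigned short *num_elems)
{
	static const unsigned short page_elems[] = { 4, 4, 2 };
	const unsigned short pages = sizeof(page_elems) / sizeof(page_elems[0]);
	unsigned short page = *req_desc;

	*num_elems = (page < pages) ? page_elems[page] : 0;
	*req_desc = (page + 1 < pages) ? page + 1 : 0;
	return 0;
}

int main(void)
{
	unsigned short req_desc = 0, num_elems = 0;

	do {
		if (fake_get_cfg(&req_desc, &num_elems))
			break;
		printf("got %u elements, next req_desc %u\n",
		       num_elems, req_desc);
	} while (req_desc);
	return 0;
}
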
@@ -1478,9 +1703,8 @@ ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
        enum ice_status status;
        u16 buf_len;
 
-       buf_len = sizeof(*sw_buf);
-       sw_buf = (struct ice_aqc_alloc_free_res_elem *)
-                  ice_malloc(hw, buf_len);
+       buf_len = ice_struct_size(sw_buf, elem, 1);
+       sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
        if (!sw_buf)
                return ICE_ERR_NO_MEMORY;
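
The ice_struct_size() conversions in this and the following allocation sites express the buffer size as "struct header plus one trailing element" rather than a bare sizeof(*sw_buf). The exact macro lives in the base-code headers; the sketch below only illustrates the assumed semantics, using a stand-in name and toy types:

#include <stdio.h>
#include <stddef.h>

/* Stand-in with the assumed semantics of ice_struct_size(): size of the
 * fixed header plus num trailing array elements.
 */
#define example_struct_size(ptr, field, num) \
	(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))

struct res_elem { unsigned short e_id; };
struct alloc_free_res { unsigned short num_elems; struct res_elem elem[]; };

int main(void)
{
	struct alloc_free_res *sw_buf = NULL;	/* only used inside sizeof */
	size_t buf_len = example_struct_size(sw_buf, elem, 1);

	printf("buf_len = %zu bytes\n", buf_len);
	return 0;
}
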
 
@@ -1560,9 +1784,8 @@ enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
        enum ice_status status, ret_status;
        u16 buf_len;
 
-       buf_len = sizeof(*sw_buf);
-       sw_buf = (struct ice_aqc_alloc_free_res_elem *)
-                  ice_malloc(hw, buf_len);
+       buf_len = ice_struct_size(sw_buf, elem, 1);
+       sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
        if (!sw_buf)
                return ICE_ERR_NO_MEMORY;
 
@@ -1600,8 +1823,7 @@ enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
        status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
                                       ice_aqc_opc_free_res, NULL);
        if (status) {
-               ice_debug(hw, ICE_DBG_SW,
-                         "VEB counter resource could not be freed\n");
+               ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
                ret_status = status;
        }
 
@@ -1995,8 +2217,7 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
                        return ICE_ERR_PARAM;
                break;
        default:
-               ice_debug(hw, ICE_DBG_SW,
-                         "Error due to unsupported rule_type %u\n", rule_type);
+               ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
                return ICE_ERR_OUT_OF_RANGE;
        }
 
@@ -2018,8 +2239,7 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
                         * than ICE_MAX_VSI, if not return with error.
                         */
                        if (id >= ICE_MAX_VSI) {
-                               ice_debug(hw, ICE_DBG_SW,
-                                         "Error VSI index (%u) out-of-range\n",
+                               ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
                                          id);
                                ice_free(hw, mr_list);
                                return ICE_ERR_OUT_OF_RANGE;
@@ -2103,9 +2323,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
        enum ice_status status;
        u16 buf_len;
 
-       buf_len = sizeof(*sw_buf);
-       sw_buf = (struct ice_aqc_alloc_free_res_elem *)
-               ice_malloc(hw, buf_len);
+       buf_len = ice_struct_size(sw_buf, elem, 1);
+       sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
        if (!sw_buf)
                return ICE_ERR_NO_MEMORY;
        sw_buf->num_elems = CPU_TO_LE16(1);
@@ -2148,7 +2367,7 @@ ice_aq_alloc_free_vsi_list_exit:
  * @hw: pointer to the HW struct
  * @bcast_thresh: represents the upper threshold for broadcast storm control
  * @mcast_thresh: represents the upper threshold for multicast storm control
- * @ctl_bitmask: storm control control knobs
+ * @ctl_bitmask: storm control knobs
  *
  * Sets the storm control configuration (0x0280)
  */
@@ -2175,7 +2394,7 @@ ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
  * @hw: pointer to the HW struct
  * @bcast_thresh: represents the upper threshold for broadcast storm control
  * @mcast_thresh: represents the upper threshold for multicast storm control
- * @ctl_bitmask: storm control control knobs
+ * @ctl_bitmask: storm control knobs
  *
  * Gets the storm control configuration (0x0281)
  */
@@ -2387,7 +2606,7 @@ enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
        enum ice_status status;
        u16 buf_len;
 
-       buf_len = sizeof(*sw_buf);
+       buf_len = ice_struct_size(sw_buf, elem, 1);
        sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
        if (!sw_buf)
                return ICE_ERR_NO_MEMORY;
@@ -2427,8 +2646,7 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
                pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
                break;
        default:
-               ice_debug(pi->hw, ICE_DBG_SW,
-                         "incorrect VSI/port type received\n");
+               ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
                break;
        }
 }
@@ -2438,7 +2656,7 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
  */
 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
 {
-       struct ice_aqc_get_sw_cfg_resp *rbuf;
+       struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
        enum ice_status status;
        u8 num_total_ports;
        u16 req_desc = 0;
@@ -2448,7 +2666,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
 
        num_total_ports = 1;
 
-       rbuf = (struct ice_aqc_get_sw_cfg_resp *)
+       rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
                ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
 
        if (!rbuf)
@@ -2460,19 +2678,19 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
         * writing a non-zero value in req_desc
         */
        do {
+               struct ice_aqc_get_sw_cfg_resp_elem *ele;
+
                status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
                                           &req_desc, &num_elems, NULL);
 
                if (status)
                        break;
 
-               for (i = 0; i < num_elems; i++) {
-                       struct ice_aqc_get_sw_cfg_resp_elem *ele;
+               for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
                        u16 pf_vf_num, swid, vsi_port_num;
                        bool is_vf = false;
                        u8 res_type;
 
-                       ele = rbuf[i].elements;
                        vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
                                ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
 
@@ -2492,8 +2710,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
                        case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
                        case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
                                if (j == num_total_ports) {
-                                       ice_debug(hw, ICE_DBG_SW,
-                                                 "more ports than expected\n");
+                                       ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
                                        status = ICE_ERR_CFG;
                                        goto out;
                                }
@@ -3487,8 +3704,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
                tmp_fltr_info.vsi_handle = rem_vsi_handle;
                status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
                if (status) {
-                       ice_debug(hw, ICE_DBG_SW,
-                                 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+                       ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
                                  tmp_fltr_info.fwd_id.hw_vsi_id, status);
                        return status;
                }
@@ -3504,8 +3720,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
                /* Remove the VSI list since it is no longer used */
                status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
                if (status) {
-                       ice_debug(hw, ICE_DBG_SW,
-                                 "Failed to remove VSI list %d, error %d\n",
+                       ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
                                  vsi_list_id, status);
                        return status;
                }
@@ -3613,17 +3828,18 @@ exit:
  * ice_aq_get_res_alloc - get allocated resources
  * @hw: pointer to the HW struct
  * @num_entries: pointer to u16 to store the number of resource entries returned
- * @buf: pointer to user-supplied buffer
- * @buf_size: size of buff
+ * @buf: pointer to buffer
+ * @buf_size: size of buf
  * @cd: pointer to command details structure or NULL
  *
- * The user-supplied buffer must be large enough to store the resource
+ * The caller-supplied buffer must be large enough to store the resource
  * information for all resource types. Each resource type is an
- * ice_aqc_get_res_resp_data_elem structure.
+ * ice_aqc_get_res_resp_elem structure.
  */
 enum ice_status
-ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
-                    u16 buf_size, struct ice_sq_cd *cd)
+ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
+                    struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
+                    struct ice_sq_cd *cd)
 {
        struct ice_aqc_get_res_alloc *resp;
        enum ice_status status;
@@ -3650,8 +3866,8 @@ ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
  * ice_aq_get_res_descs - get allocated resource descriptors
  * @hw: pointer to the hardware structure
  * @num_entries: number of resource entries in buffer
- * @buf: Indirect buffer to hold data parameters and response
- * @buf_size: size of buffer for indirect commands
+ * @buf: structure to hold response data buffer
+ * @buf_size: size of buffer
  * @res_type: resource type
  * @res_shared: is resource shared
  * @desc_id: input - first desc ID to start; output - next desc ID
@@ -3659,9 +3875,8 @@ ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
  */
 enum ice_status
 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
-                    struct ice_aqc_get_allocd_res_desc_resp *buf,
-                    u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
-                    struct ice_sq_cd *cd)
+                    struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
+                    bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
 {
        struct ice_aqc_get_allocd_res_desc *cmd;
        struct ice_aq_desc desc;
@@ -3966,8 +4181,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
                 */
                if (v_list_itr->vsi_count > 1 &&
                    v_list_itr->vsi_list_info->ref_cnt > 1) {
-                       ice_debug(hw, ICE_DBG_SW,
-                                 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
+                       ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
                        status = ICE_ERR_CFG;
                        goto exit;
                }
@@ -5181,8 +5395,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
                ice_remove_eth_mac(hw, &remove_list_head);
                break;
        case ICE_SW_LKUP_DFLT:
-               ice_debug(hw, ICE_DBG_SW,
-                         "Remove filters for this lookup type hasn't been implemented yet\n");
+               ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
                break;
        case ICE_SW_LKUP_LAST:
                ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
@@ -5253,9 +5466,8 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
        u16 buf_len;
 
        /* Allocate resource */
-       buf_len = sizeof(*buf);
-       buf = (struct ice_aqc_alloc_free_res_elem *)
-               ice_malloc(hw, buf_len);
+       buf_len = ice_struct_size(buf, elem, 1);
+       buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
        if (!buf)
                return ICE_ERR_NO_MEMORY;
 
@@ -5292,9 +5504,8 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
        u16 buf_len;
 
        /* Free resource */
-       buf_len = sizeof(*buf);
-       buf = (struct ice_aqc_alloc_free_res_elem *)
-               ice_malloc(hw, buf_len);
+       buf_len = ice_struct_size(buf, elem, 1);
+       buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
        if (!buf)
                return ICE_ERR_NO_MEMORY;
 
@@ -5306,8 +5517,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
        status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
                                       ice_aqc_opc_free_res, NULL);
        if (status)
-               ice_debug(hw, ICE_DBG_SW,
-                         "counter resource could not be freed\n");
+               ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
 
        ice_free(hw, buf);
        return status;
@@ -5354,9 +5564,8 @@ ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
                return ICE_ERR_PARAM;
 
        /* Allocate resource for large action */
-       buf_len = sizeof(*sw_buf);
-       sw_buf = (struct ice_aqc_alloc_free_res_elem *)
-               ice_malloc(hw, buf_len);
+       buf_len = ice_struct_size(sw_buf, elem, 1);
+       sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
        if (!sw_buf)
                return ICE_ERR_NO_MEMORY;
 
@@ -5615,6 +5824,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
        { ICE_ESP,              { 0, 2, 4, 6 } },
        { ICE_AH,               { 0, 2, 4, 6, 8, 10 } },
        { ICE_NAT_T,            { 8, 10, 12, 14 } },
+       { ICE_GTP_NO_PAY,       { 8, 10, 12, 14 } },
 };
 
 /* The following table describes preferred grouping of recipes.
@@ -5647,6 +5857,7 @@ static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
        { ICE_ESP,              ICE_ESP_HW },
        { ICE_AH,               ICE_AH_HW },
        { ICE_NAT_T,            ICE_UDP_ILOS_HW },
+       { ICE_GTP_NO_PAY,       ICE_UDP_ILOS_HW },
 };
 
 /**
@@ -5774,7 +5985,7 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
                        lkup_exts->fv_words[word].prot_id =
                                ice_prot_id_tbl[rule->type].protocol_id;
                        lkup_exts->field_mask[word] =
-                               BE16_TO_CPU(((__be16 *)&rule->m_u)[j]);
+                               BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
                        word++;
                }
 
@@ -5938,7 +6149,6 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
        ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
        ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
        ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
-       u16 count = 0;
        u16 bit;
 
        ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
@@ -5946,47 +6156,33 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
        ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
        ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
 
-       for (count = 0; count < ICE_MAX_FV_WORDS; count++)
-               ice_set_bit(count, possible_idx);
+       ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
 
        /* For each profile we are going to associate the recipe with, add the
         * recipes that are associated with that profile. This will give us
         * the set of recipes that our recipe may collide with. Also, determine
         * what possible result indexes are usable given this set of profiles.
         */
-       bit = 0;
-       while (ICE_MAX_NUM_PROFILES >
-              (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
+       ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
                ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
                              ICE_MAX_NUM_RECIPES);
                ice_and_bitmap(possible_idx, possible_idx,
                               hw->switch_info->prof_res_bm[bit],
                               ICE_MAX_FV_WORDS);
-               bit++;
        }
 
        /* For each recipe that our new recipe may collide with, determine
         * which indexes have been used.
         */
-       for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
-               if (ice_is_bit_set(recipes, bit)) {
-                       ice_or_bitmap(used_idx, used_idx,
-                                     hw->switch_info->recp_list[bit].res_idxs,
-                                     ICE_MAX_FV_WORDS);
-               }
+       ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
+               ice_or_bitmap(used_idx, used_idx,
+                             hw->switch_info->recp_list[bit].res_idxs,
+                             ICE_MAX_FV_WORDS);
 
        ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
 
        /* return number of free indexes */
-       count = 0;
-       bit = 0;
-       while (ICE_MAX_FV_WORDS >
-              (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
-               count++;
-               bit++;
-       }
-
-       return count;
+       return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
 }
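
The rewrite above also collapses the hand-rolled find-next-bit counting loop into one ice_bitmap_hweight() call over free_idx. The same computation on a plain 32-bit map, just to make the intent concrete (ICE_MAX_FV_WORDS is larger in the driver and the values here are arbitrary):

#include <stdio.h>

/* Population count via Kernighan's trick: clear the lowest set bit until
 * none remain.
 */
static unsigned int hweight32(unsigned int x)
{
	unsigned int count = 0;

	while (x) {
		x &= x - 1;
		count++;
	}
	return count;
}

int main(void)
{
	unsigned int possible_idx = 0x00ffffffu; /* usable for these profiles */
	unsigned int used_idx = 0x000000ffu;     /* taken by colliding recipes */
	unsigned int free_idx = possible_idx ^ used_idx;

	printf("free result indexes: %u\n", hweight32(free_idx)); /* 16 */
	return 0;
}
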
 
 /**
@@ -6100,8 +6296,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
                         * that can be used.
                         */
                        if (chain_idx >= ICE_MAX_FV_WORDS) {
-                               ice_debug(hw, ICE_DBG_SW,
-                                         "No chain index available\n");
+                               ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
                                status = ICE_ERR_MAX_LIMIT;
                                goto err_unroll;
                        }
@@ -6529,6 +6724,38 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
        case ICE_SW_IPV6_UDP:
                ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
                return;
+       case ICE_SW_TUN_IPV4_GTPU_IPV4:
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
+               return;
+       case ICE_SW_TUN_IPV6_GTPU_IPV4:
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
+               return;
+       case ICE_SW_TUN_IPV4_GTPU_IPV6:
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
+               ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
+               return;
+       case ICE_SW_TUN_IPV6_GTPU_IPV6:
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
+               ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
+               return;
        case ICE_SW_TUN_AND_NON_TUN:
        default:
                prof_type = ICE_PROF_ALL;
@@ -6666,18 +6893,17 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
        if (LIST_EMPTY(&rm->fv_list)) {
                u16 j;
 
-               for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
-                       if (ice_is_bit_set(fv_bitmap, j)) {
-                               struct ice_sw_fv_list_entry *fvl;
-
-                               fvl = (struct ice_sw_fv_list_entry *)
-                                       ice_malloc(hw, sizeof(*fvl));
-                               if (!fvl)
-                                       goto err_unroll;
-                               fvl->fv_ptr = NULL;
-                               fvl->profile_id = j;
-                               LIST_ADD(&fvl->list_entry, &rm->fv_list);
-                       }
+               ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
+                       struct ice_sw_fv_list_entry *fvl;
+
+                       fvl = (struct ice_sw_fv_list_entry *)
+                               ice_malloc(hw, sizeof(*fvl));
+                       if (!fvl)
+                               goto err_unroll;
+                       fvl->fv_ptr = NULL;
+                       fvl->profile_id = j;
+                       LIST_ADD(&fvl->list_entry, &rm->fv_list);
+               }
        }
 
        /* get bitmap of all profiles the recipe will be associated with */
@@ -6732,10 +6958,9 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
                              ICE_MAX_NUM_RECIPES);
 
                /* Update recipe to profile bitmap array */
-               for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
-                       if (ice_is_bit_set(r_bitmap, j))
-                               ice_set_bit((u16)fvit->profile_id,
-                                           recipe_to_profile[j]);
+               ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
+                       ice_set_bit((u16)fvit->profile_id,
+                                   recipe_to_profile[j]);
        }
 
        *rid = rm->root_rid;
@@ -6821,6 +7046,38 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
                        tcp = true;
        }
 
+       if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
+               *pkt = dummy_ipv4_gtpu_ipv4_packet;
+               *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
+               *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
+               return;
+       } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
+               *pkt = dummy_ipv6_gtpu_ipv6_packet;
+               *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
+               *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
+               return;
+       } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
+               *pkt = dummy_ipv4_gtpu_ipv4_packet;
+               *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
+               *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
+               return;
+       } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
+               *pkt = dummy_ipv4_gtpu_ipv6_packet;
+               *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
+               *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
+               return;
+       } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
+               *pkt = dummy_ipv6_gtpu_ipv4_packet;
+               *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
+               *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
+               return;
+       } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
+               *pkt = dummy_ipv6_gtpu_ipv6_packet;
+               *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
+               *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
+               return;
+       }
+
        if (tun_type == ICE_SW_TUN_IPV4_ESP) {
                *pkt = dummy_ipv4_esp_pkt;
                *pkt_len = sizeof(dummy_ipv4_esp_pkt);
@@ -7135,6 +7392,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
                        break;
 
                case ICE_GTP:
+               case ICE_GTP_NO_PAY:
                        len = sizeof(struct ice_udp_gtp_hdr);
                        break;
                case ICE_PPPOE:
@@ -7679,8 +7937,7 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
                 */
                status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
                if (status) {
-                       ice_debug(hw, ICE_DBG_SW,
-                                 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+                       ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
                                  tmp_fltr.fwd_id.hw_vsi_id, status);
                        return status;
                }
@@ -7689,8 +7946,7 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
                /* Remove the VSI list since it is no longer used */
                status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
                if (status) {
-                       ice_debug(hw, ICE_DBG_SW,
-                                 "Failed to remove VSI list %d, error %d\n",
+                       ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
                                  vsi_list_id, status);
                        return status;
                }
@@ -7858,13 +8114,12 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
  */
 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
 {
-       struct ice_adv_fltr_mgmt_list_entry *list_itr;
+       struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
        struct ice_vsi_list_map_info *map_info;
        struct LIST_HEAD_TYPE *list_head;
        struct ice_adv_rule_info rinfo;
        struct ice_switch_info *sw;
        enum ice_status status;
-       u16 vsi_list_id = 0;
        u8 rid;
 
        sw = hw->switch_info;
@@ -7873,22 +8128,31 @@ enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
                        continue;
                if (!sw->recp_list[rid].adv_rule)
                        continue;
+
                list_head = &sw->recp_list[rid].filt_rules;
-               map_info = NULL;
-               LIST_FOR_EACH_ENTRY(list_itr, list_head,
-                                   ice_adv_fltr_mgmt_list_entry, list_entry) {
-                       map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
-                                                          vsi_handle,
-                                                          &vsi_list_id);
-                       if (!map_info)
-                               continue;
+               LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
+                                        ice_adv_fltr_mgmt_list_entry,
+                                        list_entry) {
                        rinfo = list_itr->rule_info;
+
+                       if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
+                               map_info = list_itr->vsi_list_info;
+                               if (!map_info)
+                                       continue;
+
+                               if (!ice_is_bit_set(map_info->vsi_map,
+                                                   vsi_handle))
+                                       continue;
+                       } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
+                               continue;
+                       }
+
                        rinfo.sw_act.vsi_handle = vsi_handle;
                        status = ice_rem_adv_rule(hw, list_itr->lkups,
                                                  list_itr->lkups_cnt, &rinfo);
+
                        if (status)
                                return status;
-                       map_info = NULL;
                }
        }
        return ICE_SUCCESS;
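
The switch to LIST_FOR_EACH_ENTRY_SAFE above matters because ice_rem_adv_rule() can unlink and free the entry currently being visited, so the next pointer has to be cached before the loop body runs. The same idea on a minimal singly linked list, independent of the driver's list macros:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *cur, *next;
	int i;

	for (i = 0; i < 4; i++) {		/* build a small list: 3, 2, 1, 0 */
		cur = malloc(sizeof(*cur));
		if (!cur)
			return 1;
		cur->id = i;
		cur->next = head;
		head = cur;
	}

	/* "Safe" traversal: save cur->next before the body may free cur. */
	for (cur = head; cur; cur = next) {
		next = cur->next;
		if (cur->id % 2 == 0)
			printf("removing rule that matches entry %d\n", cur->id);
		free(cur);		/* freeing cur is fine; next was saved */
	}
	return 0;
}
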
@@ -7925,6 +8189,7 @@ ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
        LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
                            list_entry) {
                struct ice_fltr_list_entry f_entry;
+               u16 vsi_handle;
 
                f_entry.fltr_info = itr->fltr_info;
                if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
@@ -7936,12 +8201,8 @@ ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
                }
 
                /* Add a filter per VSI separately */
-               while (1) {
-                       u16 vsi_handle;
-
-                       vsi_handle =
-                               ice_find_first_bit(itr->vsi_list_info->vsi_map,
-                                                  ICE_MAX_VSI);
+               ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
+                                    ICE_MAX_VSI) {
                        if (!ice_is_vsi_valid(hw, vsi_handle))
                                break;