net/ice/base: fix RSS interference
[dpdk.git] / drivers / net / ice / base / ice_flex_pipe.c
index c874246..999ad6b 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2019
+ * Copyright(c) 2001-2020 Intel Corporation
  */
 
 #include "ice_common.h"
@@ -7,19 +7,12 @@
 #include "ice_protocol_type.h"
 #include "ice_flow.h"
 
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
 static const struct ice_tunnel_type_scan tnls[] = {
-       { TNL_VXLAN,            "TNL_VXLAN" },
-       { TNL_GTPC,             "TNL_GTPC" },
-       { TNL_GTPC_TEID,        "TNL_GTPC_TEID" },
-       { TNL_GTPU,             "TNL_GTPC" },
-       { TNL_GTPU_TEID,        "TNL_GTPU_TEID" },
-       { TNL_VXLAN_GPE,        "TNL_VXLAN_GPE" },
-       { TNL_GENEVE,           "TNL_GENEVE" },
-       { TNL_NAT,              "TNL_NAT" },
-       { TNL_ROCE_V2,          "TNL_ROCE_V2" },
-       { TNL_MPLSO_UDP,        "TNL_MPLSO_UDP" },
-       { TNL_UDP2_END,         "TNL_UDP2_END" },
-       { TNL_UPD_END,          "TNL_UPD_END" },
+       { TNL_VXLAN,            "TNL_VXLAN_PF" },
+       { TNL_GENEVE,           "TNL_GENEVE_PF" },
        { TNL_LAST,             "" }
 };
 
@@ -138,10 +131,11 @@ static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
 {
        struct ice_nvm_table *nvms;
 
-       nvms = (struct ice_nvm_table *)(ice_seg->device_table +
-               LE32_TO_CPU(ice_seg->device_table_count));
+       nvms = (struct ice_nvm_table *)
+               (ice_seg->device_table +
+                LE32_TO_CPU(ice_seg->device_table_count));
 
-       return (struct ice_buf_table *)
+       return (_FORCE_ struct ice_buf_table *)
                (nvms->vers + LE32_TO_CPU(nvms->table_count));
 }
 
@@ -405,7 +399,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
  * Handles enumeration of individual label entries.
  */
 static void *
-ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
+ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
                       u32 *offset)
 {
        struct ice_label_section *labels;
@@ -468,7 +462,7 @@ ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
  * since the first call to ice_enum_labels requires a pointer to an actual
  * ice_seg structure.
  */
-void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
 {
        struct ice_pkg_enum state;
        char *label_name;
@@ -476,6 +470,7 @@ void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
        int i;
 
        ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
+       ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 
        if (!ice_seg)
                return;
@@ -485,8 +480,17 @@ void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
 
        while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
                for (i = 0; tnls[i].type != TNL_LAST; i++) {
-                       if (!strncmp(label_name, tnls[i].label_prefix,
-                                    strlen(tnls[i].label_prefix))) {
+                       size_t len = strlen(tnls[i].label_prefix);
+
+                       /* Look for matching label start before continuing */
+                       if (strncmp(label_name, tnls[i].label_prefix, len))
+                               continue;
+
+                       /* Make sure this label matches our PF. Note that the PF
+                        * character ('0' - '7') will be located where our
+                        * prefix string's null terminator is located.
+                        */
+                       if ((label_name[len] - '0') == hw->pf_id) {
                                hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
                                hw->tnl.tbl[hw->tnl.count].valid = false;
                                hw->tnl.tbl[hw->tnl.count].in_use = false;
@@ -606,7 +610,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
 {
        u16 count = 0;
-       u16 i, j;
+       u16 i;
 
        /* check each byte */
        for (i = 0; i < size; i++) {
@@ -622,11 +626,9 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
                        return false;
 
                /* count the bits in this byte, checking threshold */
-               for (j = 0; j < BITS_PER_BYTE; j++) {
-                       count += (mask[i] & (0x1 << j)) ? 1 : 0;
-                       if (count > max)
-                               return false;
-               }
+               count += ice_hweight8(mask[i]);
+               if (count > max)
+                       return false;
        }
 
        return true;
@@ -638,7 +640,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
  * @size: the size of the complete key in bytes (must be even)
  * @val: array of 8-bit values that makes up the value portion of the key
  * @upd: array of 8-bit masks that determine what key portion to update
- * @dc: array of 8-bit masks that make up the dont' care mask
+ * @dc: array of 8-bit masks that make up the don't care mask
  * @nm: array of 8-bit masks that make up the never match mask
  * @off: the offset of the first byte in the key to update
  * @len: the number of bytes in the key update
@@ -646,7 +648,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
  * This function generates a key from a value, a don't care mask and a never
  * match mask.
  * upd, dc, and nm are optional parameters, and can be NULL:
- *     upd == NULL --> udp mask is all 1's (update all bits)
+ *     upd == NULL --> upd mask is all 1's (update all bits)
  *     dc == NULL --> dc mask is all 0's (no don't care bits)
  *     nm == NULL --> nm mask is all 0's (no never match bits)
  */
@@ -704,7 +706,7 @@ ice_acquire_global_cfg_lock(struct ice_hw *hw,
 {
        enum ice_status status;
 
-       ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_global_cfg_lock");
+       ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
        status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
                                 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
@@ -734,10 +736,10 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
  *
  * This function will request ownership of the change lock.
  */
-static enum ice_status
+enum ice_status
 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
 {
-       ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_change_lock");
+       ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
        return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
                               ICE_CHANGE_LOCK_TIMEOUT);
@@ -749,9 +751,9 @@ ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
  *
  * This function will release the change lock using the proper Admin Command.
  */
-static void ice_release_change_lock(struct ice_hw *hw)
+void ice_release_change_lock(struct ice_hw *hw)
 {
-       ice_debug(hw, ICE_DBG_TRACE, "ice_release_change_lock");
+       ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
        ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
 }
@@ -777,7 +779,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
        struct ice_aq_desc desc;
        enum ice_status status;
 
-       ice_debug(hw, ICE_DBG_TRACE, "ice_aq_download_pkg");
+       ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
        if (error_offset)
                *error_offset = 0;
@@ -806,28 +808,6 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
        return status;
 }
 
-/**
- * ice_aq_upload_section
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer which will receive the section
- * @buf_size: the size of the package buffer
- * @cd: pointer to command details structure or NULL
- *
- * Upload Section (0x0C41)
- */
-enum ice_status
-ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
-                     u16 buf_size, struct ice_sq_cd *cd)
-{
-       struct ice_aq_desc desc;
-
-       ice_debug(hw, ICE_DBG_TRACE, "ice_aq_upload_section");
-       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
-       desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-
-       return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
-}
-
 /**
  * ice_aq_update_pkg
  * @hw: pointer to the hardware structure
@@ -849,7 +829,7 @@ ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
        struct ice_aq_desc desc;
        enum ice_status status;
 
-       ice_debug(hw, ICE_DBG_TRACE, "ice_aq_update_pkg");
+       ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
        if (error_offset)
                *error_offset = 0;
@@ -888,16 +868,17 @@ ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
  * success it returns a pointer to the segment header, otherwise it will
  * return NULL.
  */
-struct ice_generic_seg_hdr *
+static struct ice_generic_seg_hdr *
 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
                    struct ice_pkg_hdr *pkg_hdr)
 {
        u32 i;
 
        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-       ice_debug(hw, ICE_DBG_PKG, "Package version: %d.%d.%d.%d\n",
-                 pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
-                 pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
+       ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
+                 pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+                 pkg_hdr->pkg_format_ver.update,
+                 pkg_hdr->pkg_format_ver.draft);
 
        /* Search all package segments for the requested segment type */
        for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
@@ -932,9 +913,8 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
                return status;
 
        for (i = 0; i < count; i++) {
-               bool last = ((i + 1) == count);
-
                struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+               bool last = ((i + 1) == count);
 
                status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
                                           last, &offset, &info, NULL);
@@ -980,9 +960,19 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
        if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
                return ICE_SUCCESS;
 
+       /* reset pkg_dwnld_status in case this function is called in the
+        * reset/rebuild flow
+        */
+       hw->pkg_dwnld_status = ICE_AQ_RC_OK;
+
        status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
-       if (status)
+       if (status) {
+               if (status == ICE_ERR_AQ_NO_WORK)
+                       hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
+               else
+                       hw->pkg_dwnld_status = hw->adminq.sq_last_status;
                return status;
+       }
 
        for (i = 0; i < count; i++) {
                bool last = ((i + 1) == count);
@@ -1003,9 +993,11 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
 
                bh = (struct ice_buf_hdr *)(bufs + i);
 
-               status = ice_aq_download_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
-                                            last, &offset, &info, NULL);
+               status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
+                                            &offset, &info, NULL);
 
+               /* Save AQ status from download package */
+               hw->pkg_dwnld_status = hw->adminq.sq_last_status;
                if (status) {
                        ice_debug(hw, ICE_DBG_PKG,
                                  "Pkg download failed: err %d off %d inf %d\n",
@@ -1038,7 +1030,7 @@ ice_aq_get_pkg_info_list(struct ice_hw *hw,
 {
        struct ice_aq_desc desc;
 
-       ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_pkg_info_list");
+       ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
 
        return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
@@ -1051,18 +1043,21 @@ ice_aq_get_pkg_info_list(struct ice_hw *hw,
  *
  * Handles the download of a complete package.
  */
-enum ice_status ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
+static enum ice_status
+ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
 {
        struct ice_buf_table *ice_buf_tbl;
 
        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-       ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
-                 ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
-                 ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
+       ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+                 ice_seg->hdr.seg_format_ver.major,
+                 ice_seg->hdr.seg_format_ver.minor,
+                 ice_seg->hdr.seg_format_ver.update,
+                 ice_seg->hdr.seg_format_ver.draft);
 
        ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
                  LE32_TO_CPU(ice_seg->hdr.seg_type),
-                 LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
+                 LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
 
        ice_buf_tbl = ice_find_buf_table(ice_seg);
 
@@ -1080,15 +1075,11 @@ enum ice_status ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
  *
  * Saves off the package details into the HW structure.
  */
-enum ice_status
+static enum ice_status
 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
 {
-       struct ice_aqc_get_pkg_info_resp *pkg_info;
        struct ice_global_metadata_seg *meta_seg;
        struct ice_generic_seg_hdr *seg_hdr;
-       enum ice_status status;
-       u16 size;
-       u32 i;
 
        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
        if (!pkg_hdr)
@@ -1113,23 +1104,41 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
 
        seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
        if (seg_hdr) {
-               hw->ice_pkg_ver = seg_hdr->seg_ver;
-               ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
+               hw->ice_pkg_ver = seg_hdr->seg_format_ver;
+               ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
                           sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA);
 
-               ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
-                         seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
-                         seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
-                         seg_hdr->seg_name);
+               ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+                         seg_hdr->seg_format_ver.major,
+                         seg_hdr->seg_format_ver.minor,
+                         seg_hdr->seg_format_ver.update,
+                         seg_hdr->seg_format_ver.draft,
+                         seg_hdr->seg_id);
        } else {
                ice_debug(hw, ICE_DBG_INIT,
                          "Did not find ice segment in driver package\n");
                return ICE_ERR_CFG;
        }
 
-#define ICE_PKG_CNT    4
-       size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
-                                   (ICE_PKG_CNT - 1));
+       return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_pkg_info
+ * @hw: pointer to the hardware structure
+ *
+ * Store details of the package currently loaded in HW into the HW structure.
+ */
+static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
+{
+       struct ice_aqc_get_pkg_info_resp *pkg_info;
+       enum ice_status status;
+       u16 size;
+       u32 i;
+
+       ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+       size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT - 1);
        pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
        if (!pkg_info)
                return ICE_ERR_NO_MEMORY;
@@ -1146,10 +1155,13 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
                if (pkg_info->pkg_info[i].is_active) {
                        flags[place++] = 'A';
                        hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+                       hw->active_track_id =
+                               LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
                        ice_memcpy(hw->active_pkg_name,
                                   pkg_info->pkg_info[i].name,
-                                  sizeof(hw->active_pkg_name),
+                                  sizeof(pkg_info->pkg_info[i].name),
                                   ICE_NONDMA_TO_NONDMA);
+                       hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
                }
                if (pkg_info->pkg_info[i].is_active_at_boot)
                        flags[place++] = 'B';
@@ -1172,41 +1184,6 @@ init_pkg_free_alloc:
        return status;
 }
 
-/**
- * ice_find_label_value
- * @ice_seg: pointer to the ice segment (non-NULL)
- * @name: name of the label to search for
- * @type: the section type that will contain the label
- * @value: pointer to a value that will return the label's value if found
- *
- * Finds a label's value given the label name and the section type to search.
- * The ice_seg parameter must not be NULL since the first call to
- * ice_enum_labels requires a pointer to an actual ice_seg structure.
- */
-enum ice_status
-ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
-                    u16 *value)
-{
-       struct ice_pkg_enum state;
-       char *label_name;
-       u16 val;
-
-       if (!ice_seg)
-               return ICE_ERR_PARAM;
-
-       do {
-               label_name = ice_enum_labels(ice_seg, type, &state, &val);
-               if (label_name && !strcmp(label_name, name)) {
-                       *value = val;
-                       return ICE_SUCCESS;
-               }
-
-               ice_seg = NULL;
-       } while (label_name);
-
-       return ICE_ERR_CFG;
-}
-
 /**
  * ice_verify_pkg - verify package
  * @pkg: pointer to the package buffer
@@ -1223,10 +1200,10 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
        if (len < sizeof(*pkg))
                return ICE_ERR_BUF_TOO_SHORT;
 
-       if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
-           pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
-           pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
-           pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
+       if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+           pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+           pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+           pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
                return ICE_ERR_CFG;
 
        /* pkg must have at least one segment */
@@ -1235,7 +1212,7 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
                return ICE_ERR_CFG;
 
        /* make sure segment array fits in package length */
-       if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
+       if (len < ice_struct_size(pkg, seg_offset, seg_count - 1))
                return ICE_ERR_BUF_TOO_SHORT;
 
        /* all segments must fit within length */
@@ -1269,6 +1246,7 @@ void ice_free_seg(struct ice_hw *hw)
        if (hw->pkg_copy) {
                ice_free(hw, hw->pkg_copy);
                hw->pkg_copy = NULL;
+               hw->pkg_size = 0;
        }
        hw->seg = NULL;
 }
@@ -1282,12 +1260,96 @@ static void ice_init_pkg_regs(struct ice_hw *hw)
 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
 #define ICE_SW_BLK_IDX 0
+       if (hw->dcf_enabled)
+               return;
 
        /* setup Switch block input mask, which is 48-bits in two parts */
        wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
        wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
 }
 
+/**
+ * ice_chk_pkg_version - check package version for compatibility with driver
+ * @pkg_ver: pointer to a version structure to check
+ *
+ * Check to make sure that the package about to be downloaded is compatible with
+ * the driver. To be compatible, the major and minor components of the package
+ * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
+ * definitions.
+ */
+static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+{
+       if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
+           pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
+               return ICE_ERR_NOT_SUPPORTED;
+
+       return ICE_SUCCESS;
+}
+
+/**
+ * ice_chk_pkg_compat
+ * @hw: pointer to the hardware structure
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
+ *
+ * This function checks the package version compatibility with driver and NVM
+ */
+static enum ice_status
+ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
+                  struct ice_seg **seg)
+{
+       struct ice_aqc_get_pkg_info_resp *pkg;
+       enum ice_status status;
+       u16 size;
+       u32 i;
+
+       ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+       /* Check package version compatibility */
+       status = ice_chk_pkg_version(&hw->pkg_ver);
+       if (status) {
+               ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
+               return status;
+       }
+
+       /* find ICE segment in given package */
+       *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+                                                    ospkg);
+       if (!*seg) {
+               ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+               return ICE_ERR_CFG;
+       }
+
+       /* Check if FW is compatible with the OS package */
+       size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT - 1);
+       pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+       if (!pkg)
+               return ICE_ERR_NO_MEMORY;
+
+       status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
+       if (status)
+               goto fw_ddp_compat_free_alloc;
+
+       for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
+               /* loop till we find the NVM package */
+               if (!pkg->pkg_info[i].is_in_nvm)
+                       continue;
+               if ((*seg)->hdr.seg_format_ver.major !=
+                       pkg->pkg_info[i].ver.major ||
+                   (*seg)->hdr.seg_format_ver.minor >
+                       pkg->pkg_info[i].ver.minor) {
+                       status = ICE_ERR_FW_DDP_MISMATCH;
+                       ice_debug(hw, ICE_DBG_INIT,
+                                 "OS package is not compatible with NVM.\n");
+               }
+               /* done processing NVM package so break */
+               break;
+       }
+fw_ddp_compat_free_alloc:
+       ice_free(hw, pkg);
+       return status;
+}
+
 /**
  * ice_init_pkg - initialize/download package
  * @hw: pointer to the hardware structure
@@ -1313,7 +1375,7 @@ static void ice_init_pkg_regs(struct ice_hw *hw)
  * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
  * case.
  */
-static enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
 {
        struct ice_pkg_hdr *pkg;
        enum ice_status status;
@@ -1335,12 +1397,12 @@ static enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
        if (status)
                return status;
 
-       /* find segment in given package */
-       seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
-       if (!seg) {
-               ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
-               return ICE_ERR_CFG;
-       }
+       /* before downloading the package, check package version for
+        * compatibility with driver
+        */
+       status = ice_chk_pkg_compat(hw, pkg, &seg);
+       if (status)
+               return status;
 
        /* initialize package hints and then download package */
        ice_init_pkg_hints(hw, seg);
@@ -1351,14 +1413,23 @@ static enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
                status = ICE_SUCCESS;
        }
 
-       /* Free a previous segment, if necessary */
-       ice_free_seg(hw);
+       /* Get information on the package currently loaded in HW, then make sure
+        * the driver is compatible with this version.
+        */
+       if (!status) {
+               status = ice_get_pkg_info(hw);
+               if (!status)
+                       status = ice_chk_pkg_version(&hw->active_pkg_ver);
+       }
+
        if (!status) {
                hw->seg = seg;
-               /* on successful package download, update other required
-                * registers to support the package
+               /* on successful package download update other required
+                * registers to support the package and fill HW tables
+                * with package content.
                 */
                ice_init_pkg_regs(hw);
+               ice_fill_blk_tbls(hw);
        } else {
                ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
                          status);
@@ -1401,12 +1472,14 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
        buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
 
        status = ice_init_pkg(hw, buf_copy, len);
-       if (status)
+       if (status) {
                /* Free the copy, since we failed to initialize the package */
                ice_free(hw, buf_copy);
-       else
+       } else {
                /* Track the copied pkg so we can free it later */
                hw->pkg_copy = buf_copy;
+               hw->pkg_size = len;
+       }
 
        return status;
 }
@@ -1418,7 +1491,7 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
  * Allocates a package buffer and returns a pointer to the buffer header.
  * Note: all package contents must be in Little Endian form.
  */
-struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
 {
        struct ice_buf_build *bld;
        struct ice_buf_hdr *buf;
@@ -1428,8 +1501,8 @@ struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
                return NULL;
 
        buf = (struct ice_buf_hdr *)bld;
-       buf->data_end = CPU_TO_LE16(sizeof(*buf) -
-                                   sizeof(buf->section_entry[0]));
+       buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
+                                            section_entry));
        return bld;
 }
 
@@ -1465,11 +1538,86 @@ ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
        return fv_section->fv + index;
 }
 
+/**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ */
+static enum ice_prof_type
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+{
+       u16 i;
+
+       for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+               /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+               if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+                   fv->ew[i].off == ICE_VNI_OFFSET)
+                       return ICE_PROF_TUN_UDP;
+
+               /* GRE tunnel will have GRE protocol */
+               if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+                       return ICE_PROF_TUN_GRE;
+
+               /* PPPOE tunnel will have PPPOE protocol */
+               if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
+                       return ICE_PROF_TUN_PPPOE;
+       }
+
+       return ICE_PROF_NON_TUN;
+}
+
+/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+                    ice_bitmap_t *bm)
+{
+       struct ice_pkg_enum state;
+       struct ice_seg *ice_seg;
+       struct ice_fv *fv;
+
+       ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+       if (req_profs == ICE_PROF_ALL) {
+               u16 i;
+
+               for (i = 0; i < ICE_MAX_NUM_PROFILES; i++)
+                       ice_set_bit(i, bm);
+               return;
+       }
+
+       ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
+
+       ice_seg = hw->seg;
+       do {
+               enum ice_prof_type prof_type;
+               u32 offset;
+
+               fv = (struct ice_fv *)
+                       ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+                                          &offset, ice_sw_fv_handler);
+               ice_seg = NULL;
+
+               if (fv) {
+                       /* Determine field vector type */
+                       prof_type = ice_get_sw_prof_type(hw, fv);
+
+                       if (req_profs & prof_type)
+                               ice_set_bit((u16)offset, bm);
+               }
+       } while (fv);
+}
+
 /**
  * ice_get_sw_fv_list
  * @hw: pointer to the HW structure
  * @prot_ids: field vector to search for with a given protocol ID
  * @ids_cnt: lookup/protocol count
+ * @bm: bitmap of field vectors to consider
  * @fv_list: Head of a list
  *
  * Finds all the field vector entries from switch block that contain
@@ -1480,8 +1628,8 @@ ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
  * allocated for every list entry.
  */
 enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u16 *prot_ids, u8 ids_cnt,
-                  struct LIST_HEAD_TYPE *fv_list)
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+                  ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
 {
        struct ice_sw_fv_list_entry *fvl;
        struct ice_sw_fv_list_entry *tmp;
@@ -1490,18 +1638,29 @@ ice_get_sw_fv_list(struct ice_hw *hw, u16 *prot_ids, u8 ids_cnt,
        struct ice_fv *fv;
        u32 offset;
 
+       ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
        if (!ids_cnt || !hw->seg)
                return ICE_ERR_PARAM;
 
        ice_seg = hw->seg;
        do {
-               u8 i;
+               u16 i;
 
                fv = (struct ice_fv *)
                        ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
                                           &offset, ice_sw_fv_handler);
+               if (!fv)
+                       break;
+               ice_seg = NULL;
 
-               for (i = 0; i < ids_cnt && fv; i++) {
+               /* If field vector is not in the bitmap list, then skip this
+                * profile.
+                */
+               if (!ice_is_bit_set(bm, (u16)offset))
+                       continue;
+
+               for (i = 0; i < ids_cnt; i++) {
                        int j;
 
                        /* This code assumes that if a switch field vector line
@@ -1525,7 +1684,6 @@ ice_get_sw_fv_list(struct ice_hw *hw, u16 *prot_ids, u8 ids_cnt,
                                break;
                        }
                }
-               ice_seg = NULL;
        } while (fv);
        if (LIST_EMPTY(fv_list))
                return ICE_ERR_CFG;
@@ -1542,40 +1700,57 @@ err:
 }
 
 /**
- * ice_pkg_buf_alloc_single_section
- * @hw: pointer to the HW structure
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
- * @section: returns pointer to the section
- *
- * Allocates a package buffer with a single section.
- * Note: all package contents must be in Little Endian form.
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
  */
-static struct ice_buf_build *
-ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
-                                void **section)
+void ice_init_prof_result_bm(struct ice_hw *hw)
 {
-       struct ice_buf_build *buf;
+       struct ice_pkg_enum state;
+       struct ice_seg *ice_seg;
+       struct ice_fv *fv;
 
-       if (!section)
-               return NULL;
+       ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 
-       buf = ice_pkg_buf_alloc(hw);
-       if (!buf)
-               return NULL;
+       if (!hw->seg)
+               return;
+
+       ice_seg = hw->seg;
+       do {
+               u32 off;
+               u16 i;
 
-       if (ice_pkg_buf_reserve_section(buf, 1))
-               goto ice_pkg_buf_alloc_single_section_err;
+               fv = (struct ice_fv *)
+                       ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+                                          &off, ice_sw_fv_handler);
+               ice_seg = NULL;
+               if (!fv)
+                       break;
 
-       *section = ice_pkg_buf_alloc_section(buf, type, size);
-       if (!*section)
-               goto ice_pkg_buf_alloc_single_section_err;
+               ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
+                               ICE_MAX_FV_WORDS);
 
-       return buf;
+               /* Determine empty field vector indices; these can be
+                * used for recipe results. Skip index 0, since it is
+                * always used for Switch ID.
+                */
+               for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+                       if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+                           fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+                               ice_set_bit(i,
+                                           hw->switch_info->prof_res_bm[off]);
+       } while (fv);
+}
 
-ice_pkg_buf_alloc_single_section_err:
-       ice_pkg_buf_free(hw, buf);
-       return NULL;
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+       ice_free(hw, bld);
 }
 
 /**
@@ -1591,7 +1766,7 @@ ice_pkg_buf_alloc_single_section_err:
  * result in some wasted space in the buffer.
  * Note: all package contents must be in Little Endian form.
  */
-enum ice_status
+static enum ice_status
 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
 {
        struct ice_buf_hdr *buf;
@@ -1620,79 +1795,37 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
 }
 
 /**
- * ice_pkg_buf_unreserve_section
+ * ice_pkg_buf_alloc_section
  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @count: the number of sections to unreserve
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
  *
- * Unreserves one or more section table entries in a package buffer, releasing
- * space that can be used for section data. This routine can be called
- * multiple times as long as they are made before calling
- * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
- * is called once, the number of sections that can be allocated will not be able
- * to be increased; not using all reserved sections is fine, but this will
- * result in some wasted space in the buffer.
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffer's status accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
  * Note: all package contents must be in Little Endian form.
  */
-enum ice_status
-ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
+static void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
 {
        struct ice_buf_hdr *buf;
-       u16 section_count;
+       u16 sect_count;
        u16 data_end;
 
-       if (!bld)
-               return ICE_ERR_PARAM;
+       if (!bld || !type || !size)
+               return NULL;
 
        buf = (struct ice_buf_hdr *)&bld->buf;
 
-       /* already an active section, can't decrease table size */
-       section_count = LE16_TO_CPU(buf->section_count);
-       if (section_count > 0)
-               return ICE_ERR_CFG;
+       /* check for enough space left in buffer */
+       data_end = LE16_TO_CPU(buf->data_end);
 
-       if (count > bld->reserved_section_table_entries)
-               return ICE_ERR_CFG;
-       bld->reserved_section_table_entries -= count;
+       /* section start must align on 4 byte boundary */
+       data_end = ICE_ALIGN(data_end, 4);
 
-       data_end = LE16_TO_CPU(buf->data_end) -
-                  (count * sizeof(buf->section_entry[0]));
-       buf->data_end = CPU_TO_LE16(data_end);
-
-       return ICE_SUCCESS;
-}
-
-/**
- * ice_pkg_buf_alloc_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
- *
- * Reserves memory in the buffer for a section's content and updates the
- * buffers' status accordingly. This routine returns a pointer to the first
- * byte of the section start within the buffer, which is used to fill in the
- * section contents.
- * Note: all package contents must be in Little Endian form.
- */
-void *
-ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
-{
-       struct ice_buf_hdr *buf;
-       u16 sect_count;
-       u16 data_end;
-
-       if (!bld || !type || !size)
-               return NULL;
-
-       buf = (struct ice_buf_hdr *)&bld->buf;
-
-       /* check for enough space left in buffer */
-       data_end = LE16_TO_CPU(buf->data_end);
-
-       /* section start must align on 4 byte boundary */
-       data_end = ICE_ALIGN(data_end, 4);
-
-       if ((data_end + size) > ICE_MAX_S_DATA_END)
-               return NULL;
+       if ((data_end + size) > ICE_MAX_S_DATA_END)
+               return NULL;
 
        /* check for more available section table entries */
        sect_count = LE16_TO_CPU(buf->section_count);
@@ -1714,24 +1847,6 @@ ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
        return NULL;
 }
 
-/**
- * ice_pkg_buf_get_free_space
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Returns the number of free bytes remaining in the buffer.
- * Note: all package contents must be in Little Endian form.
- */
-u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
-{
-       struct ice_buf_hdr *buf;
-
-       if (!bld)
-               return 0;
-
-       buf = (struct ice_buf_hdr *)&bld->buf;
-       return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
-}
-
 /**
  * ice_pkg_buf_get_active_sections
  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
@@ -1742,7 +1857,7 @@ u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
  * not be used.
  * Note: all package contents must be in Little Endian form.
  */
-u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
 {
        struct ice_buf_hdr *buf;
 
@@ -1754,12 +1869,12 @@ u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
 }
 
 /**
- * ice_pkg_buf_header
+ * ice_pkg_buf
  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
  *
  * Return a pointer to the buffer's header
  */
-struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
 {
        if (!bld)
                return NULL;
@@ -1768,52 +1883,351 @@ struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
 }
 
 /**
- * ice_pkg_buf_free
+ * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
  * @hw: pointer to the HW structure
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @port: port to search for
+ * @index: optionally returns index
  *
- * Frees a package buffer
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
  */
-void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
 {
-       ice_free(hw, bld);
+       u16 i;
+
+       for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+               if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+                       if (index)
+                               *index = i;
+                       return true;
+               }
+
+       return false;
 }
 
-/* PTG Management */
+/**
+ * ice_tunnel_port_in_use
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
+{
+       bool res;
+
+       ice_acquire_lock(&hw->tnl_lock);
+       res = ice_tunnel_port_in_use_hlpr(hw, port, index);
+       ice_release_lock(&hw->tnl_lock);
+
+       return res;
+}
 
 /**
- * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table
- * @hw: pointer to the hardware structure
- * @blk: HW block
+ * ice_tunnel_get_type
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @type: returns tunnel type
+ *
+ * For a given port number, will return the type of tunnel.
+ */
+bool
+ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
+{
+       bool res = false;
+       u16 i;
+
+       ice_acquire_lock(&hw->tnl_lock);
+
+       for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+               if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+                       *type = hw->tnl.tbl[i].type;
+                       res = true;
+                       break;
+               }
+
+       ice_release_lock(&hw->tnl_lock);
+
+       return res;
+}
+
+/**
+ * ice_find_free_tunnel_entry
+ * @hw: pointer to the HW structure
+ * @type: tunnel type
+ * @index: optionally returns index
+ *
+ * Returns whether there is a free tunnel entry, and optionally its index
+ */
+static bool
+ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+                          u16 *index)
+{
+       u16 i;
+
+       for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+               if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
+                   hw->tnl.tbl[i].type == type) {
+                       if (index)
+                               *index = i;
+                       return true;
+               }
+
+       return false;
+}
+
+/**
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
+ * @hw: pointer to the HW structure
+ * @type: tunnel type (TNL_ALL will return any open port)
+ * @port: returns open port
+ */
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
+                        u16 *port)
+{
+       bool res = false;
+       u16 i;
+
+       ice_acquire_lock(&hw->tnl_lock);
+
+       for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+               if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+                   (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
+                       *port = hw->tnl.tbl[i].port;
+                       res = true;
+                       break;
+               }
+
+       ice_release_lock(&hw->tnl_lock);
+
+       return res;
+}
+
+/**
+ * ice_create_tunnel
+ * @hw: pointer to the HW structure
+ * @type: type of tunnel
+ * @port: port of tunnel to create
  *
- * This function will update the XLT1 hardware table to reflect the new
- * packet type group configuration.
+ * Create a tunnel by updating the parse graph in the parser. We do that by
+ * creating a package buffer with the tunnel info and issuing an update package
+ * command.
  */
-enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
 {
-       struct ice_xlt1_section *sect;
+       struct ice_boost_tcam_section *sect_rx, *sect_tx;
+       enum ice_status status = ICE_ERR_MAX_LIMIT;
        struct ice_buf_build *bld;
-       enum ice_status status;
        u16 index;
 
-       bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
-                                              ICE_XLT1_SIZE(ICE_XLT1_CNT),
-                                              (void **)&sect);
-       if (!bld)
-               return ICE_ERR_NO_MEMORY;
+       ice_acquire_lock(&hw->tnl_lock);
+
+       if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
+               hw->tnl.tbl[index].ref++;
+               status = ICE_SUCCESS;
+               goto ice_create_tunnel_end;
+       }
+
+       if (!ice_find_free_tunnel_entry(hw, type, &index)) {
+               status = ICE_ERR_OUT_OF_RANGE;
+               goto ice_create_tunnel_end;
+       }
+
+       bld = ice_pkg_buf_alloc(hw);
+       if (!bld) {
+               status = ICE_ERR_NO_MEMORY;
+               goto ice_create_tunnel_end;
+       }
+
+       /* allocate 2 sections, one for Rx parser, one for Tx parser */
+       if (ice_pkg_buf_reserve_section(bld, 2))
+               goto ice_create_tunnel_err;
+
+       sect_rx = (struct ice_boost_tcam_section *)
+               ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+                                         sizeof(*sect_rx));
+       if (!sect_rx)
+               goto ice_create_tunnel_err;
+       sect_rx->count = CPU_TO_LE16(1);
+
+       sect_tx = (struct ice_boost_tcam_section *)
+               ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+                                         sizeof(*sect_tx));
+       if (!sect_tx)
+               goto ice_create_tunnel_err;
+       sect_tx->count = CPU_TO_LE16(1);
+
+       /* copy original boost entry to update package buffer */
+       ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+                  sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
+
+       /* over-write the never-match dest port key bits with the encoded port
+        * bits
+        */
+       ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+                   (u8 *)&port, NULL, NULL, NULL,
+                   (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
+                   sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
+
+       /* exact copy of entry to Tx section entry */
+       ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
+                  ICE_NONDMA_TO_NONDMA);
+
+       status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+       if (!status) {
+               hw->tnl.tbl[index].port = port;
+               hw->tnl.tbl[index].in_use = true;
+               hw->tnl.tbl[index].ref = 1;
+       }
+
+ice_create_tunnel_err:
+       ice_pkg_buf_free(hw, bld);
+
+ice_create_tunnel_end:
+       ice_release_lock(&hw->tnl_lock);
+
+       return status;
+}
+
+/**
+ * ice_destroy_tunnel
+ * @hw: pointer to the HW structure
+ * @port: port of tunnel to destroy (ignored if the all parameter is true)
+ * @all: flag that states to destroy all tunnels
+ *
+ * Destroys a tunnel or all tunnels by creating an update package buffer
+ * targeting the specific updates requested and then performing an update
+ * package.
+ */
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+{
+       struct ice_boost_tcam_section *sect_rx, *sect_tx;
+       enum ice_status status = ICE_ERR_MAX_LIMIT;
+       struct ice_buf_build *bld;
+       u16 count = 0;
+       u16 index;
+       u16 size;
+       u16 i;
+
+       ice_acquire_lock(&hw->tnl_lock);
+
+       if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
+               if (hw->tnl.tbl[index].ref > 1) {
+                       hw->tnl.tbl[index].ref--;
+                       status = ICE_SUCCESS;
+                       goto ice_destroy_tunnel_end;
+               }
+
+       /* determine count */
+       for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+               if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+                   (all || hw->tnl.tbl[i].port == port))
+                       count++;
 
-       sect->count = CPU_TO_LE16(ICE_XLT1_CNT);
-       sect->offset = CPU_TO_LE16(0);
-       for (index = 0; index < ICE_XLT1_CNT; index++)
-               sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg;
+       if (!count) {
+               status = ICE_ERR_PARAM;
+               goto ice_destroy_tunnel_end;
+       }
+
+       /* size of section - there is at least one entry */
+       size = ice_struct_size(sect_rx, tcam, count - 1);
+
+       bld = ice_pkg_buf_alloc(hw);
+       if (!bld) {
+               status = ICE_ERR_NO_MEMORY;
+               goto ice_destroy_tunnel_end;
+       }
+
+       /* allocate 2 sections, one for Rx parser, one for Tx parser */
+       if (ice_pkg_buf_reserve_section(bld, 2))
+               goto ice_destroy_tunnel_err;
+
+       sect_rx = (struct ice_boost_tcam_section *)
+               ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+                                         size);
+       if (!sect_rx)
+               goto ice_destroy_tunnel_err;
+       sect_rx->count = CPU_TO_LE16(count);
+
+       sect_tx = (struct ice_boost_tcam_section *)
+               ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+                                         size);
+       if (!sect_tx)
+               goto ice_destroy_tunnel_err;
+       sect_tx->count = CPU_TO_LE16(count);
+
+       /* copy original boost entries to update package buffer; pack the
+        * Rx and Tx section entries contiguously from index zero */
+       for (i = 0, index = 0;
+            i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+               if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+                   (all || hw->tnl.tbl[i].port == port)) {
+                       ice_memcpy(sect_rx->tcam + index,
+                                  hw->tnl.tbl[i].boost_entry,
+                                  sizeof(*sect_rx->tcam),
+                                  ICE_NONDMA_TO_NONDMA);
+                       ice_memcpy(sect_tx->tcam + index++,
+                                  hw->tnl.tbl[i].boost_entry,
+                                  sizeof(*sect_tx->tcam),
+                                  ICE_NONDMA_TO_NONDMA);
+                       hw->tnl.tbl[i].marked = true;
+               }
 
        status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+       if (!status)
+               for (i = 0; i < hw->tnl.count &&
+                    i < ICE_TUNNEL_MAX_ENTRIES; i++)
+                       if (hw->tnl.tbl[i].marked) {
+                               hw->tnl.tbl[i].ref = 0;
+                               hw->tnl.tbl[i].port = 0;
+                               hw->tnl.tbl[i].in_use = false;
+                               hw->tnl.tbl[i].marked = false;
+                       }
 
+ice_destroy_tunnel_err:
        ice_pkg_buf_free(hw, bld);
 
+ice_destroy_tunnel_end:
+       ice_release_lock(&hw->tnl_lock);
+
        return status;
 }
 
+/**
+ * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
+ * @hw: pointer to the hardware structure
+ * @blk: hardware block
+ * @prof: profile ID
+ * @fv_idx: field vector word index
+ * @prot: variable to receive the protocol ID
+ * @off: variable to receive the protocol offset
+ */
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+                 u8 *prot, u16 *off)
+{
+       struct ice_fv_word *fv_ext;
+
+       if (prof >= hw->blk[blk].es.count)
+               return ICE_ERR_PARAM;
+
+       if (fv_idx >= hw->blk[blk].es.fvw)
+               return ICE_ERR_PARAM;
+
+       fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
+
+       *prot = fv_ext[fv_idx].prot_id;
+       *off = fv_ext[fv_idx].off;
+
+       return ICE_SUCCESS;
+}
+
+/* PTG Management */
+
 /**
  * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
  * @hw: pointer to the hardware structure
@@ -1822,10 +2236,10 @@ enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
  * @ptg: pointer to variable that receives the PTG
  *
  * This function will search the PTGs for a particular ptype, returning the
- * PTG ID that contains it through the ptg parameter, with the value of
+ * PTG ID that contains it through the PTG parameter, with the value of
  * ICE_DEFAULT_PTG (0) meaning it is part the default PTG.
  */
-enum ice_status
+static enum ice_status
 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
 {
        if (ptype >= ICE_XLT1_CNT || !ptg)
@@ -1839,74 +2253,24 @@ ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
  * ice_ptg_alloc_val - Allocates a new packet type group ID by value
  * @hw: pointer to the hardware structure
  * @blk: HW block
- * @ptg: the ptg to allocate
+ * @ptg: the PTG to allocate
  *
- * This function allocates a given packet type group ID specified by the ptg
+ * This function allocates a given packet type group ID specified by the PTG
  * parameter.
  */
-static
-void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
+static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
 {
        hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
 }
 
-/**
- * ice_ptg_alloc - Find a free entry and allocates a new packet type group ID
- * @hw: pointer to the hardware structure
- * @blk: HW block
- *
- * This function allocates and returns a new packet type group ID. Note
- * that 0 is the default packet type group, so successfully created PTGs will
- * have a non-zero ID value; which means a 0 return value indicates an error.
- */
-u8 ice_ptg_alloc(struct ice_hw *hw, enum ice_block blk)
-{
-       u16 i;
-
-       /* Skip the default PTG of 0 */
-       for (i = 1; i < ICE_MAX_PTGS; i++)
-               if (!hw->blk[blk].xlt1.ptg_tbl[i].in_use) {
-                       /* found a free PTG ID */
-                       ice_ptg_alloc_val(hw, blk, i);
-                       return (u8)i;
-               }
-
-       return 0;
-}
-
-/**
- * ice_ptg_free - Frees a packet type group
- * @hw: pointer to the hardware structure
- * @blk: HW block
- * @ptg: the ptg ID to free
- *
- * This function frees a packet type group, and returns all the current ptypes
- * within it to the default PTG.
- */
-void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
-{
-       struct ice_ptg_ptype *p, *temp;
-
-       hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false;
-       p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
-       while (p) {
-               p->ptg = ICE_DEFAULT_PTG;
-               temp = p->next_ptype;
-               p->next_ptype = NULL;
-               p = temp;
-       }
-
-       hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL;
-}
-
 /**
  * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
  * @hw: pointer to the hardware structure
  * @blk: HW block
  * @ptype: the ptype to remove
- * @ptg: the ptg to remove the ptype from
+ * @ptg: the PTG to remove the ptype from
  *
- * This function will remove the ptype from the specific ptg, and move it to
+ * This function will remove the ptype from the specific PTG, and move it to
  * the default PTG (ICE_DEFAULT_PTG).
  */
 static enum ice_status
@@ -1949,14 +2313,14 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
  * @hw: pointer to the hardware structure
  * @blk: HW block
  * @ptype: the ptype to add or move
- * @ptg: the ptg to add or move the ptype to
+ * @ptg: the PTG to add or move the ptype to
  *
  * This function will either add or move a ptype to a particular PTG depending
  * on if the ptype is already part of another group. Note that using a
  * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
  * default PTG.
  */
-enum ice_status
+static enum ice_status
 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
 {
        enum ice_status status;
@@ -2002,7 +2366,7 @@ struct ice_blk_size_details {
        u16 xlt2;                       /* # XLT2 entries */
        u16 prof_tcam;                  /* # profile ID TCAM entries */
        u16 prof_id;                    /* # profile IDs */
-       u8 prof_cdid_bits;              /* # cdid one-hot bits used in key */
+       u8 prof_cdid_bits;              /* # CDID one-hot bits used in key */
        u16 prof_redir;                 /* # profile redirection entries */
        u16 es;                         /* # extraction sequence entries */
        u16 fvw;                        /* # field vector words */
@@ -2064,12 +2428,10 @@ ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
        u16 count = 0;
 
        /* compare counts */
-       LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) {
+       LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list)
                count++;
-       }
-       LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) {
+       LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
                chk_count++;
-       }
        if (!count || count != chk_count)
                return false;
 
@@ -2092,71 +2454,6 @@ ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
 
 /* VSIG Management */
 
-/**
- * ice_vsig_update_xlt2_sect - update one section of XLT2 table
- * @hw: pointer to the hardware structure
- * @blk: HW block
- * @vsi: HW VSI number to program
- * @vsig: vsig for the VSI
- *
- * This function will update the XLT2 hardware table with the input VSI
- * group configuration.
- */
-static enum ice_status
-ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
-                         u16 vsig)
-{
-       struct ice_xlt2_section *sect;
-       struct ice_buf_build *bld;
-       enum ice_status status;
-
-       bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
-                                              sizeof(struct ice_xlt2_section),
-                                              (void **)&sect);
-       if (!bld)
-               return ICE_ERR_NO_MEMORY;
-
-       sect->count = CPU_TO_LE16(1);
-       sect->offset = CPU_TO_LE16(vsi);
-       sect->value[0] = CPU_TO_LE16(vsig);
-
-       status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
-
-       ice_pkg_buf_free(hw, bld);
-
-       return status;
-}
-
-/**
- * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration
- * @hw: pointer to the hardware structure
- * @blk: HW block
- *
- * This function will update the XLT2 hardware table with the input VSI
- * group configuration of used vsis.
- */
-enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
-{
-       u16 vsi;
-
-       for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
-               /* update only vsis that have been changed */
-               if (hw->blk[blk].xlt2.vsis[vsi].changed) {
-                       enum ice_status status;
-                       u16 vsig;
-
-                       vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
-                       status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig);
-                       if (status)
-                               return status;
-
-                       hw->blk[blk].xlt2.vsis[vsi].changed = 0;
-               }
-       }
-
-       return ICE_SUCCESS;
-}
-
 /**
  * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
  * @hw: pointer to the hardware structure
@@ -2186,9 +2483,9 @@ ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
  * ice_vsig_alloc_val - allocate a new VSIG by value
  * @hw: pointer to the hardware structure
  * @blk: HW block
- * @vsig: the vsig to allocate
+ * @vsig: the VSIG to allocate
  *
- * This function will allocate a given VSIG specified by the vsig parameter.
+ * This function will allocate a given VSIG specified by the VSIG parameter.
  */
 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
 {
@@ -2236,22 +2533,19 @@ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
  * for, the list must match exactly, including the order in which the
  * characteristics are listed.
  */
-enum ice_status
+static enum ice_status
 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
                        struct LIST_HEAD_TYPE *chs, u16 *vsig)
 {
        struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
        u16 i;
 
-       for (i = 0; i < xlt2->count; i++) {
+       for (i = 0; i < xlt2->count; i++)
                if (xlt2->vsig_tbl[i].in_use &&
                    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
-                       *vsig = (i | ((hw->pf_id << ICE_PF_NUM_S) &
-                                     ICE_PF_NUM_M));
                        *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
                        return ICE_SUCCESS;
                }
-       }
 
        return ICE_ERR_DOES_NOT_EXIST;
 }
@@ -2265,7 +2559,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
  * The function will remove all VSIs associated with the input VSIG and move
  * them to the DEFAULT_VSIG and mark the VSIG available.
  */
-enum ice_status
+static enum ice_status
 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
 {
        struct ice_vsig_prof *dtmp, *del;
@@ -2282,21 +2576,23 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
        hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
 
        vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
-       if (!vsi_cur)
-               return ICE_ERR_CFG;
-
-       /* remove all vsis associated with this VSIG XLT2 entry */
-       do {
-               struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
-
-               vsi_cur->vsig = ICE_DEFAULT_VSIG;
-               vsi_cur->changed = 1;
-               vsi_cur->next_vsi = NULL;
-               vsi_cur = tmp;
-       } while (vsi_cur);
-
-       /* NULL terminate head of vsi list */
-       hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
+       /* If the VSIG has at least 1 VSI then iterate through the
+        * list and remove the VSIs before deleting the group.
+        */
+       if (vsi_cur) {
+               /* remove all VSIs associated with this VSIG XLT2 entry */
+               do {
+                       struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+
+                       vsi_cur->vsig = ICE_DEFAULT_VSIG;
+                       vsi_cur->changed = 1;
+                       vsi_cur->next_vsi = NULL;
+                       vsi_cur = tmp;
+               } while (vsi_cur);
+
+               /* NULL terminate head of VSI list */
+               hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
+       }
 
        /* free characteristic list */
        LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
@@ -2306,6 +2602,67 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
                ice_free(hw, del);
        }
 
+       /* if VSIG characteristic list was cleared for reset
+        * re-initialize the list head
+        */
+       INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+
+       return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_remove_vsi - remove VSI from VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: VSI to remove
+ * @vsig: VSI group to remove from
+ *
+ * The function will remove the input VSI from its VSI group and move it
+ * to the DEFAULT_VSIG.
+ */
+static enum ice_status
+ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+       struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
+       u16 idx;
+
+       idx = vsig & ICE_VSIG_IDX_M;
+
+       if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+               return ICE_ERR_PARAM;
+
+       if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       /* entry already in default VSIG, don't have to remove */
+       if (idx == ICE_DEFAULT_VSIG)
+               return ICE_SUCCESS;
+
+       vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+       if (!(*vsi_head))
+               return ICE_ERR_CFG;
+
+       vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
+       vsi_cur = (*vsi_head);
+
+       /* iterate the VSI list, skip over the entry to be removed */
+       while (vsi_cur) {
+               if (vsi_tgt == vsi_cur) {
+                       (*vsi_head) = vsi_cur->next_vsi;
+                       break;
+               }
+               vsi_head = &vsi_cur->next_vsi;
+               vsi_cur = vsi_cur->next_vsi;
+       }
+
+       /* verify if VSI was removed from group list */
+       if (!vsi_cur)
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       vsi_cur->vsig = ICE_DEFAULT_VSIG;
+       vsi_cur->changed = 1;
+       vsi_cur->next_vsi = NULL;
+
        return ICE_SUCCESS;
 }
 
@@ -2321,7 +2678,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
  * move the entry to the DEFAULT_VSIG, update the original VSIG and
  * then move entry to the new VSIG.
  */
-enum ice_status
+static enum ice_status
 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
 {
        struct ice_vsig_vsi *tmp;
@@ -2373,79 +2730,98 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
 }
 
 /**
- * ice_vsig_remove_vsi - remove VSI from VSIG
+ * ice_prof_has_mask_idx - determine if profile index masking is identical
  * @hw: pointer to the hardware structure
  * @blk: HW block
- * @vsi: VSI to remove
- * @vsig: VSI group to remove from
- *
- * The function will remove the input VSI from its VSI group and move it
- * to the DEFAULT_VSIG.
+ * @prof: profile to check
+ * @idx: profile index to check
+ * @mask: mask to match
  */
-enum ice_status
-ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+static bool
+ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
+                     u16 mask)
 {
-       struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
-       u16 idx;
-
-       idx = vsig & ICE_VSIG_IDX_M;
-
-       if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
-               return ICE_ERR_PARAM;
-
-       if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
-               return ICE_ERR_DOES_NOT_EXIST;
-
-       /* entry already in default VSIG, don't have to remove */
-       if (idx == ICE_DEFAULT_VSIG)
-               return ICE_SUCCESS;
-
-       vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
-       if (!(*vsi_head))
-               return ICE_ERR_CFG;
+       bool expect_no_mask = false;
+       bool found = false;
+       bool match = false;
+       u16 i;
 
-       vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
-       vsi_cur = (*vsi_head);
+       /* If mask is 0x0000 or 0xffff, then there is no masking */
+       if (mask == 0 || mask == 0xffff)
+               expect_no_mask = true;
+
+       /* Scan the enabled masks on this profile, for the specified idx */
+       for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
+            hw->blk[blk].masks.count; i++)
+               if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
+                       if (hw->blk[blk].masks.masks[i].in_use &&
+                           hw->blk[blk].masks.masks[i].idx == idx) {
+                               found = true;
+                               if (hw->blk[blk].masks.masks[i].mask == mask)
+                                       match = true;
+                               break;
+                       }
 
-       /* iterate the VSI list, skip over the entry to be removed */
-       while (vsi_cur) {
-               if (vsi_tgt == vsi_cur) {
-                       (*vsi_head) = vsi_cur->next_vsi;
-                       break;
-               }
-               vsi_head = &vsi_cur->next_vsi;
-               vsi_cur = vsi_cur->next_vsi;
+       if (expect_no_mask) {
+               if (found)
+                       return false;
+       } else {
+               if (!match)
+                       return false;
        }
 
-       /* verify if VSI was removed from group list */
-       if (!vsi_cur)
-               return ICE_ERR_DOES_NOT_EXIST;
+       return true;
+}
 
-       vsi_cur->vsig = ICE_DEFAULT_VSIG;
-       vsi_cur->changed = 1;
-       vsi_cur->next_vsi = NULL;
+/**
+ * ice_prof_has_mask - determine if profile masking is identical
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @prof: profile to check
+ * @masks: masks to match
+ */
+static bool
+ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
+{
+       u16 i;
 
-       return ICE_SUCCESS;
+       /* es->mask_ena[prof] will have the mask */
+       for (i = 0; i < hw->blk[blk].es.fvw; i++)
+               if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
+                       return false;
+
+       return true;
 }
 
 /**
- * ice_find_prof_id - find profile ID for a given field vector
+ * ice_find_prof_id_with_mask - find profile ID for a given field vector
  * @hw: pointer to the hardware structure
  * @blk: HW block
  * @fv: field vector to search for
+ * @masks: masks for fv
  * @prof_id: receives the profile ID
  */
 static enum ice_status
-ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
-                struct ice_fv_word *fv, u8 *prof_id)
+ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
+                          struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
 {
        struct ice_es *es = &hw->blk[blk].es;
-       u16 off, i;
+       u8 i;
+
+       /* For FD and RSS, we don't want to reuse an existing profile with the
+        * same field vector and mask. This will cause rule interference.
+        */
+       if (blk == ICE_BLK_FD || blk == ICE_BLK_RSS)
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       for (i = 0; i < (u8)es->count; i++) {
+               u16 off = i * es->fvw;
 
-       for (i = 0; i < es->count; i++) {
-               off = i * es->fvw;
+               if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
+                       continue;
 
-               if (memcmp(&es->t[off], fv, es->fvw * 2))
+               /* check if masks settings are the same for this profile */
+               if (masks && !ice_prof_has_mask(hw, blk, i, masks))
                        continue;
 
                *prof_id = i;
@@ -2513,217 +2889,26 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
        return true;
 }
 
-/**
- * ice_workaround_get_res_blk - determine the block from a resource type
- * @type: type of resource
- * @blk: pointer to a enum that will receive the block type
- * @tcam: pointer to variable that will be set to true for a TCAM resource type
- */
-static enum
-ice_status ice_workaround_get_res_blk(u16 type, enum ice_block *blk, bool *tcam)
-{
-       /* just need to support TCAM entries and Profile IDs for now */
-       *tcam = false;
-
-       switch (type) {
-       case ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM:
-               *blk = ICE_BLK_SW;
-               *tcam = true;
-               break;
-       case ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM:
-               *blk = ICE_BLK_ACL;
-               *tcam = true;
-               break;
-       case ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM:
-               *blk = ICE_BLK_FD;
-               *tcam = true;
-               break;
-       case ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM:
-               *blk = ICE_BLK_RSS;
-               *tcam = true;
-               break;
-       case ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM:
-               *blk = ICE_BLK_PE;
-               *tcam = true;
-               break;
-       case ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID:
-               *blk = ICE_BLK_SW;
-               break;
-       case ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID:
-               *blk = ICE_BLK_ACL;
-               break;
-       case ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID:
-               *blk = ICE_BLK_FD;
-               break;
-       case ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID:
-               *blk = ICE_BLK_RSS;
-               break;
-       case ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID:
-               *blk = ICE_BLK_PE;
-               break;
-       default:
-               return ICE_ERR_PARAM;
-       }
-
-       return ICE_SUCCESS;
-}
-
-/**
- * ice_alloc_res_workaround
- * @hw: pointer to the hw struct
- * @type: type of resource
- * @num: number of resources to allocate
- * @res: pointer to array that will receive the resources
- */
-static enum ice_status
-ice_alloc_res_workaround(struct ice_hw *hw, u16 type, u16 num, u16 *res)
-{
-       enum ice_block blk;
-       u16 count = 0;
-       bool tcam;
-       u16 first;
-       u16 last;
-       u16 max;
-       u16 i;
-
-/* Number of PFs we support with this workaround */
-#define ICE_WA_PF_COUNT        4
-#define ICE_WA_1ST_TCAM        4
-#define ICE_WA_1ST_FV  4
-
-       /* Only allow our supported PFs */
-       if (hw->pf_id >= ICE_WA_PF_COUNT)
-               return ICE_ERR_AQ_ERROR;
-
-       if (ice_workaround_get_res_blk(type, &blk, &tcam))
-               return ICE_ERR_AQ_ERROR;
-
-       if (tcam) {
-               /* range of entries based on PF */
-               max = hw->blk[blk].prof.count / ICE_WA_PF_COUNT;
-               first = max * hw->pf_id;
-               last = first + max;
-
-               /* Profile IDs - start at non-zero index for PROF ID TCAM table
-                * The first few entries are for bypass, default and errors
-                * (only relevant for PF 0)
-                */
-               first += hw->pf_id ? 0 : ICE_WA_1ST_TCAM;
-
-               for (i = first; i < last && count < num; i++) {
-                       if (!hw->blk[blk].prof.resource_used_hack[i]) {
-                               res[count++] = i;
-                               hw->blk[blk].prof.resource_used_hack[i] = true;
-                       }
-               }
-
-               /* handle failure case */
-               if (count < num) {
-                       for (i = 0; i < count; i++) {
-                               hw->blk[blk].prof.resource_used_hack[res[i]] =
-                                       false;
-                               res[i] = 0;
-                       }
-
-                       return ICE_ERR_AQ_ERROR;
-               }
-       } else {
-               /* range of entries based on PF */
-               max = hw->blk[blk].es.count / ICE_WA_PF_COUNT;
-               first = max * hw->pf_id;
-               last = first + max;
-
-               /* FV index - start at non-zero index for Field vector table
-                * The first few entries are for bypass, default and errors
-                * (only relevant for PF 0)
-                */
-               first += hw->pf_id ? 0 : ICE_WA_1ST_FV;
-
-               for (i = first; i < last && count < num; i++) {
-                       if (!hw->blk[blk].es.resource_used_hack[i]) {
-                               res[count++] = i;
-                               hw->blk[blk].es.resource_used_hack[i] = true;
-                       }
-               }
-
-               /* handle failure case */
-               if (count < num) {
-                       for (i = 0; i < count; i++) {
-                               hw->blk[blk].es.resource_used_hack[res[i]] =
-                                       false;
-                               res[i] = 0;
-                       }
-
-                       return ICE_ERR_AQ_ERROR;
-               }
-       }
-
-       return ICE_SUCCESS;
-}
-
-/**
- * ice_free_res_workaround
- * @hw: pointer to the hw struct
- * @type: type of resource to free
- * @num: number of resources
- * @res: array of resource ids to free
- */
-static enum ice_status
-ice_free_res_workaround(struct ice_hw *hw, u16 type, u16 num, u16 *res)
-{
-       enum ice_block blk;
-       bool tcam = false;
-       u16 i;
-
-       if (ice_workaround_get_res_blk(type, &blk, &tcam))
-               return ICE_ERR_AQ_ERROR;
-
-       if (tcam) {
-               /* TCAM entries */
-               for (i = 0; i < num; i++) {
-                       if (res[i] < hw->blk[blk].prof.count) {
-                               u16 idx = res[i];
-
-                               ice_free_hw_res(hw, type, 1, &idx);
-                               hw->blk[blk].prof.resource_used_hack[res[i]] =
-                                       false;
-                       }
-               }
-
-       } else {
-               /* Profile IDs */
-               for (i = 0; i < num; i++) {
-                       if (res[i] < hw->blk[blk].es.count) {
-                               u16 idx = res[i];
-
-                               ice_free_hw_res(hw, type, 1, &idx);
-                               hw->blk[blk].es.resource_used_hack[res[i]] =
-                                       false;
-                       }
-               }
-       }
-
-       return ICE_SUCCESS;
-}
-
 /**
  * ice_alloc_tcam_ent - allocate hardware TCAM entry
  * @hw: pointer to the HW struct
  * @blk: the block to allocate the TCAM for
+ * @btm: true to allocate from bottom of table, false to allocate from top
  * @tcam_idx: pointer to variable to receive the TCAM entry
  *
  * This function allocates a new entry in a Profile ID TCAM for a specific
  * block.
  */
 static enum ice_status
-ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
+                  u16 *tcam_idx)
 {
        u16 res_type;
 
        if (!ice_tcam_ent_rsrc_type(blk, &res_type))
                return ICE_ERR_PARAM;
 
-       return ice_alloc_res_workaround(hw, res_type, 1, tcam_idx);
+       return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
 }
 
 /**
@@ -2742,7 +2927,7 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
        if (!ice_tcam_ent_rsrc_type(blk, &res_type))
                return ICE_ERR_PARAM;
 
-       return ice_free_res_workaround(hw, res_type, 1, &tcam_idx);
+       return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
 }
 
 /**
@@ -2764,7 +2949,7 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
        if (!ice_prof_id_rsrc_type(blk, &res_type))
                return ICE_ERR_PARAM;
 
-       status = ice_alloc_res_workaround(hw, res_type, 1, &get_prof);
+       status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
        if (!status)
                *prof_id = (u8)get_prof;
 
@@ -2788,15 +2973,7 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
        if (!ice_prof_id_rsrc_type(blk, &res_type))
                return ICE_ERR_PARAM;
 
-       return ice_free_res_workaround(hw, res_type, 1, &tmp_prof_id);
-       /* The following code is a WORKAROUND until DCR 076 is available.
-        * DCR 076 - Update to Profile ID TCAM Resource Allocation
-        *
-        * Once the DCR 076 changes are available in FW, this code can be
-        * restored. Original code:
-        *
-        * return ice_free_res(hw, res_type, 1, &tmp_prof_id);
-        */
+       return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
 }
 
 /**
@@ -2817,22 +2994,330 @@ ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
 }
 
 /**
- * ice_prof_dec_ref - decrement reference count for profile
+ * ice_write_prof_mask_reg - write profile mask register
  * @hw: pointer to the HW struct
- * @blk: the block from which to free the profile ID
- * @prof_id: the profile ID for which to decrement the reference count
+ * @blk: hardware block
+ * @mask_idx: mask index
+ * @idx: index of the FV which will use the mask
+ * @mask: the 16-bit mask
+ */
+static void
+ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
+                       u16 idx, u16 mask)
+{
+       u32 offset;
+       u32 val;
+
+       switch (blk) {
+       case ICE_BLK_RSS:
+               offset = GLQF_HMASK(mask_idx);
+               val = (idx << GLQF_HMASK_MSK_INDEX_S) &
+                       GLQF_HMASK_MSK_INDEX_M;
+               val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
+               break;
+       case ICE_BLK_FD:
+               offset = GLQF_FDMASK(mask_idx);
+               val = (idx << GLQF_FDMASK_MSK_INDEX_S) &
+                       GLQF_FDMASK_MSK_INDEX_M;
+               val |= (mask << GLQF_FDMASK_MASK_S) &
+                       GLQF_FDMASK_MASK_M;
+               break;
+       default:
+               ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
+                         blk);
+               return;
+       }
+
+       wr32(hw, offset, val);
+       ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
+                 blk, idx, offset, val);
+}
+
+/**
+ * ice_write_prof_mask_enable_res - write profile mask enable register
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof_id: profile ID
+ * @enable_mask: enable mask
+ */
+static void
+ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
+                              u16 prof_id, u32 enable_mask)
+{
+       u32 offset;
+
+       switch (blk) {
+       case ICE_BLK_RSS:
+               offset = GLQF_HMASK_SEL(prof_id);
+               break;
+       case ICE_BLK_FD:
+               offset = GLQF_FDMASK_SEL(prof_id);
+               break;
+       default:
+               ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
+                         blk);
+               return;
+       }
+
+       wr32(hw, offset, enable_mask);
+       ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
+                 blk, prof_id, offset, enable_mask);
+}
+
+/**
+ * ice_init_prof_masks - initialize profile masks
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ */
+static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
+{
+       u16 per_pf;
+       u16 i;
+
+       ice_init_lock(&hw->blk[blk].masks.lock);
+
+       per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
+
+       hw->blk[blk].masks.count = per_pf;
+       hw->blk[blk].masks.first = hw->pf_id * per_pf;
+
+       ice_memset(hw->blk[blk].masks.masks, 0,
+                  sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM);
+
+       for (i = hw->blk[blk].masks.first;
+            i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
+               ice_write_prof_mask_reg(hw, blk, i, 0, 0);
+}
+
+/**
+ * ice_init_all_prof_masks - initialize all profile masks
+ * @hw: pointer to the HW struct
+ */
+void ice_init_all_prof_masks(struct ice_hw *hw)
+{
+       ice_init_prof_masks(hw, ICE_BLK_RSS);
+       ice_init_prof_masks(hw, ICE_BLK_FD);
+}
+
+/**
+ * ice_alloc_prof_mask - allocate profile mask
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @idx: index of FV which will use the mask
+ * @mask: the 16-bit mask
+ * @mask_idx: variable to receive the mask index
  */
 static enum ice_status
-ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
+                   u16 *mask_idx)
 {
-       if (prof_id > hw->blk[blk].es.count)
+       bool found_unused = false, found_copy = false;
+       enum ice_status status = ICE_ERR_MAX_LIMIT;
+       u16 unused_idx = 0, copy_idx = 0;
+       u16 i;
+
+       if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
                return ICE_ERR_PARAM;
 
-       if (hw->blk[blk].es.ref_count[prof_id] > 0) {
-               if (!--hw->blk[blk].es.ref_count[prof_id])
-                       return ice_free_prof_id(hw, blk, prof_id);
+       ice_acquire_lock(&hw->blk[blk].masks.lock);
+
+       for (i = hw->blk[blk].masks.first;
+            i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
+               if (hw->blk[blk].masks.masks[i].in_use) {
+                       /* if mask is in use and it exactly duplicates the
+                        * desired mask and index, then it can be reused
+                        */
+                       if (hw->blk[blk].masks.masks[i].mask == mask &&
+                           hw->blk[blk].masks.masks[i].idx == idx) {
+                               found_copy = true;
+                               copy_idx = i;
+                               break;
+                       }
+               } else {
+                       /* save off unused index, but keep searching in case
+                        * there is an exact match later on
+                        */
+                       if (!found_unused) {
+                               found_unused = true;
+                               unused_idx = i;
+                       }
+               }
+
+       if (found_copy)
+               i = copy_idx;
+       else if (found_unused)
+               i = unused_idx;
+       else
+               goto err_ice_alloc_prof_mask;
+
+       /* update mask for a new entry */
+       if (found_unused) {
+               hw->blk[blk].masks.masks[i].in_use = true;
+               hw->blk[blk].masks.masks[i].mask = mask;
+               hw->blk[blk].masks.masks[i].idx = idx;
+               hw->blk[blk].masks.masks[i].ref = 0;
+               ice_write_prof_mask_reg(hw, blk, i, idx, mask);
+       }
+
+       hw->blk[blk].masks.masks[i].ref++;
+       *mask_idx = i;
+       status = ICE_SUCCESS;
+
+err_ice_alloc_prof_mask:
+       ice_release_lock(&hw->blk[blk].masks.lock);
+
+       return status;
+}
+
+/**
+ * ice_free_prof_mask - free profile mask
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @mask_idx: index of mask
+ */
+static enum ice_status
+ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
+{
+       if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
+               return ICE_ERR_PARAM;
+
+       if (!(mask_idx >= hw->blk[blk].masks.first &&
+             mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       ice_acquire_lock(&hw->blk[blk].masks.lock);
+
+       if (!hw->blk[blk].masks.masks[mask_idx].in_use)
+               goto exit_ice_free_prof_mask;
+
+       if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
+               hw->blk[blk].masks.masks[mask_idx].ref--;
+               goto exit_ice_free_prof_mask;
+       }
+
+       /* remove mask */
+       hw->blk[blk].masks.masks[mask_idx].in_use = false;
+       hw->blk[blk].masks.masks[mask_idx].mask = 0;
+       hw->blk[blk].masks.masks[mask_idx].idx = 0;
+
+       /* update mask as unused entry */
+       ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
+                 mask_idx);
+       ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
+
+exit_ice_free_prof_mask:
+       ice_release_lock(&hw->blk[blk].masks.lock);
+
+       return ICE_SUCCESS;
+}
+
+/**
+ * ice_free_prof_masks - free all profile masks for a profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof_id: profile ID
+ */
+static enum ice_status
+ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
+{
+       u32 mask_bm;
+       u16 i;
+
+       if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
+               return ICE_ERR_PARAM;
+
+       mask_bm = hw->blk[blk].es.mask_ena[prof_id];
+       for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
+               if (mask_bm & BIT(i))
+                       ice_free_prof_mask(hw, blk, i);
+
+       return ICE_SUCCESS;
+}
+
+/**
+ * ice_shutdown_prof_masks - releases lock for masking
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ *
+ * This should be called before unloading the driver
+ */
+static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
+{
+       u16 i;
+
+       ice_acquire_lock(&hw->blk[blk].masks.lock);
+
+       for (i = hw->blk[blk].masks.first;
+            i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
+               ice_write_prof_mask_reg(hw, blk, i, 0, 0);
+
+               hw->blk[blk].masks.masks[i].in_use = false;
+               hw->blk[blk].masks.masks[i].idx = 0;
+               hw->blk[blk].masks.masks[i].mask = 0;
        }
 
+       ice_release_lock(&hw->blk[blk].masks.lock);
+       ice_destroy_lock(&hw->blk[blk].masks.lock);
+}
+
+/**
+ * ice_shutdown_all_prof_masks - releases all locks for masking
+ * @hw: pointer to the HW struct
+ *
+ * This should be called before unloading the driver
+ */
+void ice_shutdown_all_prof_masks(struct ice_hw *hw)
+{
+       ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
+       ice_shutdown_prof_masks(hw, ICE_BLK_FD);
+}
+
+/**
+ * ice_update_prof_masking - set registers according to masking
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof_id: profile ID
+ * @masks: masks
+ */
+static enum ice_status
+ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
+                       u16 *masks)
+{
+       bool err = false;
+       u32 ena_mask = 0;
+       u16 idx;
+       u16 i;
+
+       /* Only support FD and RSS masking, otherwise nothing to be done */
+       if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
+               return ICE_SUCCESS;
+
+       for (i = 0; i < hw->blk[blk].es.fvw; i++)
+               if (masks[i] && masks[i] != 0xFFFF) {
+                       if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
+                               ena_mask |= BIT(idx);
+                       } else {
+                               /* not enough bitmaps */
+                               err = true;
+                               break;
+                       }
+               }
+
+       if (err) {
+               /* free any bitmaps we have allocated */
+               for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
+                       if (ena_mask & BIT(i))
+                               ice_free_prof_mask(hw, blk, i);
+
+               return ICE_ERR_OUT_OF_RANGE;
+       }
+
+       /* enable the masks for this profile */
+       ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
+
+       /* store enabled masks with profile so that they can be freed later */
+       hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
+
        return ICE_SUCCESS;
 }
 
@@ -2841,7 +3326,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
  * @hw: pointer to the HW struct
  * @blk: the block in which to write the extraction sequence
  * @prof_id: the profile ID to write
- * @fv: pointer to the extraction sequence to write
+ * @fv: pointer to the extraction sequence to write - NULL to clear extraction
  */
 static void
 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
@@ -2850,8 +3335,37 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
        u16 off;
 
        off = prof_id * hw->blk[blk].es.fvw;
-       ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw * 2,
-                  ICE_NONDMA_TO_NONDMA);
+       if (!fv) {
+               ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
+                          sizeof(*fv), ICE_NONDMA_MEM);
+               hw->blk[blk].es.written[prof_id] = false;
+       } else {
+               ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
+                          sizeof(*fv), ICE_NONDMA_TO_NONDMA);
+       }
+}
+
+/**
+ * ice_prof_dec_ref - decrement reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID for which to decrement the reference count
+ */
+static enum ice_status
+ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+       if (prof_id > hw->blk[blk].es.count)
+               return ICE_ERR_PARAM;
+
+       if (hw->blk[blk].es.ref_count[prof_id] > 0) {
+               if (!--hw->blk[blk].es.ref_count[prof_id]) {
+                       ice_write_es(hw, blk, prof_id, NULL);
+                       ice_free_prof_masks(hw, blk, prof_id);
+                       return ice_free_prof_id(hw, blk, prof_id);
+               }
+       }
+
+       return ICE_SUCCESS;
 }
 
 /* Block / table section IDs */
@@ -2897,6 +3411,64 @@ static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
        }
 };
 
+/**
+ * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block to initialize
+ */
+static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
+{
+       u16 pt;
+
+       for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
+               u8 ptg;
+
+               ptg = hw->blk[blk].xlt1.t[pt];
+               if (ptg != ICE_DEFAULT_PTG) {
+                       ice_ptg_alloc_val(hw, blk, ptg);
+                       ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
+               }
+       }
+}
+
+/**
+ * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block to initialize
+ */
+static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
+{
+       u16 vsi;
+
+       for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
+               u16 vsig;
+
+               vsig = hw->blk[blk].xlt2.t[vsi];
+               if (vsig) {
+                       ice_vsig_alloc_val(hw, blk, vsig);
+                       ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+                       /* no changes at this time, since this has been
+                        * initialized from the original package
+                        */
+                       hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+               }
+       }
+}
+
+/**
+ * ice_init_sw_db - init software database from HW tables
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_sw_db(struct ice_hw *hw)
+{
+       u16 i;
+
+       for (i = 0; i < ICE_BLK_COUNT; i++) {
+               ice_init_sw_xlt1_db(hw, (enum ice_block)i);
+               ice_init_sw_xlt2_db(hw, (enum ice_block)i);
+       }
+}
+
 /**
  * ice_fill_tbl - Reads content of a single table type into database
  * @hw: pointer to the hardware structure
@@ -2922,7 +3494,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
        void *sect;
 
        /* if the HW segment pointer is null then the first iteration of
-        * ice_pkg_enum_section() will fail. In this case the Hw tables will
+        * ice_pkg_enum_section() will fail. In this case the HW tables will
         * not be filled and return success.
         */
        if (!hw->seg) {
@@ -2936,9 +3508,11 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
 
        while (sect) {
                switch (sid) {
+               case ICE_SID_XLT1_SW:
                case ICE_SID_XLT1_FD:
                case ICE_SID_XLT1_RSS:
                case ICE_SID_XLT1_ACL:
+               case ICE_SID_XLT1_PE:
                        xlt1 = (struct ice_xlt1_section *)sect;
                        src = xlt1->value;
                        sect_len = LE16_TO_CPU(xlt1->count) *
@@ -2947,20 +3521,24 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
                        dst_len = hw->blk[block_id].xlt1.count *
                                sizeof(*hw->blk[block_id].xlt1.t);
                        break;
+               case ICE_SID_XLT2_SW:
                case ICE_SID_XLT2_FD:
                case ICE_SID_XLT2_RSS:
                case ICE_SID_XLT2_ACL:
+               case ICE_SID_XLT2_PE:
                        xlt2 = (struct ice_xlt2_section *)sect;
-                       src = (u8 *)xlt2->value;
+                       src = (_FORCE_ u8 *)xlt2->value;
                        sect_len = LE16_TO_CPU(xlt2->count) *
                                sizeof(*hw->blk[block_id].xlt2.t);
                        dst = (u8 *)hw->blk[block_id].xlt2.t;
                        dst_len = hw->blk[block_id].xlt2.count *
                                sizeof(*hw->blk[block_id].xlt2.t);
                        break;
+               case ICE_SID_PROFID_TCAM_SW:
                case ICE_SID_PROFID_TCAM_FD:
                case ICE_SID_PROFID_TCAM_RSS:
                case ICE_SID_PROFID_TCAM_ACL:
+               case ICE_SID_PROFID_TCAM_PE:
                        pid = (struct ice_prof_id_section *)sect;
                        src = (u8 *)pid->entry;
                        sect_len = LE16_TO_CPU(pid->count) *
@@ -2969,9 +3547,11 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
                        dst_len = hw->blk[block_id].prof.count *
                                sizeof(*hw->blk[block_id].prof.t);
                        break;
+               case ICE_SID_PROFID_REDIR_SW:
                case ICE_SID_PROFID_REDIR_FD:
                case ICE_SID_PROFID_REDIR_RSS:
                case ICE_SID_PROFID_REDIR_ACL:
+               case ICE_SID_PROFID_REDIR_PE:
                        pr = (struct ice_prof_redir_section *)sect;
                        src = pr->redir_value;
                        sect_len = LE16_TO_CPU(pr->count) *
@@ -2980,15 +3560,19 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
                        dst_len = hw->blk[block_id].prof_redir.count *
                                sizeof(*hw->blk[block_id].prof_redir.t);
                        break;
+               case ICE_SID_FLD_VEC_SW:
                case ICE_SID_FLD_VEC_FD:
                case ICE_SID_FLD_VEC_RSS:
                case ICE_SID_FLD_VEC_ACL:
+               case ICE_SID_FLD_VEC_PE:
                        es = (struct ice_sw_fv_section *)sect;
                        src = (u8 *)es->fv;
-                       sect_len = LE16_TO_CPU(es->count) *
-                               sizeof(*hw->blk[block_id].prof_redir.t);
+                       sect_len = (u32)(LE16_TO_CPU(es->count) *
+                                        hw->blk[block_id].es.fvw) *
+                               sizeof(*hw->blk[block_id].es.t);
                        dst = (u8 *)hw->blk[block_id].es.t;
-                       dst_len = hw->blk[block_id].es.count *
+                       dst_len = (u32)(hw->blk[block_id].es.count *
+                                       hw->blk[block_id].es.fvw) *
                                sizeof(*hw->blk[block_id].es.t);
                        break;
                default:
@@ -3002,7 +3586,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
                        return;
 
                /* if the sum of section size and offset exceed destination size
-                * then we are out of bounds of the Hw table size for that PF.
+                * then we are out of bounds of the HW table size for that PF.
                 * Changing section length to fill the remaining table space
                 * of that PF.
                 */
@@ -3016,76 +3600,82 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
 }
 
 /**
- * ice_fill_blk_tbls - Read package content for tables of a block
+ * ice_fill_blk_tbls - Read package context for tables
  * @hw: pointer to the hardware structure
- * @block_id: The block ID which contains the tables to be copied
  *
  * Reads the current package contents and populates the driver
- * database with the data it contains to allow for advanced driver
- * features.
+ * database with the data iteratively for all advanced feature
+ * blocks. Assume that the HW tables have been allocated.
  */
-static void ice_fill_blk_tbls(struct ice_hw *hw, enum ice_block block_id)
-{
-       ice_fill_tbl(hw, block_id, hw->blk[block_id].xlt1.sid);
-       ice_fill_tbl(hw, block_id, hw->blk[block_id].xlt2.sid);
-       ice_fill_tbl(hw, block_id, hw->blk[block_id].prof.sid);
-       ice_fill_tbl(hw, block_id, hw->blk[block_id].prof_redir.sid);
-       ice_fill_tbl(hw, block_id, hw->blk[block_id].es.sid);
-}
-
-/**
- * ice_free_flow_profs - free flow profile entries
- * @hw: pointer to the hardware structure
- */
-static void ice_free_flow_profs(struct ice_hw *hw)
+void ice_fill_blk_tbls(struct ice_hw *hw)
 {
        u8 i;
 
        for (i = 0; i < ICE_BLK_COUNT; i++) {
-               struct ice_flow_prof *p, *tmp;
-
-               if (!&hw->fl_profs[i])
-                       continue;
-
-               /* This call is being made as part of resource deallocation
-                * during unload. Lock acquire and release will not be
-                * necessary here.
-                */
-               LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[i],
-                                        ice_flow_prof, l_entry) {
-                       struct ice_flow_entry *e, *t;
-
-                       LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
-                                                ice_flow_entry, l_entry)
-                               ice_flow_rem_entry(hw, ICE_FLOW_ENTRY_HNDL(e));
+               enum ice_block blk_id = (enum ice_block)i;
 
-                       LIST_DEL(&p->l_entry);
-                       if (p->acts)
-                               ice_free(hw, p->acts);
-                       ice_free(hw, p);
-               }
-
-               ice_destroy_lock(&hw->fl_profs_locks[i]);
+               ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
+               ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
+               ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
+               ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
+               ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
        }
+
+       ice_init_sw_db(hw);
 }
 
 /**
- * ice_free_prof_map - frees the profile map
+ * ice_free_prof_map - free profile map
  * @hw: pointer to the hardware structure
- * @blk: the HW block which contains the profile map to be freed
+ * @blk_idx: HW block index
  */
-static void ice_free_prof_map(struct ice_hw *hw, enum ice_block blk)
+static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
 {
+       struct ice_es *es = &hw->blk[blk_idx].es;
        struct ice_prof_map *del, *tmp;
 
-       if (LIST_EMPTY(&hw->blk[blk].es.prof_map))
-               return;
-
-       LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &hw->blk[blk].es.prof_map,
+       ice_acquire_lock(&es->prof_map_lock);
+       LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
                                 ice_prof_map, list) {
                LIST_DEL(&del->list);
                ice_free(hw, del);
        }
+       INIT_LIST_HEAD(&es->prof_map);
+       ice_release_lock(&es->prof_map_lock);
+}
+
+/**
+ * ice_free_flow_profs - free flow profile entries
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+       struct ice_flow_prof *p, *tmp;
+
+       ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
+       LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
+                                ice_flow_prof, l_entry) {
+               struct ice_flow_entry *e, *t;
+
+               LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
+                                        ice_flow_entry, l_entry)
+                       ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
+                                          ICE_FLOW_ENTRY_HNDL(e));
+
+               LIST_DEL(&p->l_entry);
+               if (p->acts)
+                       ice_free(hw, p->acts);
+
+               ice_destroy_lock(&p->entries_lock);
+               ice_free(hw, p);
+       }
+       ice_release_lock(&hw->fl_profs_locks[blk_idx]);
+
+       /* if driver is in reset and tables are being cleared
+        * re-initialize the flow profile list heads
+        */
+       INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
 }
 
 /**
@@ -3111,10 +3701,20 @@ static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
  */
 void ice_free_hw_tbls(struct ice_hw *hw)
 {
+       struct ice_rss_cfg *r, *rt;
        u8 i;
 
        for (i = 0; i < ICE_BLK_COUNT; i++) {
-               ice_free_prof_map(hw, (enum ice_block)i);
+               if (hw->blk[i].is_list_init) {
+                       struct ice_es *es = &hw->blk[i].es;
+
+                       ice_free_prof_map(hw, i);
+                       ice_destroy_lock(&es->prof_map_lock);
+                       ice_free_flow_profs(hw, i);
+                       ice_destroy_lock(&hw->fl_profs_locks[i]);
+
+                       hw->blk[i].is_list_init = false;
+               }
                ice_free_vsig_tbl(hw, (enum ice_block)i);
                ice_free(hw, hw->blk[i].xlt1.ptypes);
                ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
@@ -3126,88 +3726,84 @@ void ice_free_hw_tbls(struct ice_hw *hw)
                ice_free(hw, hw->blk[i].prof_redir.t);
                ice_free(hw, hw->blk[i].es.t);
                ice_free(hw, hw->blk[i].es.ref_count);
+               ice_free(hw, hw->blk[i].es.written);
+               ice_free(hw, hw->blk[i].es.mask_ena);
+       }
 
-               ice_free(hw, hw->blk[i].es.resource_used_hack);
-               ice_free(hw, hw->blk[i].prof.resource_used_hack);
-       }
-
-       ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
-
-       ice_free_flow_profs(hw);
-}
-
-/**
- * ice_init_flow_profs - init flow profile locks and list heads
- * @hw: pointer to the hardware structure
- */
-static void ice_init_flow_profs(struct ice_hw *hw)
-{
-       u8 i;
-
-       for (i = 0; i < ICE_BLK_COUNT; i++) {
-               ice_init_lock(&hw->fl_profs_locks[i]);
-               INIT_LIST_HEAD(&hw->fl_profs[i]);
+       LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
+                                ice_rss_cfg, l_entry) {
+               LIST_DEL(&r->l_entry);
+               ice_free(hw, r);
        }
+       ice_destroy_lock(&hw->rss_locks);
+       if (!hw->dcf_enabled)
+               ice_shutdown_all_prof_masks(hw);
+       ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
 }
 
 /**
- * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
+ * ice_init_flow_profs - init flow profile locks and list heads
  * @hw: pointer to the hardware structure
- * @blk: the HW block to initialize
+ * @blk_idx: HW block index
  */
-static
-void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
+static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
 {
-       u16 pt;
-
-       for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
-               u8 ptg;
-
-               ptg = hw->blk[blk].xlt1.t[pt];
-               if (ptg != ICE_DEFAULT_PTG) {
-                       ice_ptg_alloc_val(hw, blk, ptg);
-                       ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
-               }
-       }
+       ice_init_lock(&hw->fl_profs_locks[blk_idx]);
+       INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
 }
 
 /**
- * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
+ * ice_clear_hw_tbls - clear HW tables and flow profiles
  * @hw: pointer to the hardware structure
- * @blk: the HW block to initialize
  */
-static
-void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
+void ice_clear_hw_tbls(struct ice_hw *hw)
 {
-       u16 vsi;
+       u8 i;
 
-       for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
-               u16 vsig;
+       for (i = 0; i < ICE_BLK_COUNT; i++) {
+               struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+               struct ice_prof_tcam *prof = &hw->blk[i].prof;
+               struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+               struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+               struct ice_es *es = &hw->blk[i].es;
 
-               vsig = hw->blk[blk].xlt2.t[vsi];
-               if (vsig) {
-                       ice_vsig_alloc_val(hw, blk, vsig);
-                       ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
-                       /* no changes at this time, since this has been
-                        * initialized from the original package
-                        */
-                       hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+               if (hw->blk[i].is_list_init) {
+                       ice_free_prof_map(hw, i);
+                       ice_free_flow_profs(hw, i);
                }
-       }
-}
 
-/**
- * ice_init_sw_db - init software database from HW tables
- * @hw: pointer to the hardware structure
- */
-static
-void ice_init_sw_db(struct ice_hw *hw)
-{
-       u16 i;
+               ice_free_vsig_tbl(hw, (enum ice_block)i);
 
-       for (i = 0; i < ICE_BLK_COUNT; i++) {
-               ice_init_sw_xlt1_db(hw, (enum ice_block)i);
-               ice_init_sw_xlt2_db(hw, (enum ice_block)i);
+               ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
+                          ICE_NONDMA_MEM);
+               ice_memset(xlt1->ptg_tbl, 0,
+                          ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
+                          ICE_NONDMA_MEM);
+               ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
+                          ICE_NONDMA_MEM);
+
+               ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
+                          ICE_NONDMA_MEM);
+               ice_memset(xlt2->vsig_tbl, 0,
+                          xlt2->count * sizeof(*xlt2->vsig_tbl),
+                          ICE_NONDMA_MEM);
+               ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
+                          ICE_NONDMA_MEM);
+
+               ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
+                          ICE_NONDMA_MEM);
+               ice_memset(prof_redir->t, 0,
+                          prof_redir->count * sizeof(*prof_redir->t),
+                          ICE_NONDMA_MEM);
+
+               ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
+                          ICE_NONDMA_MEM);
+               ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
+                          ICE_NONDMA_MEM);
+               ice_memset(es->written, 0, es->count * sizeof(*es->written),
+                          ICE_NONDMA_MEM);
+               ice_memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena),
+                          ICE_NONDMA_MEM);
        }
 }
 
@@ -3219,14 +3815,25 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
 {
        u8 i;
 
-       ice_init_flow_profs(hw);
-
+       ice_init_lock(&hw->rss_locks);
+       INIT_LIST_HEAD(&hw->rss_list_head);
+       if (!hw->dcf_enabled)
+               ice_init_all_prof_masks(hw);
        for (i = 0; i < ICE_BLK_COUNT; i++) {
                struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
                struct ice_prof_tcam *prof = &hw->blk[i].prof;
                struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
                struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
                struct ice_es *es = &hw->blk[i].es;
+               u16 j;
+
+               if (hw->blk[i].is_list_init)
+                       continue;
+
+               ice_init_flow_profs(hw, i);
+               ice_init_lock(&es->prof_map_lock);
+               INIT_LIST_HEAD(&es->prof_map);
+               hw->blk[i].is_list_init = true;
 
                hw->blk[i].overwrite = blk_sizes[i].overwrite;
                es->reverse = blk_sizes[i].reverse;
@@ -3264,6 +3871,9 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
                if (!xlt2->vsig_tbl)
                        goto err;
 
+               for (j = 0; j < xlt2->count; j++)
+                       INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
+
                xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
                if (!xlt2->t)
                        goto err;
@@ -3290,37 +3900,21 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
                es->count = blk_sizes[i].es;
                es->fvw = blk_sizes[i].fvw;
                es->t = (struct ice_fv_word *)
-                       ice_calloc(hw, es->count * es->fvw, sizeof(*es->t));
-
+                       ice_calloc(hw, (u32)(es->count * es->fvw),
+                                  sizeof(*es->t));
                if (!es->t)
                        goto err;
 
                es->ref_count = (u16 *)
                        ice_calloc(hw, es->count, sizeof(*es->ref_count));
 
+               es->written = (u8 *)
+                       ice_calloc(hw, es->count, sizeof(*es->written));
+               es->mask_ena = (u32 *)
+                       ice_calloc(hw, es->count, sizeof(*es->mask_ena));
                if (!es->ref_count)
                        goto err;
-
-               es->resource_used_hack = (u8 *)
-                       ice_calloc(hw, hw->blk[i].es.count, sizeof(u8));
-
-               if (!es->resource_used_hack)
-                       goto err;
-
-               prof->resource_used_hack = (u8 *)ice_calloc(hw, prof->count,
-                                                           sizeof(u8));
-
-               if (!prof->resource_used_hack)
-                       goto err;
-
-               INIT_LIST_HEAD(&es->prof_map);
-
-               /* Now that tables are allocated, read in package data */
-               ice_fill_blk_tbls(hw, (enum ice_block)i);
        }
-
-       ice_init_sw_db(hw);
-
        return ICE_SUCCESS;
 
 err:
@@ -3334,7 +3928,7 @@ err:
  * @blk: the block in which to write profile ID to
  * @ptg: packet type group (PTG) portion of key
  * @vsig: VSIG portion of key
- * @cdid: cdid portion of key
+ * @cdid: CDID portion of key
  * @flags: flag portion of key
  * @vl_msk: valid mask
  * @dc_msk: don't care mask
@@ -3377,7 +3971,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
        default:
                ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
                break;
-       };
+       }
 
        return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
                           nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
@@ -3391,7 +3985,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
  * @prof_id: profile ID
  * @ptg: packet type group (PTG) portion of key
  * @vsig: VSIG portion of key
- * @cdid: cdid portion of key
+ * @cdid: CDID portion of key
  * @flags: flag portion of key
  * @vl_msk: valid mask
  * @dc_msk: don't care mask
@@ -3429,6 +4023,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
 {
        u16 idx = vsig & ICE_VSIG_IDX_M;
        struct ice_vsig_vsi *ptr;
+
        *refs = 0;
 
        if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
@@ -3443,43 +4038,6 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
        return ICE_SUCCESS;
 }
 
-/**
- * ice_get_ptg - get or allocate a ptg for a ptype
- * @hw: pointer to the hardware structure
- * @blk: HW block
- * @ptype: the ptype to retrieve the PTG for
- * @ptg: receives the PTG of the ptype
- * @add: receive boolean indicating whether PTG was added or not
- */
-static enum ice_status
-ice_get_ptg(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg,
-           bool *add)
-{
-       enum ice_status status;
-
-       *ptg = ICE_DEFAULT_PTG;
-       *add = false;
-
-       status = ice_ptg_find_ptype(hw, blk, ptype, ptg);
-       if (status)
-               return status;
-
-       if (*ptg == ICE_DEFAULT_PTG) {
-               /* need to allocate a PTG, and add ptype to it */
-               *ptg = ice_ptg_alloc(hw, blk);
-               if (*ptg == ICE_DEFAULT_PTG)
-                       return ICE_ERR_HW_TABLE;
-
-               status = ice_ptg_add_mv_ptype(hw, blk, ptype, *ptg);
-               if (status)
-                       return ICE_ERR_HW_TABLE;
-
-               *add = true;
-       }
-
-       return ICE_SUCCESS;
-};
-
 /**
  * ice_has_prof_vsig - check to see if VSIG has a specific profile
  * @hw: pointer to the hardware structure
@@ -3494,10 +4052,9 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
        struct ice_vsig_prof *ent;
 
        LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
-                           ice_vsig_prof, list) {
+                           ice_vsig_prof, list)
                if (ent->profile_cookie == hdl)
                        return true;
-       }
 
        ice_debug(hw, ICE_DBG_INIT,
                  "Characteristic list for VSI group %d not found.\n",
@@ -3519,7 +4076,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
        u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
        struct ice_chs_chg *tmp;
 
-       LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+       LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
                if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
                        u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
                        struct ice_pkg_es *p;
@@ -3540,7 +4097,6 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
                        ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
                                   ICE_NONDMA_TO_NONDMA);
                }
-       }
 
        return ICE_SUCCESS;
 }
@@ -3558,9 +4114,8 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
 {
        struct ice_chs_chg *tmp;
 
-       LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
-               if ((tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) ||
-                   tmp->type == ICE_TCAM_REM) {
+       LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
+               if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
                        struct ice_prof_id_section *p;
                        u32 id;
 
@@ -3580,7 +4135,6 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
                                   sizeof(hw->blk[blk].prof.t->key),
                                   ICE_NONDMA_TO_NONDMA);
                }
-       }
 
        return ICE_SUCCESS;
 }
@@ -3597,7 +4151,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
 {
        struct ice_chs_chg *tmp;
 
-       LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+       LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
                if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
                        struct ice_xlt1_section *p;
                        u32 id;
@@ -3613,7 +4167,6 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
                        p->offset = CPU_TO_LE16(tmp->ptype);
                        p->value[0] = tmp->ptg;
                }
-       }
 
        return ICE_SUCCESS;
 }
@@ -3631,19 +4184,13 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
        struct ice_chs_chg *tmp;
 
        LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
-               bool found = false;
-
-               if (tmp->type == ICE_VSIG_ADD)
-                       found = true;
-               else if (tmp->type == ICE_VSI_MOVE)
-                       found = true;
-               else if (tmp->type == ICE_VSIG_REM)
-                       found = true;
-
-               if (found) {
-                       struct ice_xlt2_section *p;
-                       u32 id;
+               struct ice_xlt2_section *p;
+               u32 id;
 
+               switch (tmp->type) {
+               case ICE_VSIG_ADD:
+               case ICE_VSI_MOVE:
+               case ICE_VSIG_REM:
                        id = ice_sect_id(blk, ICE_XLT2);
                        p = (struct ice_xlt2_section *)
                                ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
@@ -3654,6 +4201,9 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
                        p->count = CPU_TO_LE16(1);
                        p->offset = CPU_TO_LE16(tmp->vsi);
                        p->value[0] = CPU_TO_LE16(tmp->vsig);
+                       break;
+               default:
+                       break;
                }
        }
 
@@ -3673,12 +4223,12 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
        struct ice_buf_build *b;
        struct ice_chs_chg *tmp;
        enum ice_status status;
-       u16 pkg_sects = 0;
-       u16 sects = 0;
+       u16 pkg_sects;
        u16 xlt1 = 0;
        u16 xlt2 = 0;
        u16 tcam = 0;
        u16 es = 0;
+       u16 sects;
 
        /* count number of sections we need */
        LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
@@ -3690,7 +4240,6 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
                                es++;
                        break;
                case ICE_TCAM_ADD:
-               case ICE_TCAM_REM:
                        tcam++;
                        break;
                case ICE_VSIG_ADD:
@@ -3754,20 +4303,283 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
        /* update package */
        status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
        if (status == ICE_ERR_AQ_ERROR)
-               ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile.");
+               ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
 
 error_tmp:
        ice_pkg_buf_free(hw, b);
        return status;
 }
 
+/**
+ * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @mask_sel: mask select
+ *
+ * This function enable any of the masks selected by the mask select parameter
+ * for the profile specified.
+ */
+static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
+{
+       wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
+
+       ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
+                 GLQF_FDMASK_SEL(prof_id), mask_sel);
+}
+
+struct ice_fd_src_dst_pair {
+       u8 prot_id;
+       u8 count;
+       u16 off;
+};
+
+static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
+       /* These are defined in pairs */
+       { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
+       { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
+
+       { ICE_PROT_IPV4_IL, 2, 12 },
+       { ICE_PROT_IPV4_IL, 2, 16 },
+
+       { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
+       { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
+
+       { ICE_PROT_IPV6_IL, 8, 8 },
+       { ICE_PROT_IPV6_IL, 8, 24 },
+
+       { ICE_PROT_TCP_IL, 1, 0 },
+       { ICE_PROT_TCP_IL, 1, 2 },
+
+       { ICE_PROT_UDP_OF, 1, 0 },
+       { ICE_PROT_UDP_OF, 1, 2 },
+
+       { ICE_PROT_UDP_IL_OR_S, 1, 0 },
+       { ICE_PROT_UDP_IL_OR_S, 1, 2 },
+
+       { ICE_PROT_SCTP_IL, 1, 0 },
+       { ICE_PROT_SCTP_IL, 1, 2 }
+};
+
+#define ICE_FD_SRC_DST_PAIR_COUNT      ARRAY_SIZE(ice_fd_pairs)
+
+/**
+ * ice_update_fd_swap - set register appropriately for a FD FV extraction
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @es: extraction sequence (length of array is determined by the block)
+ */
+static enum ice_status
+ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
+{
+       ice_declare_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
+       u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
+#define ICE_FD_FV_NOT_FOUND (-2)
+       s8 first_free = ICE_FD_FV_NOT_FOUND;
+       u8 used[ICE_MAX_FV_WORDS] = { 0 };
+       s8 orig_free, si;
+       u32 mask_sel = 0;
+       u8 i, j, k;
+
+       ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
+
+       /* This code assumes that the Flow Director field vectors are assigned
+        * from the end of the FV indexes working towards the zero index, that
+        * only complete fields will be included and will be consecutive, and
+        * that there are no gaps between valid indexes.
+        */
+
+       /* Determine swap fields present */
+       for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
+               /* Find the first free entry, assuming right to left population.
+                * This is where we can start adding additional pairs if needed.
+                */
+               if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
+                   ICE_PROT_INVALID)
+                       first_free = i - 1;
+
+               for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
+                       if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
+                           es[i].off == ice_fd_pairs[j].off) {
+                               ice_set_bit(j, pair_list);
+                               pair_start[j] = i;
+                       }
+       }
+
+       orig_free = first_free;
+
+       /* determine missing swap fields that need to be added */
+       for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
+               u8 bit1 = ice_is_bit_set(pair_list, i + 1);
+               u8 bit0 = ice_is_bit_set(pair_list, i);
+
+               if (bit0 ^ bit1) {
+                       u8 index;
+
+                       /* add the appropriate 'paired' entry */
+                       if (!bit0)
+                               index = i;
+                       else
+                               index = i + 1;
+
+                       /* check for room */
+                       if (first_free + 1 < (s8)ice_fd_pairs[index].count)
+                               return ICE_ERR_MAX_LIMIT;
+
+                       /* place in extraction sequence */
+                       for (k = 0; k < ice_fd_pairs[index].count; k++) {
+                               es[first_free - k].prot_id =
+                                       ice_fd_pairs[index].prot_id;
+                               es[first_free - k].off =
+                                       ice_fd_pairs[index].off + (k * 2);
+
+                               if (k > first_free)
+                                       return ICE_ERR_OUT_OF_RANGE;
+
+                               /* keep track of non-relevant fields */
+                               mask_sel |= BIT(first_free - k);
+                       }
+
+                       pair_start[index] = first_free;
+                       first_free -= ice_fd_pairs[index].count;
+               }
+       }
+
+       /* fill in the swap array */
+       si = hw->blk[ICE_BLK_FD].es.fvw - 1;
+       while (si >= 0) {
+               u8 indexes_used = 1;
+
+               /* assume flat at this index */
+#define ICE_SWAP_VALID 0x80
+               used[si] = si | ICE_SWAP_VALID;
+
+               if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
+                       si -= indexes_used;
+                       continue;
+               }
+
+               /* check for a swap location */
+               for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
+                       if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
+                           es[si].off == ice_fd_pairs[j].off) {
+                               u8 idx;
+
+                               /* determine the appropriate matching field */
+                               idx = j + ((j % 2) ? -1 : 1);
+
+                               indexes_used = ice_fd_pairs[idx].count;
+                               for (k = 0; k < indexes_used; k++) {
+                                       used[si - k] = (pair_start[idx] - k) |
+                                               ICE_SWAP_VALID;
+                               }
+
+                               break;
+                       }
+
+               si -= indexes_used;
+       }
+
+       /* for each set of 4 swap and 4 inset indexes, write the appropriate
+        * register
+        */
+       for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
+               u32 raw_swap = 0;
+               u32 raw_in = 0;
+
+               for (k = 0; k < 4; k++) {
+                       u8 idx;
+
+                       idx = (j * 4) + k;
+                       if (used[idx] && !(mask_sel & BIT(idx))) {
+                               raw_swap |= used[idx] << (k * BITS_PER_BYTE);
+#define ICE_INSET_DFLT 0x9f
+                               raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
+                       }
+               }
+
+               /* write the appropriate swap register set */
+               wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+               ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
+                         prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+               /* write the appropriate inset register set */
+               wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
+
+               ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
+                         prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
+       }
+
+       /* initially clear the mask select for this profile */
+       ice_update_fd_mask(hw, prof_id, 0);
+
+       return ICE_SUCCESS;
+}
+
+/* The entries here need to match the order of enum ice_ptype_attrib */
+static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
+       { ICE_GTP_PDU_EH,       ICE_GTP_PDU_FLAG_MASK },
+       { ICE_GTP_SESSION,      ICE_GTP_FLAGS_MASK },
+       { ICE_GTP_DOWNLINK,     ICE_GTP_FLAGS_MASK },
+       { ICE_GTP_UPLINK,       ICE_GTP_FLAGS_MASK },
+};
+
+/**
+ * ice_get_ptype_attrib_info - get ptype attribute information
+ * @type: attribute type
+ * @info: pointer to variable to receive the attribute information
+ */
+static void
+ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
+                         struct ice_ptype_attrib_info *info)
+{
+       *info = ice_ptype_attributes[type];
+}
+
+/**
+ * ice_add_prof_attrib - add any PTG with attributes to profile
+ * @prof: pointer to the profile to which PTG entries will be added
+ * @ptg: PTG to be added
+ * @ptype: PTYPE that needs to be looked up
+ * @attr: array of attributes that will be considered
+ * @attr_cnt: number of elements in the attribute array
+ */
+static enum ice_status
+ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
+                   const struct ice_ptype_attributes *attr, u16 attr_cnt)
+{
+       bool found = false;
+       u16 i;
+
+       for (i = 0; i < attr_cnt; i++) {
+               if (attr[i].ptype == ptype) {
+                       found = true;
+
+                       prof->ptg[prof->ptg_cnt] = ptg;
+                       ice_get_ptype_attrib_info(attr[i].attrib,
+                                                 &prof->attr[prof->ptg_cnt]);
+
+                       if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+                               return ICE_ERR_MAX_LIMIT;
+               }
+       }
+
+       if (!found)
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       return ICE_SUCCESS;
+}
+
 /**
  * ice_add_prof - add profile
  * @hw: pointer to the HW struct
  * @blk: hardware block
  * @id: profile tracking ID
  * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @attr: array of attributes
+ * @attr_cnt: number of elements in attrib array
  * @es: extraction sequence (length of array is determined by the block)
+ * @masks: mask for extraction sequence
  *
  * This function registers a profile, which matches a set of PTYPES with a
  * particular extraction sequence. While the hardware profile is allocated
@@ -3776,19 +4588,40 @@ error_tmp:
  */
 enum ice_status
 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
-            struct ice_fv_word *es)
+            const struct ice_ptype_attributes *attr, u16 attr_cnt,
+            struct ice_fv_word *es, u16 *masks)
 {
        u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
+       ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
        struct ice_prof_map *prof;
        enum ice_status status;
-       u32 byte = 0;
+       u8 byte = 0;
        u8 prof_id;
 
+       ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
+
+       ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+
        /* search for existing profile */
-       status = ice_find_prof_id(hw, blk, es, &prof_id);
+       status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
        if (status) {
                /* allocate profile ID */
                status = ice_alloc_prof_id(hw, blk, &prof_id);
+               if (status)
+                       goto err_ice_add_prof;
+               if (blk == ICE_BLK_FD) {
+                       /* For Flow Director block, the extraction sequence may
+                        * need to be altered in the case where there are paired
+                        * fields that have no match. This is necessary because
+                        * for Flow Director, src and dest fields need to be paired
+                        * for filter programming and these values are swapped
+                        * during Tx.
+                        */
+                       status = ice_update_fd_swap(hw, prof_id, es);
+                       if (status)
+                               goto err_ice_add_prof;
+               }
+               status = ice_update_prof_masking(hw, blk, prof_id, masks);
                if (status)
                        goto err_ice_add_prof;
 
@@ -3796,6 +4629,8 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
                ice_write_es(hw, blk, prof_id, es);
        }
 
+       ice_prof_inc_ref(hw, blk, prof_id);
+
        /* add profile info */
 
        prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
@@ -3804,12 +4639,12 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
 
        prof->profile_cookie = id;
        prof->prof_id = prof_id;
-       prof->ptype_count = 0;
+       prof->ptg_cnt = 0;
        prof->context = 0;
 
        /* build list of ptgs */
-       while (bytes && prof->ptype_count < ICE_MAX_PTYPE_PER_PROFILE) {
-               u32 bit;
+       while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
+               u8 bit;
 
                if (!ptypes[byte]) {
                        bytes--;
@@ -3818,21 +4653,47 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
                }
                /* Examine 8 bits per byte */
                for (bit = 0; bit < 8; bit++) {
-                       if (ptypes[byte] & 1 << bit) {
+                       if (ptypes[byte] & BIT(bit)) {
                                u16 ptype;
+                               u8 ptg;
                                u8 m;
 
-                               ptype = byte * 8 + bit;
-                               if (ptype < ICE_FLOW_PTYPE_MAX) {
-                                       prof->ptype[prof->ptype_count] = ptype;
+                               ptype = byte * BITS_PER_BYTE + bit;
+
+                               /* The package should place all ptypes in a
+                                * non-zero PTG, so the following call should
+                                * never fail.
+                                */
+                               if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
+                                       continue;
+
+                               /* If PTG is already added, skip and continue */
+                               if (ice_is_bit_set(ptgs_used, ptg))
+                                       continue;
 
-                                       if (++prof->ptype_count >=
-                                               ICE_MAX_PTYPE_PER_PROFILE)
+                               ice_set_bit(ptg, ptgs_used);
+                               /* Check to see there are any attributes for
+                                * this ptype, and add them if found.
+                                */
+                               status = ice_add_prof_attrib(prof, ptg, ptype,
+                                                            attr, attr_cnt);
+                               if (status == ICE_ERR_MAX_LIMIT)
+                                       break;
+                               if (status) {
+                                       /* This is simply a ptype/PTG with no
+                                        * attribute
+                                        */
+                                       prof->ptg[prof->ptg_cnt] = ptg;
+                                       prof->attr[prof->ptg_cnt].flags = 0;
+                                       prof->attr[prof->ptg_cnt].mask = 0;
+
+                                       if (++prof->ptg_cnt >=
+                                           ICE_MAX_PTG_PER_PROFILE)
                                                break;
                                }
 
                                /* nothing left in byte, then exit */
-                               m = ~((1 << (bit + 1)) - 1);
+                               m = ~(u8)((1 << (bit + 1)) - 1);
                                if (!(ptypes[byte] & m))
                                        break;
                        }
@@ -3841,11 +4702,12 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
                bytes--;
                byte++;
        }
-       LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
 
-       return ICE_SUCCESS;
+       LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
+       status = ICE_SUCCESS;
 
 err_ice_add_prof:
+       ice_release_lock(&hw->blk[blk].es.prof_map_lock);
        return status;
 }
 
@@ -3856,6 +4718,7 @@ err_ice_add_prof:
  * @id: profile tracking ID
  *
  * This will search for a profile tracking ID which was previously added.
+ * The profile map lock should be held before calling this function.
  */
 struct ice_prof_map *
 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
@@ -3863,51 +4726,11 @@ ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
        struct ice_prof_map *entry = NULL;
        struct ice_prof_map *map;
 
-       LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map,
-                           list) {
+       LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
                if (map->profile_cookie == id) {
                        entry = map;
                        break;
                }
-       }
-
-       return entry;
-}
-
-/**
- * ice_set_prof_context - Set context for a given profile
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- * @cntxt: context
- */
-struct ice_prof_map *
-ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
-{
-       struct ice_prof_map *entry;
-
-       entry = ice_search_prof_id(hw, blk, id);
-       if (entry)
-               entry->context = cntxt;
-
-       return entry;
-}
-
-/**
- * ice_get_prof_context - Get context for a given profile
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- * @cntxt: pointer to variable to receive the context
- */
-struct ice_prof_map *
-ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
-{
-       struct ice_prof_map *entry;
-
-       entry = ice_search_prof_id(hw, blk, id);
-       if (entry)
-               *cntxt = entry->context;
 
        return entry;
 }
@@ -3925,9 +4748,8 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
        struct ice_vsig_prof *p;
 
        LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
-                           ice_vsig_prof, list) {
+                           ice_vsig_prof, list)
                count++;
-       }
 
        return count;
 }
@@ -3963,46 +4785,25 @@ ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
  * ice_rem_prof_id - remove one profile from a VSIG
  * @hw: pointer to the HW struct
  * @blk: hardware block
- * @vsig: VSIG to remove the profile from
  * @prof: pointer to profile structure to remove
- * @chg: pointer to list to record changes
  */
 static enum ice_status
-ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, u16 vsig,
-               struct ice_vsig_prof *prof, struct LIST_HEAD_TYPE *chg)
+ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
+               struct ice_vsig_prof *prof)
 {
        enum ice_status status;
-       struct ice_chs_chg *p;
        u16 i;
 
-       for (i = 0; i < prof->tcam_count; i++) {
-               p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
-               if (!p)
-                       goto err_ice_rem_prof_id;
-
-               p->type = ICE_TCAM_REM;
-               p->vsig = vsig;
-               p->prof_id = prof->tcam[i].prof_id;
-               p->tcam_idx = prof->tcam[i].tcam_idx;
-
-               p->ptg = prof->tcam[i].ptg;
-               prof->tcam[i].in_use = false;
-               p->orig_ent = hw->blk[blk].prof.t[p->tcam_idx];
-               status = ice_rel_tcam_idx(hw, blk, p->tcam_idx);
-               if (!status)
-                       status = ice_prof_dec_ref(hw, blk, p->prof_id);
-
-               LIST_ADD(&p->list_entry, chg);
-
-               if (status)
-                       goto err_ice_rem_prof_id;
-       }
+       for (i = 0; i < prof->tcam_count; i++)
+               if (prof->tcam[i].in_use) {
+                       prof->tcam[i].in_use = false;
+                       status = ice_rel_tcam_idx(hw, blk,
+                                                 prof->tcam[i].tcam_idx);
+                       if (status)
+                               return ICE_ERR_HW_TABLE;
+               }
 
        return ICE_SUCCESS;
-
-err_ice_rem_prof_id:
-       /* caller will clean up the change list */
-       return ICE_ERR_NO_MEMORY;
 }
 
 /**
@@ -4025,9 +4826,9 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
        LIST_FOR_EACH_ENTRY_SAFE(d, t,
                                 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
                                 ice_vsig_prof, list) {
-               status = ice_rem_prof_id(hw, blk, vsig, d, chg);
+               status = ice_rem_prof_id(hw, blk, d);
                if (status)
-                       goto err_ice_rem_vsig;
+                       return status;
 
                LIST_DEL(&d->list);
                ice_free(hw, d);
@@ -4035,36 +4836,29 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
 
        /* Move all VSIS associated with this VSIG to the default VSIG */
        vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
-       if (!vsi_cur)
-               return ICE_ERR_CFG;
-
-       do {
-               struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
-               struct ice_chs_chg *p;
-
-               p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
-               if (!p)
-                       goto err_ice_rem_vsig;
-
-               p->type = ICE_VSIG_REM;
-               p->orig_vsig = vsig;
-               p->vsig = ICE_DEFAULT_VSIG;
-               p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
+       /* If the VSIG has at least 1 VSI then iterate through the list
+        * and remove the VSIs before deleting the group.
+        */
+       if (vsi_cur)
+               do {
+                       struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+                       struct ice_chs_chg *p;
 
-               LIST_ADD(&p->list_entry, chg);
+                       p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+                       if (!p)
+                               return ICE_ERR_NO_MEMORY;
 
-               status = ice_vsig_free(hw, blk, vsig);
-               if (status)
-                       return status;
+                       p->type = ICE_VSIG_REM;
+                       p->orig_vsig = vsig;
+                       p->vsig = ICE_DEFAULT_VSIG;
+                       p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
 
-               vsi_cur = tmp;
-       } while (vsi_cur);
+                       LIST_ADD(&p->list_entry, chg);
 
-       return ICE_SUCCESS;
+                       vsi_cur = tmp;
+               } while (vsi_cur);
 
-err_ice_rem_vsig:
-       /* the caller will free up the change list */
-       return ICE_ERR_NO_MEMORY;
+       return ice_vsig_free(hw, blk, vsig);
 }
 
 /**
@@ -4085,20 +4879,19 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
 
        LIST_FOR_EACH_ENTRY_SAFE(p, t,
                                 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
-                                ice_vsig_prof, list) {
+                                ice_vsig_prof, list)
                if (p->profile_cookie == hdl) {
                        if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
                                /* this is the last profile, remove the VSIG */
                                return ice_rem_vsig(hw, blk, vsig, chg);
 
-                       status = ice_rem_prof_id(hw, blk, vsig, p, chg);
+                       status = ice_rem_prof_id(hw, blk, p);
                        if (!status) {
                                LIST_DEL(&p->list);
                                ice_free(hw, p);
                        }
                        return status;
                }
-       }
 
        return ICE_ERR_DOES_NOT_EXIST;
 }
@@ -4119,7 +4912,7 @@ ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
 
        INIT_LIST_HEAD(&chg);
 
-       for (i = 1; i < ICE_MAX_VSIGS; i++) {
+       for (i = 1; i < ICE_MAX_VSIGS; i++)
                if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
                        if (ice_has_prof_vsig(hw, blk, i, id)) {
                                status = ice_rem_prof_id_vsig(hw, blk, i, id,
@@ -4128,7 +4921,6 @@ ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
                                        goto err_ice_rem_flow_all;
                        }
                }
-       }
 
        status = ice_upd_prof_hw(hw, blk, &chg);
 
@@ -4153,80 +4945,84 @@ err_ice_rem_flow_all:
  */
 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
 {
-       enum ice_status status;
        struct ice_prof_map *pmap;
+       enum ice_status status;
 
-       pmap = ice_search_prof_id(hw, blk, id);
-       if (!pmap)
-               return ICE_ERR_DOES_NOT_EXIST;
-
-       status = ice_free_prof_id(hw, blk, pmap->prof_id);
+       ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
 
-       if (status)
-               return status;
+       pmap = ice_search_prof_id(hw, blk, id);
+       if (!pmap) {
+               status = ICE_ERR_DOES_NOT_EXIST;
+               goto err_ice_rem_prof;
+       }
 
        /* remove all flows with this profile */
        status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
        if (status)
-               return status;
+               goto err_ice_rem_prof;
+
+       /* dereference profile, and possibly remove */
+       ice_prof_dec_ref(hw, blk, pmap->prof_id);
+
        LIST_DEL(&pmap->list);
        ice_free(hw, pmap);
 
-       return ICE_SUCCESS;
+err_ice_rem_prof:
+       ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+       return status;
 }
 
 /**
- * ice_get_prof_ptgs - get ptgs for profile
+ * ice_get_prof - get profile
  * @hw: pointer to the HW struct
  * @blk: hardware block
  * @hdl: profile handle
  * @chg: change list
  */
 static enum ice_status
-ice_get_prof_ptgs(struct ice_hw *hw, enum ice_block blk, u64 hdl,
-                 struct LIST_HEAD_TYPE *chg)
+ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
+            struct LIST_HEAD_TYPE *chg)
 {
+       enum ice_status status = ICE_SUCCESS;
        struct ice_prof_map *map;
        struct ice_chs_chg *p;
        u16 i;
 
+       ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
        /* Get the details on the profile specified by the handle ID */
        map = ice_search_prof_id(hw, blk, hdl);
-       if (!map)
-               return ICE_ERR_DOES_NOT_EXIST;
-
-       for (i = 0; i < map->ptype_count; i++) {
-               enum ice_status status;
-               bool add;
-               u8 ptg;
-
-               status = ice_get_ptg(hw, blk, map->ptype[i], &ptg, &add);
-               if (status)
-                       goto err_ice_get_prof_ptgs;
+       if (!map) {
+               status = ICE_ERR_DOES_NOT_EXIST;
+               goto err_ice_get_prof;
+       }
 
-               if (add || !hw->blk[blk].es.ref_count[map->prof_id]) {
-                       /* add PTG to change list */
+       for (i = 0; i < map->ptg_cnt; i++)
+               if (!hw->blk[blk].es.written[map->prof_id]) {
+                       /* add ES to change list */
                        p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
-                       if (!p)
-                               goto err_ice_get_prof_ptgs;
+                       if (!p) {
+                               status = ICE_ERR_NO_MEMORY;
+                               goto err_ice_get_prof;
+                       }
 
                        p->type = ICE_PTG_ES_ADD;
-                       p->ptype = map->ptype[i];
-                       p->ptg = ptg;
-                       p->add_ptg = add;
+                       p->ptype = 0;
+                       p->ptg = map->ptg[i];
+                       p->attr = map->attr[i];
+                       p->add_ptg = 0;
 
-                       p->add_prof = !hw->blk[blk].es.ref_count[map->prof_id];
+                       p->add_prof = 1;
                        p->prof_id = map->prof_id;
 
+                       hw->blk[blk].es.written[map->prof_id] = true;
+
                        LIST_ADD(&p->list_entry, chg);
                }
-       }
-
-       return ICE_SUCCESS;
 
-err_ice_get_prof_ptgs:
+err_ice_get_prof:
+       ice_release_lock(&hw->blk[blk].es.prof_map_lock);
        /* let caller clean up the change list */
-       return ICE_ERR_NO_MEMORY;
+       return status;
 }
 
 /**
@@ -4250,13 +5046,12 @@ ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
                struct ice_vsig_prof *p;
 
                /* copy to the input list */
-               p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
+               p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
+                                                      ICE_NONDMA_TO_NONDMA);
                if (!p)
                        goto err_ice_get_profs_vsig;
 
-               ice_memcpy(p, ent1, sizeof(*p), ICE_NONDMA_TO_NONDMA);
-
-               LIST_ADD(&p->list, lst);
+               LIST_ADD_TAIL(&p->list, lst);
        }
 
        return ICE_SUCCESS;
@@ -4281,41 +5076,40 @@ static enum ice_status
 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
                    struct LIST_HEAD_TYPE *lst, u64 hdl)
 {
-       struct ice_vsig_prof *p;
+       enum ice_status status = ICE_SUCCESS;
        struct ice_prof_map *map;
+       struct ice_vsig_prof *p;
        u16 i;
 
+       ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
        map = ice_search_prof_id(hw, blk, hdl);
-       if (!map)
-               return ICE_ERR_DOES_NOT_EXIST;
+       if (!map) {
+               status = ICE_ERR_DOES_NOT_EXIST;
+               goto err_ice_add_prof_to_lst;
+       }
 
        p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
-       if (!p)
-               return ICE_ERR_NO_MEMORY;
+       if (!p) {
+               status = ICE_ERR_NO_MEMORY;
+               goto err_ice_add_prof_to_lst;
+       }
 
        p->profile_cookie = map->profile_cookie;
        p->prof_id = map->prof_id;
-       p->tcam_count = map->ptype_count;
-
-       for (i = 0; i < map->ptype_count; i++) {
-               enum ice_status status;
-               u8 ptg;
+       p->tcam_count = map->ptg_cnt;
 
+       for (i = 0; i < map->ptg_cnt; i++) {
                p->tcam[i].prof_id = map->prof_id;
                p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
-
-               status = ice_ptg_find_ptype(hw, blk, map->ptype[i], &ptg);
-               if (status) {
-                       ice_free(hw, p);
-                       return status;
-               }
-
-               p->tcam[i].ptg = ptg;
+               p->tcam[i].ptg = map->ptg[i];
+               p->tcam[i].attr = map->attr[i];
        }
 
        LIST_ADD(&p->list, lst);
 
-       return ICE_SUCCESS;
+err_ice_add_prof_to_lst:
+       ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+       return status;
 }
 
 /**
@@ -4341,6 +5135,7 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
        status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
        if (!status)
                status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+
        if (status) {
                ice_free(hw, p);
                return status;
@@ -4356,12 +5151,44 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
        return ICE_SUCCESS;
 }
 
+/**
+ * ice_set_tcam_flags - set TCAM flag don't care mask
+ * @mask: mask for flags
+ * @dc_mask: pointer to the don't care mask
+ */
+static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ])
+{
+       u16 *flag_word;
+
+       /* flags are lowest u16 */
+       flag_word = (u16 *)dc_mask;
+       *flag_word = ~mask;
+}
+
+/**
+ * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
+ * @hw: pointer to the HW struct
+ * @idx: the index of the TCAM entry to remove
+ * @chg: the list of change structures to search
+ */
+static void
+ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
+{
+       struct ice_chs_chg *pos, *tmp;
+
+       LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry)
+               if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
+                       LIST_DEL(&tmp->list_entry);
+                       ice_free(hw, tmp);
+               }
+}
+
 /**
  * ice_prof_tcam_ena_dis - add enable or disable TCAM change
  * @hw: pointer to the HW struct
  * @blk: hardware block
  * @enable: true to enable, false to disable
- * @vsig: the vsig of the TCAM entry
+ * @vsig: the VSIG of the TCAM entry
  * @tcam: pointer the TCAM info structure of the TCAM to disable
  * @chg: the change list
  *
@@ -4375,29 +5202,49 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
        enum ice_status status;
        struct ice_chs_chg *p;
 
-       /* Default: enable means change the low flag bit to don't care */
-       u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+       u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+       u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
        u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
-       u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
 
-       /* If disabled, change the low flag bit to never match */
+       /* if disabling, free the TCAM */
        if (!enable) {
-               dc_msk[0] = 0x00;
-               nm_msk[0] = 0x01;
+               status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
+
+               /* if we have already created a change for this TCAM entry, then
+                * we need to remove that entry, in order to prevent writing to
+                * a TCAM entry we no longer will have ownership of.
+                */
+               ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
+               tcam->tcam_idx = 0;
+               tcam->in_use = 0;
+               return status;
        }
 
+       /* for re-enabling, reallocate a TCAM */
+       /* for entries with empty attribute masks, allocate entry from
+        * the bottom of the TCAM table; otherwise, allocate from the
+        * top of the table in order to give it higher priority
+        */
+       status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
+                                   &tcam->tcam_idx);
+       if (status)
+               return status;
+
        /* add TCAM to change list */
        p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
        if (!p)
                return ICE_ERR_NO_MEMORY;
 
+       /* set don't care masks for TCAM flags */
+       ice_set_tcam_flags(tcam->attr.mask, dc_msk);
+
        status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
-                                     tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
-                                     nm_msk);
+                                     tcam->ptg, vsig, 0, tcam->attr.flags,
+                                     vl_msk, dc_msk, nm_msk);
        if (status)
                goto err_ice_prof_tcam_ena_dis;
 
-       tcam->in_use = enable;
+       tcam->in_use = 1;
 
        p->type = ICE_TCAM_ADD;
        p->add_tcam_idx = true;
@@ -4416,6 +5263,32 @@ err_ice_prof_tcam_ena_dis:
        return status;
 }
 
+/**
+ * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use
+ * @ptg_attr: pointer to the PTG and attribute pair to check
+ * @ptgs_used: bitmap that denotes which PTGs are in use
+ * @attr_used: array of PTG and attributes pairs already used
+ * @attr_cnt: count of entries in the attr_used array
+ */
+static bool
+ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, ice_bitmap_t *ptgs_used,
+                   struct ice_tcam_inf *attr_used[], u16 attr_cnt)
+{
+       u16 i;
+
+       if (!ice_is_bit_set(ptgs_used, ptg_attr->ptg))
+               return false;
+
+       /* the PTG is used, so now look for correct attributes */
+       for (i = 0; i < attr_cnt; i++)
+               if (attr_used[i]->ptg == ptg_attr->ptg &&
+                   attr_used[i]->attr.flags == ptg_attr->attr.flags &&
+                   attr_used[i]->attr.mask == ptg_attr->attr.mask)
+                       return true;
+
+       return false;
+}
+
 /**
  * ice_adj_prof_priorities - adjust profile based on priorities
  * @hw: pointer to the HW struct
@@ -4428,11 +5301,19 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
                        struct LIST_HEAD_TYPE *chg)
 {
        ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
+       struct ice_tcam_inf **attr_used;
+       enum ice_status status = ICE_SUCCESS;
        struct ice_vsig_prof *t;
-       enum ice_status status;
+       u16 attr_used_cnt = 0;
        u16 idx;
 
-       ice_memset(ptgs_used, 0, sizeof(ptgs_used), ICE_NONDMA_MEM);
+#define ICE_MAX_PTG_ATTRS      1024
+       attr_used = (struct ice_tcam_inf **)ice_calloc(hw, ICE_MAX_PTG_ATTRS,
+                                                      sizeof(*attr_used));
+       if (!attr_used)
+               return ICE_ERR_NO_MEMORY;
+
+       ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
        idx = vsig & ICE_VSIG_IDX_M;
 
        /* Priority is based on the order in which the profiles are added. The
@@ -4449,11 +5330,15 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
                u16 i;
 
                for (i = 0; i < t->tcam_count; i++) {
+                       bool used;
+
                        /* Scan the priorities from newest to oldest.
                         * Make sure that the newest profiles take priority.
                         */
-                       if (ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
-                           t->tcam[i].in_use) {
+                       used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used,
+                                                  attr_used, attr_used_cnt);
+
+                       if (used && t->tcam[i].in_use) {
                                /* need to mark this PTG as never match, as it
                                 * was already in use and therefore duplicate
                                 * (and lower priority)
@@ -4463,9 +5348,8 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
                                                               &t->tcam[i],
                                                               chg);
                                if (status)
-                                       return status;
-                       } else if (!ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
-                                  !t->tcam[i].in_use) {
+                                       goto err_ice_adj_prof_priorities;
+                       } else if (!used && !t->tcam[i].in_use) {
                                /* need to enable this PTG, as it in not in use
                                 * and not enabled (highest priority)
                                 */
@@ -4474,15 +5358,22 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
                                                               &t->tcam[i],
                                                               chg);
                                if (status)
-                                       return status;
+                                       goto err_ice_adj_prof_priorities;
                        }
 
                        /* keep track of used ptgs */
                        ice_set_bit(t->tcam[i].ptg, ptgs_used);
+                       if (attr_used_cnt < ICE_MAX_PTG_ATTRS)
+                               attr_used[attr_used_cnt++] = &t->tcam[i];
+                       else
+                               ice_debug(hw, ICE_DBG_INIT,
+                                         "Warn: ICE_MAX_PTG_ATTRS exceeded\n");
                }
        }
 
-       return ICE_SUCCESS;
+err_ice_adj_prof_priorities:
+       ice_free(hw, attr_used);
+       return status;
 }
 
 /**
@@ -4491,25 +5382,22 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
  * @blk: hardware block
  * @vsig: the VSIG to which this profile is to be added
  * @hdl: the profile handle indicating the profile to add
+ * @rev: true to add entries to the end of the list
  * @chg: the change list
  */
 static enum ice_status
 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
-                    struct LIST_HEAD_TYPE *chg)
+                    bool rev, struct LIST_HEAD_TYPE *chg)
 {
        /* Masks that ignore flags */
        u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
        u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
        u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+       enum ice_status status = ICE_SUCCESS;
        struct ice_prof_map *map;
        struct ice_vsig_prof *t;
        struct ice_chs_chg *p;
-       u16 i;
-
-       /* Get the details on the profile specified by the handle ID */
-       map = ice_search_prof_id(hw, blk, hdl);
-       if (!map)
-               return ICE_ERR_DOES_NOT_EXIST;
+       u16 vsig_idx, i;
 
        /* Error, if this VSIG already has this profile */
        if (ice_has_prof_vsig(hw, blk, vsig, hdl))
@@ -4518,39 +5406,47 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
        /* new VSIG profile structure */
        t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
        if (!t)
+               return ICE_ERR_NO_MEMORY;
+
+       ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+       /* Get the details on the profile specified by the handle ID */
+       map = ice_search_prof_id(hw, blk, hdl);
+       if (!map) {
+               status = ICE_ERR_DOES_NOT_EXIST;
                goto err_ice_add_prof_id_vsig;
+       }
 
        t->profile_cookie = map->profile_cookie;
        t->prof_id = map->prof_id;
-       t->tcam_count = map->ptype_count;
+       t->tcam_count = map->ptg_cnt;
 
        /* create TCAM entries */
-       for (i = 0; i < map->ptype_count; i++) {
-               enum ice_status status;
+       for (i = 0; i < map->ptg_cnt; i++) {
                u16 tcam_idx;
-               bool add;
-               u8 ptg;
-
-               /* If properly sequenced, we should never have to allocate new
-                * PTGs
-                */
-               status = ice_get_ptg(hw, blk, map->ptype[i], &ptg, &add);
-               if (status)
-                       goto err_ice_add_prof_id_vsig;
 
                /* add TCAM to change list */
                p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
-               if (!p)
+               if (!p) {
+                       status = ICE_ERR_NO_MEMORY;
                        goto err_ice_add_prof_id_vsig;
+               }
 
                /* allocate the TCAM entry index */
-               status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
-               if (status)
+               /* for entries with empty attribute masks, allocate entry from
+                * the bottom of the TCAM table; otherwise, allocate from the
+                * top of the table in order to give it higher priority
+                */
+               status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
+                                           &tcam_idx);
+               if (status) {
+                       ice_free(hw, p);
                        goto err_ice_add_prof_id_vsig;
+               }
 
-               t->tcam[i].ptg = ptg;
+               t->tcam[i].ptg = map->ptg[i];
                t->tcam[i].prof_id = map->prof_id;
                t->tcam[i].tcam_idx = tcam_idx;
+               t->tcam[i].attr = map->attr[i];
                t->tcam[i].in_use = true;
 
                p->type = ICE_TCAM_ADD;
@@ -4560,32 +5456,41 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
                p->vsig = vsig;
                p->tcam_idx = t->tcam[i].tcam_idx;
 
+               /* set don't care masks for TCAM flags */
+               ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk);
+
                /* write the TCAM entry */
                status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
                                              t->tcam[i].prof_id,
-                                             t->tcam[i].ptg, vsig, 0, 0,
-                                             vl_msk, dc_msk, nm_msk);
-               if (status)
+                                             t->tcam[i].ptg, vsig, 0,
+                                             t->tcam[i].attr.flags, vl_msk,
+                                             dc_msk, nm_msk);
+               if (status) {
+                       ice_free(hw, p);
                        goto err_ice_add_prof_id_vsig;
-
-               /* this increments the reference count of how many TCAM entries
-                * are using this HW profile ID
-                */
-               status = ice_prof_inc_ref(hw, blk, t->tcam[i].prof_id);
+               }
 
                /* log change */
                LIST_ADD(&p->list_entry, chg);
        }
 
        /* add profile to VSIG */
-       LIST_ADD(&t->list,
-                &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
+       vsig_idx = vsig & ICE_VSIG_IDX_M;
+       if (rev)
+               LIST_ADD_TAIL(&t->list,
+                             &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
+       else
+               LIST_ADD(&t->list,
+                        &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
 
-       return ICE_SUCCESS;
+       ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+       return status;
 
 err_ice_add_prof_id_vsig:
+       ice_release_lock(&hw->blk[blk].es.prof_map_lock);
        /* let caller clean up the change list */
-       return ICE_ERR_NO_MEMORY;
+       ice_free(hw, t);
+       return status;
 }
 
 /**
@@ -4609,16 +5514,18 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
                return ICE_ERR_NO_MEMORY;
 
        new_vsig = ice_vsig_alloc(hw, blk);
-       if (!new_vsig)
-               return ICE_ERR_HW_TABLE;
+       if (!new_vsig) {
+               status = ICE_ERR_HW_TABLE;
+               goto err_ice_create_prof_id_vsig;
+       }
 
        status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
        if (status)
-               return status;
+               goto err_ice_create_prof_id_vsig;
 
-       status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
+       status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
        if (status)
-               return status;
+               goto err_ice_create_prof_id_vsig;
 
        p->type = ICE_VSIG_ADD;
        p->vsi = vsi;
@@ -4628,19 +5535,26 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
        LIST_ADD(&p->list_entry, chg);
 
        return ICE_SUCCESS;
+
+err_ice_create_prof_id_vsig:
+       /* let caller clean up the change list */
+       ice_free(hw, p);
+       return status;
 }
 
 /**
- * ice_create_vsig_from_list - create a new VSIG with a list of profiles
+ * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
  * @hw: pointer to the HW struct
  * @blk: hardware block
  * @vsi: the initial VSI that will be in VSIG
  * @lst: the list of profile that will be added to the VSIG
+ * @new_vsig: pointer to where the new VSIG ID is returned
  * @chg: the change list
  */
 static enum ice_status
 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
-                        struct LIST_HEAD_TYPE *lst, struct LIST_HEAD_TYPE *chg)
+                        struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
+                        struct LIST_HEAD_TYPE *chg)
 {
        struct ice_vsig_prof *t;
        enum ice_status status;
@@ -4655,12 +5569,15 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
                return status;
 
        LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
+               /* Reverse the order here since we are copying the list */
                status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
-                                             chg);
+                                             true, chg);
                if (status)
                        return status;
        }
 
+       *new_vsig = vsig;
+
        return ICE_SUCCESS;
 }
 
@@ -4695,6 +5612,47 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
        return status == ICE_SUCCESS;
 }
 
+/**
+ * ice_add_vsi_flow - add VSI flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: input VSI
+ * @vsig: target VSIG to include the input VSI
+ *
+ * Calling this function will add the VSI to a given VSIG and
+ * update the HW tables accordingly. This call can be used to
+ * add multiple VSIs to a VSIG if we know beforehand that those
+ * VSIs have the same characteristics as the VSIG. This avoids
+ * generating a new VSIG and TCAM entries only to roll them back
+ * when a matching VSIG is later found.
+ */
+enum ice_status
+ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+       struct ice_chs_chg *tmp, *del;
+       struct LIST_HEAD_TYPE chg;
+       enum ice_status status;
+
+       /* if target VSIG is default the move is invalid */
+       if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
+               return ICE_ERR_PARAM;
+
+       INIT_LIST_HEAD(&chg);
+
+       /* move VSI to the VSIG that matches */
+       status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+       /* update hardware if success */
+       if (!status)
+               status = ice_upd_prof_hw(hw, blk, &chg);
+
+       LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+               LIST_DEL(&del->list_entry);
+               ice_free(hw, del);
+       }
+
+       return status;
+}
+
 /**
  * ice_add_prof_id_flow - add profile flow
  * @hw: pointer to the HW struct
@@ -4712,16 +5670,15 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
        struct ice_vsig_prof *tmp1, *del1;
        struct LIST_HEAD_TYPE union_lst;
        struct ice_chs_chg *tmp, *del;
-       struct LIST_HEAD_TYPE chrs;
        struct LIST_HEAD_TYPE chg;
        enum ice_status status;
-       u16 vsig, or_vsig = 0;
+       u16 vsig;
 
        INIT_LIST_HEAD(&union_lst);
-       INIT_LIST_HEAD(&chrs);
        INIT_LIST_HEAD(&chg);
 
-       status = ice_get_prof_ptgs(hw, blk, hdl, &chg);
+       /* Get profile */
+       status = ice_get_prof(hw, blk, hdl, &chg);
        if (status)
                return status;
 
@@ -4729,9 +5686,10 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
        status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
        if (!status && vsig) {
                bool only_vsi;
+               u16 or_vsig;
                u16 ref;
 
-               /* found in vsig */
+               /* found in VSIG */
                or_vsig = vsig;
 
                /* make sure that there is no overlap/conflict between the new
@@ -4763,14 +5721,13 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
                /* search for an existing VSIG with an exact charc match */
                status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
                if (!status) {
-                       /* found an exact match */
-                       /* move vsi to the VSIG that matches */
+                       /* move VSI to the VSIG that matches */
                        status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;
 
-                       /* remove original VSIG if we just moved the only VSI
-                        * from it
+                       /* The VSI has been moved out of or_vsig. If or_vsig had
+                        * only that VSI, it is now empty and can be removed.
                         */
                        if (only_vsi) {
                                status = ice_rem_vsig(hw, blk, or_vsig, &chg);
@@ -4783,7 +5740,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
                         * not sharing entries and we can simply add the new
                         * profile to the VSIG.
                         */
-                       status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
+                       status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
+                                                     &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;
 
@@ -4794,7 +5752,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
                } else {
                        /* No match, so we need a new VSIG */
                        status = ice_create_vsig_from_lst(hw, blk, vsi,
-                                                         &union_lst, &chg);
+                                                         &union_lst, &vsig,
+                                                         &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;
 
@@ -4837,42 +5796,9 @@ err_ice_add_prof_id_flow:
                ice_free(hw, del1);
        }
 
-       LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &chrs, ice_vsig_prof, list) {
-               LIST_DEL(&del1->list);
-               ice_free(hw, del1);
-       }
-
        return status;
 }
 
-/**
- * ice_add_flow - add flow
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @vsi: array of VSIs to enable with the profile specified by ID
- * @count: number of elements in the VSI array
- * @id: profile tracking ID
- *
- * Calling this function will update the hardware tables to enable the
- * profile indicated by the ID parameter for the VSIs specified in the VSI
- * array. Once successfully called, the flow will be enabled.
- */
-enum ice_status
-ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
-            u64 id)
-{
-       enum ice_status status;
-       u16 i;
-
-       for (i = 0; i < count; i++) {
-               status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
-               if (status)
-                       return status;
-       }
-
-       return ICE_SUCCESS;
-}
-
 /**
  * ice_rem_prof_from_list - remove a profile from list
  * @hw: pointer to the HW struct
@@ -4884,13 +5810,12 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
 {
        struct ice_vsig_prof *ent, *tmp;
 
-       LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) {
+       LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
                if (ent->profile_cookie == hdl) {
                        LIST_DEL(&ent->list);
                        ice_free(hw, ent);
                        return ICE_SUCCESS;
                }
-       }
 
        return ICE_ERR_DOES_NOT_EXIST;
 }
@@ -4976,8 +5901,8 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
                                if (status)
                                        goto err_ice_rem_prof_id_flow;
 
-                       } else if (ice_find_dup_props_vsig(hw, blk, &copy,
-                                                          &vsig)) {
+                       } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
+                                                           &vsig)) {
                                /* found an exact match */
                                /* add or move VSI to the VSIG that matches */
                                /* Search for a VSIG with a matching profile
@@ -4994,7 +5919,8 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
                                 * new VSIG and TCAM entries
                                 */
                                status = ice_create_vsig_from_lst(hw, blk, vsi,
-                                                                 &copy, &chg);
+                                                                 &copy, &vsig,
+                                                                 &chg);
                                if (status)
                                        goto err_ice_rem_prof_id_flow;
 
@@ -5026,31 +5952,3 @@ err_ice_rem_prof_id_flow:
 
        return status;
 }
-
-/**
- * ice_rem_flow - remove flow
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @vsi: array of VSIs from which to remove the profile specified by ID
- * @count: number of elements in the VSI array
- * @id: profile tracking ID
- *
- * The function will remove flows from the specified VSIs that were enabled
- * using ice_add_flow. The ID value will indicated which profile will be
- * removed. Once successfully called, the flow will be disabled.
- */
-enum ice_status
-ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
-            u64 id)
-{
-       enum ice_status status;
-       u16 i;
-
-       for (i = 0; i < count; i++) {
-               status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
-               if (status)
-                       return status;
-       }
-
-       return ICE_SUCCESS;
-}