/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2019
+ * Copyright(c) 2001-2021 Intel Corporation
*/
#include "ice_common.h"
#include "ice_protocol_type.h"
#include "ice_flow.h"
+/* For supporting double VLAN mode, it is necessary to enable or disable certain
+ * boost TCAM entries. The metadata label names that match the following
+ * prefixes will be saved to allow enabling double VLAN mode.
+ */
+#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
+#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
+
/* To support tunneling entries by PF, the package will append the PF number to
* the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
*/
+#define ICE_TNL_PRE "TNL_"
static const struct ice_tunnel_type_scan tnls[] = {
{ TNL_VXLAN, "TNL_VXLAN_PF" },
{ TNL_GENEVE, "TNL_GENEVE_PF" },
+ { TNL_ECPRI, "TNL_UDP_ECPRI_PF" },
{ TNL_LAST, "" }
};
{
struct ice_nvm_table *nvms;
- nvms = (struct ice_nvm_table *)(ice_seg->device_table +
- LE32_TO_CPU(ice_seg->device_table_count));
+ nvms = (struct ice_nvm_table *)
+ (ice_seg->device_table +
+ LE32_TO_CPU(ice_seg->device_table_count));
return (_FORCE_ struct ice_buf_table *)
(nvms->vers + LE32_TO_CPU(nvms->table_count));
return entry;
}
+/**
+ * ice_hw_ptype_ena - check if the PTYPE is enabled or not
+ * @hw: pointer to the HW structure
+ * @ptype: the hardware PTYPE
+ */
+bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
+{
+ return ptype < ICE_FLOW_PTYPE_MAX &&
+ ice_is_bit_set(hw->hw_ptype, ptype);
+}
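+
+/* Usage sketch (illustrative only; the call site below is hypothetical and not
+ * part of this change): callers that program per-packet-type behavior can gate
+ * on the bitmap built by ice_fill_hw_ptype(), e.g.
+ *
+ *	if (!ice_hw_ptype_ena(hw, ptype))
+ *		return ICE_ERR_NOT_SUPPORTED;
+ */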
+
+/**
+ * ice_marker_ptype_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the Marker PType TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual Marker PType TCAM entries.
+ */
+static void *
+ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_marker_ptype_tcam_section *marker_ptype;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
+ return NULL;
+
+ if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ marker_ptype = (struct ice_marker_ptype_tcam_section *)section;
+ if (index >= LE16_TO_CPU(marker_ptype->count))
+ return NULL;
+
+ return marker_ptype->tcam + index;
+}
+
+/**
+ * ice_fill_hw_ptype - fill the enabled PTYPE bit information
+ * @hw: pointer to the HW structure
+ */
+static void
+ice_fill_hw_ptype(struct ice_hw *hw)
+{
+ struct ice_marker_ptype_tcam_entry *tcam;
+ struct ice_seg *seg = hw->seg;
+ struct ice_pkg_enum state;
+
+ ice_zero_bitmap(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
+ if (!seg)
+ return;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ do {
+ tcam = (struct ice_marker_ptype_tcam_entry *)
+ ice_pkg_enum_entry(seg, &state,
+ ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
+ ice_marker_ptype_tcam_handler);
+ if (tcam &&
+ LE16_TO_CPU(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
+ LE16_TO_CPU(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
+ ice_set_bit(LE16_TO_CPU(tcam->ptype), hw->hw_ptype);
+
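+ /* pass a NULL segment on subsequent iterations so that
+ * ice_pkg_enum_entry() continues from the saved enumeration state
+ */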
+ seg = NULL;
+ } while (tcam);
+}
+
/**
* ice_boost_tcam_handler
* @sect_type: section type
}
/**
- * ice_init_pkg_hints
+ * ice_add_tunnel_hint
* @hw: pointer to the HW structure
- * @ice_seg: pointer to the segment of the package scan (non-NULL)
- *
- * This function will scan the package and save off relevant information
- * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
- * since the first call to ice_enum_labels requires a pointer to an actual
- * ice_seg structure.
+ * @label_name: label text
+ * @val: value of the tunnel port boost entry
*/
-static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
{
- struct ice_pkg_enum state;
- char *label_name;
- u16 val;
- int i;
-
- ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
-
- if (!ice_seg)
- return;
-
- label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
- &val);
+ if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ u16 i;
- while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
for (i = 0; tnls[i].type != TNL_LAST; i++) {
size_t len = strlen(tnls[i].label_prefix);
break;
}
}
+ }
+}
+
+/**
+ * ice_add_dvm_hint
+ * @hw: pointer to the HW structure
+ * @val: value of the boost entry
+ * @enable: true to enable the entry, or false to disable it
+ */
+static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
+{
+ if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
+ hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
+ hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
+ hw->dvm_upd.count++;
+ }
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name) {
+ if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
+ /* check for a tunnel entry */
+ ice_add_tunnel_hint(hw, label_name, val);
+
+ /* check for a dvm mode entry */
+ else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
+ ice_add_dvm_hint(hw, val, true);
+
+ /* check for a svm mode entry */
+ else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
+ ice_add_dvm_hint(hw, val, false);
label_name = ice_enum_labels(NULL, 0, &state, &val);
}
- /* Cache the appropriate boost TCAM entry pointers */
+ /* Cache the appropriate boost TCAM entry pointers for tunnels */
for (i = 0; i < hw->tnl.count; i++) {
ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
&hw->tnl.tbl[i].boost_entry);
if (hw->tnl.tbl[i].boost_entry)
hw->tnl.tbl[i].valid = true;
}
+
+ /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
+ for (i = 0; i < hw->dvm_upd.count; i++)
+ ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
+ &hw->dvm_upd.tbl[i].boost_entry);
}
/* Key creation */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
u16 count = 0;
- u16 i, j;
+ u16 i;
/* check each byte */
for (i = 0; i < size; i++) {
return false;
/* count the bits in this byte, checking threshold */
- for (j = 0; j < BITS_PER_BYTE; j++) {
- count += (mask[i] & (0x1 << j)) ? 1 : 0;
- if (count > max)
- return false;
- }
+ count += ice_hweight8(mask[i]);
+ if (count > max)
+ return false;
}
return true;
* This function generates a key from a value, a don't care mask and a never
* match mask.
* upd, dc, and nm are optional parameters, and can be NULL:
- * upd == NULL --> udp mask is all 1's (update all bits)
+ * upd == NULL --> upd mask is all 1's (update all bits)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (status == ICE_ERR_AQ_NO_WORK)
- ice_debug(hw, ICE_DBG_PKG,
- "Global config lock: No work to do\n");
+ ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
return status;
}
return status;
}
+/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+enum ice_status
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
/**
* ice_aq_update_pkg
* @hw: pointer to the hardware structure
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
- pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
- pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
/* Search all package segments for the requested segment type */
for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
}
/**
- * ice_update_pkg
+ * ice_update_pkg_no_lock
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
* @count: the number of buffers in the array
- *
- * Obtains change lock and updates package.
*/
-enum ice_status
-ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+static enum ice_status
+ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- enum ice_status status;
- u32 offset, info, i;
-
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- return status;
+ enum ice_status status = ICE_SUCCESS;
+ u32 i;
for (i = 0; i < count; i++) {
- bool last = ((i + 1) == count);
-
struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+ bool last = ((i + 1) == count);
+ u32 offset, info;
status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
last, &offset, &info, NULL);
if (status) {
- ice_debug(hw, ICE_DBG_PKG,
- "Update pkg failed: err %d off %d inf %d\n",
+ ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
status, offset, info);
break;
}
}
+ return status;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ice_update_pkg_no_lock(hw, bufs, count);
+
ice_release_change_lock(hw);
return status;
/* Save AQ status from download package */
hw->pkg_dwnld_status = hw->adminq.sq_last_status;
if (status) {
- ice_debug(hw, ICE_DBG_PKG,
- "Pkg download failed: err %d off %d inf %d\n",
+ ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
status, offset, info);
break;
}
break;
}
+ if (!status) {
+ status = ice_set_vlan_mode(hw);
+ if (status)
+ ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
+ status);
+ }
+
ice_release_global_cfg_lock(hw);
return status;
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
+ enum ice_status status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
- ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
- ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
LE32_TO_CPU(ice_seg->hdr.seg_type),
- LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
+ LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
ice_buf_tbl = ice_find_buf_table(ice_seg);
ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
LE32_TO_CPU(ice_buf_tbl->buf_count));
- return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
- LE32_TO_CPU(ice_buf_tbl->buf_count));
+ status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ LE32_TO_CPU(ice_buf_tbl->buf_count));
+
+ ice_post_pkg_dwnld_vlan_mode_cfg(hw);
+
+ return status;
}
/**
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
- struct ice_global_metadata_seg *meta_seg;
struct ice_generic_seg_hdr *seg_hdr;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
if (!pkg_hdr)
return ICE_ERR_PARAM;
- meta_seg = (struct ice_global_metadata_seg *)
- ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
- if (meta_seg) {
- hw->pkg_ver = meta_seg->pkg_ver;
- ice_memcpy(hw->pkg_name, meta_seg->pkg_name,
- sizeof(hw->pkg_name), ICE_NONDMA_TO_NONDMA);
+ seg_hdr = (struct ice_generic_seg_hdr *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+ if (seg_hdr) {
+ struct ice_meta_sect *meta;
+ struct ice_pkg_enum state;
- ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
- meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
- meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
- meta_seg->pkg_name);
- } else {
- ice_debug(hw, ICE_DBG_INIT,
- "Did not find metadata segment in driver package\n");
- return ICE_ERR_CFG;
- }
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
- seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
- if (seg_hdr) {
- hw->ice_pkg_ver = seg_hdr->seg_ver;
- ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
- sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA);
-
- ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
- seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
- seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
- seg_hdr->seg_name);
+ /* Get package information from the Metadata Section */
+ meta = (struct ice_meta_sect *)
+ ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
+ ICE_SID_METADATA);
+ if (!meta) {
+ ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
+ return ICE_ERR_CFG;
+ }
+
+ hw->pkg_ver = meta->ver;
+ ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
+ meta->ver.major, meta->ver.minor, meta->ver.update,
+ meta->ver.draft, meta->name);
+
+ hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
+ ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
+ sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_format_ver.major,
+ seg_hdr->seg_format_ver.minor,
+ seg_hdr->seg_format_ver.update,
+ seg_hdr->seg_format_ver.draft,
+ seg_hdr->seg_id);
} else {
- ice_debug(hw, ICE_DBG_INIT,
- "Did not find ice segment in driver package\n");
+ ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
return ICE_ERR_CFG;
}
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
- (ICE_PKG_CNT - 1));
+ size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
if (!pkg_info)
return ICE_ERR_NO_MEMORY;
if (pkg_info->pkg_info[i].is_active) {
flags[place++] = 'A';
hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ hw->active_track_id =
+ LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
ice_memcpy(hw->active_pkg_name,
pkg_info->pkg_info[i].name,
- sizeof(hw->active_pkg_name),
+ sizeof(pkg_info->pkg_info[i].name),
ICE_NONDMA_TO_NONDMA);
hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
}
u32 seg_count;
u32 i;
- if (len < sizeof(*pkg))
+ if (len < ice_struct_size(pkg, seg_offset, 1))
return ICE_ERR_BUF_TOO_SHORT;
- if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
- pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
- pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
- pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
return ICE_ERR_CFG;
/* pkg must have at least one segment */
return ICE_ERR_CFG;
/* make sure segment array fits in package length */
- if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
+ if (len < ice_struct_size(pkg, seg_offset, seg_count))
return ICE_ERR_BUF_TOO_SHORT;
/* all segments must fit within length */
hw->seg = NULL;
}
-/**
- * ice_init_fd_mask_regs - initialize Flow Director mask registers
- * @hw: pointer to the HW struct
- *
- * This function sets up the Flow Director mask registers to allow for complete
- * masking off of any of the 24 Field Vector words. After this call, mask 0 will
- * mask off all of FV index 0, mask 1 will mask off all of FV index 1, etc.
- */
-static void ice_init_fd_mask_regs(struct ice_hw *hw)
-{
- u16 i;
-
- for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
- wr32(hw, GLQF_FDMASK(i), i);
- ice_debug(hw, ICE_DBG_INIT, "init fd mask(%d): %x = %x\n", i,
- GLQF_FDMASK(i), i);
- }
-}
-
/**
* ice_init_pkg_regs - initialize additional package registers
* @hw: pointer to the hardware structure
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX 0
+ if (hw->dcf_enabled)
+ return;
/* setup Switch block input mask, which is 48-bits in two parts */
wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
- /* setup default flow director masks */
- ice_init_fd_mask_regs(hw);
}
/**
* ice_chk_pkg_version - check package version for compatibility with driver
- * @hw: pointer to the hardware structure
* @pkg_ver: pointer to a version structure to check
*
* Check to make sure that the package about to be downloaded is compatible with
* version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
* definitions.
*/
-static enum ice_status
-ice_chk_pkg_version(struct ice_hw *hw, struct ice_pkg_ver *pkg_ver)
+static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
- pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) {
- ice_info(hw, "ERROR: Incompatible package: %d.%d.%d.%d - requires package version: %d.%d.*.*\n",
- pkg_ver->major, pkg_ver->minor, pkg_ver->update,
- pkg_ver->draft, ICE_PKG_SUPP_VER_MAJ,
- ICE_PKG_SUPP_VER_MNR);
-
+ pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
return ICE_ERR_NOT_SUPPORTED;
- }
return ICE_SUCCESS;
}
/**
- * ice_init_pkg - initialize/download package
+ * ice_chk_pkg_compat
* @hw: pointer to the hardware structure
- * @buf: pointer to the package buffer
- * @len: size of the package buffer
- *
- * This function initializes a package. The package contains HW tables
- * required to do packet processing. First, the function extracts package
- * information such as version. Then it finds the ice configuration segment
- * within the package; this function then saves a copy of the segment pointer
- * within the supplied package buffer. Next, the function will cache any hints
- * from the package, followed by downloading the package itself. Note, that if
- * a previous PF driver has already downloaded the package successfully, then
- * the current driver will not have to download the package again.
- *
- * The local package contents will be used to query default behavior and to
- * update specific sections of the HW's version of the package (e.g. to update
- * the parse graph to understand new protocols).
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
*
- * This function stores a pointer to the package buffer memory, and it is
- * expected that the supplied buffer will not be freed immediately. If the
- * package buffer needs to be freed, such as when read from a file, use
- * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
- * case.
+ * This function checks the package version compatibility with the driver and NVM
*/
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+static enum ice_status
+ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
+ struct ice_seg **seg)
{
- struct ice_pkg_hdr *pkg;
+ struct ice_aqc_get_pkg_info_resp *pkg;
enum ice_status status;
- struct ice_seg *seg;
+ u16 size;
+ u32 i;
- if (!buf || !len)
- return ICE_ERR_PARAM;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- pkg = (struct ice_pkg_hdr *)buf;
- status = ice_verify_pkg(pkg, len);
+ /* Check package version compatibility */
+ status = ice_chk_pkg_version(&hw->pkg_ver);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
- status);
+ ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
return status;
}
- /* initialize package info */
- status = ice_init_pkg_info(hw, pkg);
- if (status)
- return status;
-
- /* before downloading the package, check package version for
- * compatibility with driver
- */
- status = ice_chk_pkg_version(hw, &hw->pkg_ver);
- if (status)
- return status;
-
- /* find segment in given package */
- seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
- if (!seg) {
+ /* find ICE segment in given package */
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+ ospkg);
+ if (!*seg) {
ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
return ICE_ERR_CFG;
}
- /* initialize package hints and then download package */
- ice_init_pkg_hints(hw, seg);
- status = ice_download_pkg(hw, seg);
- if (status == ICE_ERR_AQ_NO_WORK) {
- ice_debug(hw, ICE_DBG_INIT,
- "package previously loaded - no work.\n");
- status = ICE_SUCCESS;
+ /* Check if FW is compatible with the OS package */
+ size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
+ pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+ if (!pkg)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
+ if (status)
+ goto fw_ddp_compat_free_alloc;
+
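+ /* the OS package is considered compatible when its ICE segment major
+ * version matches the NVM package major version and its minor version
+ * is not newer than the NVM package minor version
+ */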
+ for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
+ /* loop till we find the NVM package */
+ if (!pkg->pkg_info[i].is_in_nvm)
+ continue;
+ if ((*seg)->hdr.seg_format_ver.major !=
+ pkg->pkg_info[i].ver.major ||
+ (*seg)->hdr.seg_format_ver.minor >
+ pkg->pkg_info[i].ver.minor) {
+ status = ICE_ERR_FW_DDP_MISMATCH;
+ ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
+ }
+ /* done processing NVM package so break */
+ break;
+ }
+fw_ddp_compat_free_alloc:
+ ice_free(hw, pkg);
+ return status;
+}
+
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as of type ice_sw_fv_section and
+ * enumerates the offset field. "offset" is an index into the field vector table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section =
+ (struct ice_sw_fv_section *)section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= LE16_TO_CPU(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+ * 4k block. To get to the true index into the field vector
+ * table need to add the relative index to the base_offset
+ * field of this section
+ */
+ *offset = LE16_TO_CPU(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_prof_index_max - get the max profile index for used profile
+ * @hw: pointer to the HW struct
+ *
+ * Calling this function will get the max profile index of the profiles in use
+ * and store that index in struct ice_switch_info *switch_info
+ * in hw for later use.
+ */
+static int ice_get_prof_index_max(struct ice_hw *hw)
+{
+ u16 prof_index = 0, j, max_prof_index = 0;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ bool flag = false;
+ struct ice_fv *fv;
+ u32 offset;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+
+ do {
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* in the profiles that are not used, the prot_id is set to 0xff
+ * and the off is set to 0x1ff for all the field vectors.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
+ fv->ew[j].off != ICE_FV_OFFSET_INVAL)
+ flag = true;
+ if (flag && prof_index > max_prof_index)
+ max_prof_index = prof_index;
+
+ prof_index++;
+ flag = false;
+ } while (fv);
+
+ hw->switch_info->max_used_prof_index = max_prof_index;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note, that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
+ * case.
+ */
+enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ struct ice_pkg_hdr *pkg;
+ enum ice_status status;
+ struct ice_seg *seg;
+
+ if (!buf || !len)
+ return ICE_ERR_PARAM;
+
+ pkg = (struct ice_pkg_hdr *)buf;
+ status = ice_verify_pkg(pkg, len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+ status);
+ return status;
+ }
+
+ /* initialize package info */
+ status = ice_init_pkg_info(hw, pkg);
+ if (status)
+ return status;
+
+ /* before downloading the package, check package version for
+ * compatibility with driver
+ */
+ status = ice_chk_pkg_compat(hw, pkg, &seg);
+ if (status)
+ return status;
+
+ /* initialize package hints and then download package */
+ ice_init_pkg_hints(hw, seg);
+ status = ice_download_pkg(hw, seg);
+ if (status == ICE_ERR_AQ_NO_WORK) {
+ ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
+ status = ICE_SUCCESS;
}
/* Get information on the package currently loaded in HW, then make sure
if (!status) {
status = ice_get_pkg_info(hw);
if (!status)
- status = ice_chk_pkg_version(hw, &hw->active_pkg_ver);
+ status = ice_chk_pkg_version(&hw->active_pkg_ver);
}
if (!status) {
*/
ice_init_pkg_regs(hw);
ice_fill_blk_tbls(hw);
+ ice_fill_hw_ptype(hw);
+ ice_get_prof_index_max(hw);
} else {
ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
status);
return NULL;
buf = (struct ice_buf_hdr *)bld;
- buf->data_end = CPU_TO_LE16(sizeof(*buf) -
- sizeof(buf->section_entry[0]));
+ buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
+ section_entry));
return bld;
}
-/**
- * ice_sw_fv_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the field vector entry to be returned
- * @offset: ptr to variable that receives the offset in the field vector table
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * This function treats the given section as of type ice_sw_fv_section and
- * enumerates offset field. "offset" is an index into the field vector
- * vector table.
- */
-static void *
-ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_sw_fv_section *fv_section =
- (struct ice_sw_fv_section *)section;
-
- if (!section || sect_type != ICE_SID_FLD_VEC_SW)
- return NULL;
- if (index >= LE16_TO_CPU(fv_section->count))
- return NULL;
- if (offset)
- /* "index" passed in to this function is relative to a given
- * 4k block. To get to the true index into the field vector
- * table need to add the relative index to the base_offset
- * field of this section
- */
- *offset = LE16_TO_CPU(fv_section->base_offset) + index;
- return fv_section->fv + index;
-}
-
/**
* ice_get_sw_prof_type - determine switch profile type
* @hw: pointer to the HW structure
/**
* ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
* @hw: pointer to hardware structure
- * @type: type of profiles requested
+ * @req_profs: type of profiles requested
* @bm: pointer to memory for returning the bitmap of field vectors
*/
void
-ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
ice_bitmap_t *bm)
{
struct ice_pkg_enum state;
struct ice_seg *ice_seg;
struct ice_fv *fv;
- if (type == ICE_PROF_ALL) {
- u16 i;
-
- for (i = 0; i < ICE_MAX_NUM_PROFILES; i++)
- ice_set_bit(i, bm);
+ if (req_profs == ICE_PROF_ALL) {
+ ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
return;
}
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
-
ice_seg = hw->seg;
do {
enum ice_prof_type prof_type;
/* Determine field vector type */
prof_type = ice_get_sw_prof_type(hw, fv);
- if (type & prof_type)
+ if (req_profs & prof_type)
ice_set_bit((u16)offset, bm);
}
} while (fv);
* allocated for every list entry.
*/
enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u16 *prot_ids, u8 ids_cnt,
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
{
struct ice_sw_fv_list_entry *fvl;
struct ice_fv *fv;
u32 offset;
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
if (!ids_cnt || !hw->seg)
return ICE_ERR_PARAM;
ice_seg = hw->seg;
do {
- u8 i;
+ u16 i;
fv = (struct ice_fv *)
ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
}
/**
- * ice_init_profile_to_result_bm - Initialize the profile result index bitmap
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
* @hw: pointer to hardware structure
*/
-void
-ice_init_prof_result_bm(struct ice_hw *hw)
+void ice_init_prof_result_bm(struct ice_hw *hw)
{
struct ice_pkg_enum state;
struct ice_seg *ice_seg;
struct ice_fv *fv;
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
if (!hw->seg)
return;
*
* Frees a package buffer
*/
-static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
ice_free(hw, bld);
}
bld->reserved_section_table_entries += count;
data_end = LE16_TO_CPU(buf->data_end) +
- (count * sizeof(buf->section_entry[0]));
+ FLEX_ARRAY_SIZE(buf, section_entry, count);
buf->data_end = CPU_TO_LE16(data_end);
return ICE_SUCCESS;
return NULL;
}
+/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section)
+{
+ struct ice_buf_build *buf;
+
+ if (!section)
+ return NULL;
+
+ buf = ice_pkg_buf_alloc(hw);
+ if (!buf)
+ return NULL;
+
+ if (ice_pkg_buf_reserve_section(buf, 1))
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ *section = ice_pkg_buf_alloc_section(buf, type, size);
+ if (!*section)
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+ ice_pkg_buf_free(hw, buf);
+ return NULL;
+}
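+
+/* Usage sketch (illustrative only; the section type and size below are
+ * placeholder choices, not calls added by this change):
+ *
+ *	struct ice_boost_tcam_section *sect;
+ *	struct ice_buf_build *bld;
+ *
+ *	bld = ice_pkg_buf_alloc_single_section(hw, ICE_SID_RXPARSER_BOOST_TCAM,
+ *					       ice_struct_size(sect, tcam, 1),
+ *					       (void **)&sect);
+ *	if (bld) {
+ *		... fill in *sect, then use ice_pkg_buf(bld) to get the buffer ...
+ *		ice_pkg_buf_free(hw, bld);
+ *	}
+ */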
+
/**
* ice_pkg_buf_get_active_sections
* @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
}
/**
- * ice_pkg_buf_header
+ * ice_pkg_buf
* @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
*
* Return a pointer to the buffer's header
*/
-static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
if (!bld)
return NULL;
}
/**
- * ice_tunnel_port_in_use
+ * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
* @hw: pointer to the HW structure
* @port: port to search for
* @index: optionally returns index
* Returns whether a port is already in use as a tunnel, and optionally its
* index
*/
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
+static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
{
u16 i;
return false;
}
+/**
+ * ice_tunnel_port_in_use
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
+{
+ bool res;
+
+ ice_acquire_lock(&hw->tnl_lock);
+ res = ice_tunnel_port_in_use_hlpr(hw, port, index);
+ ice_release_lock(&hw->tnl_lock);
+
+ return res;
+}
+
/**
* ice_tunnel_get_type
* @hw: pointer to the HW structure
bool
ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
{
+ bool res = false;
u16 i;
+ ice_acquire_lock(&hw->tnl_lock);
+
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
*type = hw->tnl.tbl[i].type;
- return true;
+ res = true;
+ break;
}
- return false;
+ ice_release_lock(&hw->tnl_lock);
+
+ return res;
}
/**
}
/**
- * ice_get_tunnel_port - retrieve an open tunnel port
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
* @hw: pointer to the HW structure
* @type: tunnel type (TNL_ALL will return any open port)
* @port: returns open port
ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
u16 *port)
{
+ bool res = false;
u16 i;
+ ice_acquire_lock(&hw->tnl_lock);
+
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
(type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
*port = hw->tnl.tbl[i].port;
- return true;
+ res = true;
+ break;
}
- return false;
+ ice_release_lock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_upd_dvm_boost_entry
+ * @hw: pointer to the HW structure
+ * @entry: pointer to double VLAN boost entry info
+ */
+static enum ice_status
+ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u8 val, dc, nm;
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld)
+ return ICE_ERR_NO_MEMORY;
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_upd_dvm_boost_entry_err;
+
+ sect_rx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ ice_struct_size(sect_rx, tcam, 1));
+ if (!sect_rx)
+ goto ice_upd_dvm_boost_entry_err;
+ sect_rx->count = CPU_TO_LE16(1);
+
+ sect_tx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ ice_struct_size(sect_tx, tcam, 1));
+ if (!sect_tx)
+ goto ice_upd_dvm_boost_entry_err;
+ sect_tx->count = CPU_TO_LE16(1);
+
+ /* copy original boost entry to update package buffer */
+ ice_memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam),
+ ICE_NONDMA_TO_NONDMA);
+
+ /* re-write the don't care and never match bits accordingly */
+ if (entry->enable) {
+ /* all bits are don't care */
+ val = 0x00;
+ dc = 0xFF;
+ nm = 0x00;
+ } else {
+ /* disable, one never match bit, the rest are don't care */
+ val = 0x00;
+ dc = 0xF7;
+ nm = 0x08;
+ }
+
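+ /* with the values above, ice_set_key() below treats every bit of the first
+ * key byte as don't care in the enable case, while the disable case forces
+ * one never-match bit (see the val/dc/nm description above ice_set_key())
+ */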
+ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ &val, NULL, &dc, &nm, 0, sizeof(u8));
+
+ /* exact copy of entry to Tx section entry */
+ ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
+ ICE_NONDMA_TO_NONDMA);
+
+ status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);
+
+ice_upd_dvm_boost_entry_err:
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_set_dvm_boost_entries
+ * @hw: pointer to the HW structure
+ *
+ * Enable double VLAN mode by updating the appropriate boost TCAM entries.
+ */
+enum ice_status ice_set_dvm_boost_entries(struct ice_hw *hw)
+{
+ enum ice_status status;
+ u16 i;
+
+ for (i = 0; i < hw->dvm_upd.count; i++) {
+ status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
+ if (status)
+ return status;
+ }
+
+ return ICE_SUCCESS;
}
/**
* ice_create_tunnel
* @hw: pointer to the HW structure
* @type: type of tunnel
- * @port: port to use for vxlan tunnel
+ * @port: port of tunnel to create
*
- * Creates a tunnel
+ * Create a tunnel by updating the parse graph in the parser. We do that by
+ * creating a package buffer with the tunnel info and issuing an update package
+ * command.
*/
enum ice_status
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
struct ice_buf_build *bld;
u16 index;
- if (ice_tunnel_port_in_use(hw, port, NULL))
- return ICE_ERR_ALREADY_EXISTS;
+ ice_acquire_lock(&hw->tnl_lock);
- if (!ice_find_free_tunnel_entry(hw, type, &index))
- return ICE_ERR_OUT_OF_RANGE;
+ if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
+ hw->tnl.tbl[index].ref++;
+ status = ICE_SUCCESS;
+ goto ice_create_tunnel_end;
+ }
+
+ if (!ice_find_free_tunnel_entry(hw, type, &index)) {
+ status = ICE_ERR_OUT_OF_RANGE;
+ goto ice_create_tunnel_end;
+ }
bld = ice_pkg_buf_alloc(hw);
- if (!bld)
- return ICE_ERR_NO_MEMORY;
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_create_tunnel_end;
+ }
/* allocate 2 sections, one for Rx parser, one for Tx parser */
if (ice_pkg_buf_reserve_section(bld, 2))
sect_rx = (struct ice_boost_tcam_section *)
ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
- sizeof(*sect_rx));
+ ice_struct_size(sect_rx, tcam, 1));
if (!sect_rx)
goto ice_create_tunnel_err;
sect_rx->count = CPU_TO_LE16(1);
sect_tx = (struct ice_boost_tcam_section *)
ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
- sizeof(*sect_tx));
+ ice_struct_size(sect_tx, tcam, 1));
if (!sect_tx)
goto ice_create_tunnel_err;
sect_tx->count = CPU_TO_LE16(1);
*/
ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
(u8 *)&port, NULL, NULL, NULL,
- offsetof(struct ice_boost_key_value, hv_dst_port_key),
+ (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
/* exact copy of entry to Tx section entry */
if (!status) {
hw->tnl.tbl[index].port = port;
hw->tnl.tbl[index].in_use = true;
+ hw->tnl.tbl[index].ref = 1;
}
ice_create_tunnel_err:
ice_pkg_buf_free(hw, bld);
+ice_create_tunnel_end:
+ ice_release_lock(&hw->tnl_lock);
+
return status;
}
enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
u16 count = 0;
+ u16 index;
u16 size;
- u16 i;
+ u16 i, j;
+
+ ice_acquire_lock(&hw->tnl_lock);
+
+ if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
+ if (hw->tnl.tbl[index].ref > 1) {
+ hw->tnl.tbl[index].ref--;
+ status = ICE_SUCCESS;
+ goto ice_destroy_tunnel_end;
+ }
/* determine count */
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
(all || hw->tnl.tbl[i].port == port))
count++;
- if (!count)
- return ICE_ERR_PARAM;
+ if (!count) {
+ status = ICE_ERR_PARAM;
+ goto ice_destroy_tunnel_end;
+ }
/* size of section - there is at least one entry */
- size = (count - 1) * sizeof(*sect_rx->tcam) + sizeof(*sect_rx);
+ size = ice_struct_size(sect_rx, tcam, count);
bld = ice_pkg_buf_alloc(hw);
- if (!bld)
- return ICE_ERR_NO_MEMORY;
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_destroy_tunnel_end;
+ }
/* allocate 2 sections, one for Rx parser, one for Tx parser */
if (ice_pkg_buf_reserve_section(bld, 2))
size);
if (!sect_rx)
goto ice_destroy_tunnel_err;
- sect_rx->count = CPU_TO_LE16(1);
+ sect_rx->count = CPU_TO_LE16(count);
sect_tx = (struct ice_boost_tcam_section *)
ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
size);
if (!sect_tx)
goto ice_destroy_tunnel_err;
- sect_tx->count = CPU_TO_LE16(1);
+ sect_tx->count = CPU_TO_LE16(count);
/* copy original boost entry to update package buffer, one copy to Rx
* section, another copy to the Tx section
*/
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
(all || hw->tnl.tbl[i].port == port)) {
- ice_memcpy(sect_rx->tcam + i,
+ ice_memcpy(sect_rx->tcam + j,
hw->tnl.tbl[i].boost_entry,
sizeof(*sect_rx->tcam),
ICE_NONDMA_TO_NONDMA);
- ice_memcpy(sect_tx->tcam + i,
+ ice_memcpy(sect_tx->tcam + j,
hw->tnl.tbl[i].boost_entry,
sizeof(*sect_tx->tcam),
ICE_NONDMA_TO_NONDMA);
hw->tnl.tbl[i].marked = true;
+ j++;
}
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
for (i = 0; i < hw->tnl.count &&
i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].marked) {
+ hw->tnl.tbl[i].ref = 0;
hw->tnl.tbl[i].port = 0;
hw->tnl.tbl[i].in_use = false;
hw->tnl.tbl[i].marked = false;
ice_destroy_tunnel_err:
ice_pkg_buf_free(hw, bld);
+ice_destroy_tunnel_end:
+ ice_release_lock(&hw->tnl_lock);
+
return status;
}
* @off: variable to receive the protocol offset
*/
enum ice_status
-ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u8 fv_idx,
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off)
{
struct ice_fv_word *fv_ext;
* @ptg: pointer to variable that receives the PTG
*
* This function will search the PTGs for a particular ptype, returning the
- * PTG ID that contains it through the ptg parameter, with the value of
+ * PTG ID that contains it through the PTG parameter, with the value of
* ICE_DEFAULT_PTG (0) meaning it is part the default PTG.
*/
static enum ice_status
* ice_ptg_alloc_val - Allocates a new packet type group ID by value
* @hw: pointer to the hardware structure
* @blk: HW block
- * @ptg: the ptg to allocate
+ * @ptg: the PTG to allocate
*
- * This function allocates a given packet type group ID specified by the ptg
+ * This function allocates a given packet type group ID specified by the PTG
* parameter.
*/
-static
-void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
+static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
{
hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
}
* @hw: pointer to the hardware structure
* @blk: HW block
* @ptype: the ptype to remove
- * @ptg: the ptg to remove the ptype from
+ * @ptg: the PTG to remove the ptype from
*
- * This function will remove the ptype from the specific ptg, and move it to
+ * This function will remove the ptype from the specific PTG, and move it to
* the default PTG (ICE_DEFAULT_PTG).
*/
static enum ice_status
* @hw: pointer to the hardware structure
* @blk: HW block
* @ptype: the ptype to add or move
- * @ptg: the ptg to add or move the ptype to
+ * @ptg: the PTG to add or move the ptype to
*
* This function will either add or move a ptype to a particular PTG depending
* on if the ptype is already part of another group. Note that using a
u16 xlt2; /* # XLT2 entries */
u16 prof_tcam; /* # profile ID TCAM entries */
u16 prof_id; /* # profile IDs */
- u8 prof_cdid_bits; /* # cdid one-hot bits used in key */
+ u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
u16 prof_redir; /* # profile redirection entries */
u16 es; /* # extraction sequence entries */
u16 fvw; /* # field vector words */
u16 count = 0;
/* compare counts */
- LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) {
+ LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list)
count++;
- }
- LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) {
+ LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
chk_count++;
- }
if (!count || count != chk_count)
return false;
* ice_vsig_alloc_val - allocate a new VSIG by value
* @hw: pointer to the hardware structure
* @blk: HW block
- * @vsig: the vsig to allocate
+ * @vsig: the VSIG to allocate
*
- * This function will allocate a given VSIG specified by the vsig parameter.
+ * This function will allocate a given VSIG specified by the VSIG parameter.
*/
static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
u16 i;
- for (i = 0; i < xlt2->count; i++) {
+ for (i = 0; i < xlt2->count; i++)
if (xlt2->vsig_tbl[i].in_use &&
ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
return ICE_SUCCESS;
}
- }
return ICE_ERR_DOES_NOT_EXIST;
}
* @blk: HW block
* @prof: profile to check
* @idx: profile index to check
- * @masks: masks to match
+ * @mask: mask to match
*/
static bool
ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
expect_no_mask = true;
/* Scan the enabled masks on this profile, for the specified idx */
- for (i = 0; i < ICE_PROFILE_MASK_COUNT; i++)
+ for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
+ hw->blk[blk].masks.count; i++)
if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
if (hw->blk[blk].masks.masks[i].in_use &&
hw->blk[blk].masks.masks[i].idx == idx) {
struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
- u16 i;
+ u8 i;
+
+ /* For FD and RSS, we don't want to reuse an existing profile with the
+ * same field vector and mask. Doing so would cause rule interference.
+ */
+ if (blk == ICE_BLK_FD || blk == ICE_BLK_RSS)
+ return ICE_ERR_DOES_NOT_EXIST;
- for (i = 0; i < es->count; i++) {
+ for (i = 0; i < (u8)es->count; i++) {
u16 off = i * es->fvw;
- u16 j;
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
continue;
/* check if masks settings are the same for this profile */
- if (!ice_prof_has_mask(hw, blk, i, masks))
- continue;
-
- *prof_id = i;
- return ICE_SUCCESS;
- }
-
- return ICE_ERR_DOES_NOT_EXIST;
-}
-
-/**
- * ice_find_prof_id - find profile ID for a given field vector
- * @hw: pointer to the hardware structure
- * @blk: HW block
- * @fv: field vector to search for
- * @prof_id: receives the profile ID
- */
-static enum ice_status
-ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
- struct ice_fv_word *fv, u8 *prof_id)
-{
- struct ice_es *es = &hw->blk[blk].es;
- u16 off, i;
-
- for (i = 0; i < es->count; i++) {
- off = i * es->fvw;
-
- if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
+ if (masks && !ice_prof_has_mask(hw, blk, i, masks))
continue;
*prof_id = i;
* ice_alloc_tcam_ent - allocate hardware TCAM entry
* @hw: pointer to the HW struct
* @blk: the block to allocate the TCAM for
+ * @btm: true to allocate from bottom of table, false to allocate from top
* @tcam_idx: pointer to variable to receive the TCAM entry
*
* This function allocates a new entry in a Profile ID TCAM for a specific
* block.
*/
static enum ice_status
-ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
+ u16 *tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
return ICE_ERR_PARAM;
- return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
+ return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
}
/**
*/
static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
{
-#define MAX_NUM_PORTS 8
- u16 num_ports = MAX_NUM_PORTS;
+ u16 per_pf;
u16 i;
ice_init_lock(&hw->blk[blk].masks.lock);
- hw->blk[blk].masks.count = ICE_PROFILE_MASK_COUNT / num_ports;
- hw->blk[blk].masks.first = hw->pf_id * hw->blk[blk].masks.count;
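+ /* carve the device's profile mask pool evenly across all PFs */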
+ per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
+
+ hw->blk[blk].masks.count = per_pf;
+ hw->blk[blk].masks.first = hw->pf_id * per_pf;
ice_memset(hw->blk[blk].masks.masks, 0,
sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM);
hw->blk[blk].masks.masks[mask_idx].idx = 0;
/* update mask as unused entry */
- ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d", blk, mask_idx);
+ ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
+ mask_idx);
ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
exit_ice_free_prof_mask:
/**
* ice_shutdown_all_prof_masks - releases all locks for masking
* @hw: pointer to the HW struct
- * @blk: hardware block
*
* This should be called before unloading the driver
*/
* @hw: pointer to the HW struct
* @blk: hardware block
* @prof_id: profile ID
- * @es: field vector
* @masks: masks
*/
static enum ice_status
ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
- struct ice_fv_word *es, u16 *masks)
+ u16 *masks)
{
bool err = false;
u32 ena_mask = 0;
* @hw: pointer to the hardware structure
* @blk: the HW block to initialize
*/
-static
-void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
+static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
{
u16 pt;
LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
ice_flow_entry, l_entry)
- ice_flow_rem_entry(hw, ICE_FLOW_ENTRY_HNDL(e));
+ ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
+ ICE_FLOW_ENTRY_HNDL(e));
LIST_DEL(&p->l_entry);
if (p->acts)
ice_free(hw, p->acts);
+
+ ice_destroy_lock(&p->entries_lock);
ice_free(hw, p);
}
ice_release_lock(&hw->fl_profs_locks[blk_idx]);
ice_free(hw, r);
}
ice_destroy_lock(&hw->rss_locks);
- ice_shutdown_all_prof_masks(hw);
+ if (!hw->dcf_enabled)
+ ice_shutdown_all_prof_masks(hw);
ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
}
INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
}
+/**
+ * ice_clear_hw_tbls - clear HW tables and flow profiles
+ * @hw: pointer to the hardware structure
+ */
+void ice_clear_hw_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_tcam *prof = &hw->blk[i].prof;
+ struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+ struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+ struct ice_es *es = &hw->blk[i].es;
+
+ if (hw->blk[i].is_list_init) {
+ ice_free_prof_map(hw, i);
+ ice_free_flow_profs(hw, i);
+ }
+
+ ice_free_vsig_tbl(hw, (enum ice_block)i);
+
+ ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt1->ptg_tbl, 0,
+ ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
+ ICE_NONDMA_MEM);
+
+ ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt2->vsig_tbl, 0,
+ xlt2->count * sizeof(*xlt2->vsig_tbl),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
+ ICE_NONDMA_MEM);
+
+ ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
+ ICE_NONDMA_MEM);
+ ice_memset(prof_redir->t, 0,
+ prof_redir->count * sizeof(*prof_redir->t),
+ ICE_NONDMA_MEM);
+
+ ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
+ ICE_NONDMA_MEM);
+ ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
+ ICE_NONDMA_MEM);
+ ice_memset(es->written, 0, es->count * sizeof(*es->written),
+ ICE_NONDMA_MEM);
+ ice_memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena),
+ ICE_NONDMA_MEM);
+ }
+}
+
/**
* ice_init_hw_tbls - init hardware table memory
* @hw: pointer to the hardware structure
ice_init_lock(&hw->rss_locks);
INIT_LIST_HEAD(&hw->rss_list_head);
- ice_init_all_prof_masks(hw);
+ if (!hw->dcf_enabled)
+ ice_init_all_prof_masks(hw);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
es->ref_count = (u16 *)
ice_calloc(hw, es->count, sizeof(*es->ref_count));
+ if (!es->ref_count)
+ goto err;
+
es->written = (u8 *)
ice_calloc(hw, es->count, sizeof(*es->written));
+
+ if (!es->written)
+ goto err;
+
es->mask_ena = (u32 *)
ice_calloc(hw, es->count, sizeof(*es->mask_ena));
- if (!es->ref_count)
+
+ if (!es->mask_ena)
goto err;
}
return ICE_SUCCESS;
* @blk: the block in which to write profile ID to
* @ptg: packet type group (PTG) portion of key
* @vsig: VSIG portion of key
- * @cdid: cdid portion of key
+ * @cdid: CDID portion of key
* @flags: flag portion of key
* @vl_msk: valid mask
* @dc_msk: don't care mask
default:
ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
break;
- };
+ }
return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
* @prof_id: profile ID
* @ptg: packet type group (PTG) portion of key
* @vsig: VSIG portion of key
- * @cdid: cdid portion of key
+ * @cdid: CDID portion of key
* @flags: flag portion of key
* @vl_msk: valid mask
* @dc_msk: don't care mask
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_vsi *ptr;
+
*refs = 0;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
struct ice_vsig_prof *ent;
LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
- ice_vsig_prof, list) {
+ ice_vsig_prof, list)
if (ent->profile_cookie == hdl)
return true;
- }
- ice_debug(hw, ICE_DBG_INIT,
- "Characteristic list for VSI group %d not found.\n",
+ ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
vsig);
return false;
}
u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
struct ice_chs_chg *tmp;
- LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
struct ice_pkg_es *p;
id = ice_sect_id(blk, ICE_VEC_TBL);
p = (struct ice_pkg_es *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p, es,
+ 1) +
vec_size -
sizeof(p->es[0]));
ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
ICE_NONDMA_TO_NONDMA);
}
- }
return ICE_SUCCESS;
}
{
struct ice_chs_chg *tmp;
- LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
struct ice_prof_id_section *p;
u32 id;
id = ice_sect_id(blk, ICE_PROF_TCAM);
p = (struct ice_prof_id_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p,
+ entry,
+ 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
sizeof(hw->blk[blk].prof.t->key),
ICE_NONDMA_TO_NONDMA);
}
- }
return ICE_SUCCESS;
}
{
struct ice_chs_chg *tmp;
- LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
struct ice_xlt1_section *p;
u32 id;
id = ice_sect_id(blk, ICE_XLT1);
p = (struct ice_xlt1_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p,
+ value,
+ 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
p->offset = CPU_TO_LE16(tmp->ptype);
p->value[0] = tmp->ptg;
}
- }
return ICE_SUCCESS;
}
struct ice_chs_chg *tmp;
LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
- bool found = false;
-
- if (tmp->type == ICE_VSIG_ADD)
- found = true;
- else if (tmp->type == ICE_VSI_MOVE)
- found = true;
- else if (tmp->type == ICE_VSIG_REM)
- found = true;
-
- if (found) {
- struct ice_xlt2_section *p;
- u32 id;
+ struct ice_xlt2_section *p;
+ u32 id;
+ switch (tmp->type) {
+ case ICE_VSIG_ADD:
+ case ICE_VSI_MOVE:
+ case ICE_VSIG_REM:
id = ice_sect_id(blk, ICE_XLT2);
p = (struct ice_xlt2_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p,
+ value,
+ 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
p->count = CPU_TO_LE16(1);
p->offset = CPU_TO_LE16(tmp->vsi);
p->value[0] = CPU_TO_LE16(tmp->vsig);
+ break;
+ default:
+ break;
}
}
struct ice_buf_build *b;
struct ice_chs_chg *tmp;
enum ice_status status;
- u16 pkg_sects = 0;
- u16 sects = 0;
+ u16 pkg_sects;
u16 xlt1 = 0;
u16 xlt2 = 0;
u16 tcam = 0;
u16 es = 0;
+ u16 sects;
/* count number of sections we need */
LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
/* update package */
status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
if (status == ICE_ERR_AQ_ERROR)
- ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile.");
+ ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
error_tmp:
ice_pkg_buf_free(hw, b);
GLQF_FDMASK_SEL(prof_id), mask_sel);
}
-#define ICE_SRC_DST_MAX_COUNT 8
-
struct ice_fd_src_dst_pair {
u8 prot_id;
u8 count;
ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
- ice_init_fd_mask_regs(hw);
-
/* This code assumes that the Flow Director field vectors are assigned
* from the end of the FV indexes working towards the zero index, that
* only complete fields will be included and will be consecutive, and
ICE_PROT_INVALID)
first_free = i - 1;
- for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) {
+ for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
es[i].off == ice_fd_pairs[j].off) {
ice_set_bit(j, pair_list);
pair_start[j] = i;
}
- }
}
orig_free = first_free;
return ICE_ERR_OUT_OF_RANGE;
/* keep track of non-relevant fields */
- mask_sel |= 1 << (first_free - k);
+ mask_sel |= BIT(first_free - k);
}
pair_start[index] = first_free;
}
/* check for a swap location */
- for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) {
+ for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
es[si].off == ice_fd_pairs[j].off) {
u8 idx;
break;
}
- }
si -= indexes_used;
}
- /* for each set of 4 swap indexes, write the appropriate register */
+ /* for each set of 4 swap and 4 inset indexes, write the appropriate
+ * register
+ */
for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
- u32 raw_entry = 0;
+ u32 raw_swap = 0;
+ u32 raw_in = 0;
for (k = 0; k < 4; k++) {
u8 idx;
idx = (j * 4) + k;
- if (used[idx])
- raw_entry |= used[idx] << (k * BITS_PER_BYTE);
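+ /* only program swap/inset for FV words that are in use and were
+ * not flagged as non-relevant via mask_sel above
+ */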
+ if (used[idx] && !(mask_sel & BIT(idx))) {
+ raw_swap |= used[idx] << (k * BITS_PER_BYTE);
+#define ICE_INSET_DFLT 0x9f
+ raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
+ }
}
- /* write the appropriate register set, based on HW block */
- wr32(hw, GLQF_FDSWAP(prof_id, j), raw_entry);
+ /* write the appropriate swap register set */
+ wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+ ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
- ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %x\n",
- prof_id, j, GLQF_FDSWAP(prof_id, j), raw_entry);
+ /* write the appropriate inset register set */
+ wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
+
+ ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
}
- /* update the masks for this profile to be sure we ignore fields that
- * are not relevant to our match criteria
- */
- ice_update_fd_mask(hw, prof_id, mask_sel);
+ /* initially clear the mask select for this profile */
+ ice_update_fd_mask(hw, prof_id, 0);
return ICE_SUCCESS;
}
+/* The entries here need to match the order of enum ice_ptype_attrib */
+static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
+ { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
+ { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
+ { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
+ { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
+};
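+/* e.g. looking up the downlink attribute type yields the
+ * { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK } pair; the enum ice_ptype_attrib
+ * value is used directly as the array index.
+ */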
+
/**
- * ice_add_prof_with_mask - add profile
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
- * @es: extraction sequence (length of array is determined by the block)
- * @masks: extraction sequence (length of array is determined by the block)
- *
- * This function registers a profile, which matches a set of PTYPES with a
- * particular extraction sequence. While the hardware profile is allocated
- * it will not be written until the first call to ice_add_flow that specifies
- * the ID value used here.
+ * ice_get_ptype_attrib_info - get ptype attribute information
+ * @type: attribute type
+ * @info: pointer to the variable that receives the attribute information
*/
-enum ice_status
-ice_add_prof_with_mask(struct ice_hw *hw, enum ice_block blk, u64 id,
- u8 ptypes[], struct ice_fv_word *es, u16 *masks)
+static void
+ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
+ struct ice_ptype_attrib_info *info)
{
- u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
- ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
- struct ice_prof_map *prof;
- enum ice_status status;
- u32 byte = 0;
- u8 prof_id;
-
- ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
-
- ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
-
- /* search for existing profile */
- status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
- if (status) {
- /* allocate profile ID */
- status = ice_alloc_prof_id(hw, blk, &prof_id);
- if (status)
- goto err_ice_add_prof;
- if (blk == ICE_BLK_FD) {
- /* For Flow Director block, the extraction sequence may
- * need to be altered in the case where there are paired
- * fields that have no match. This is necessary because
- * for Flow Director, src and dest fields need to paired
- * for filter programming and these values are swapped
- * during Tx.
- */
- status = ice_update_fd_swap(hw, prof_id, es);
- if (status)
- goto err_ice_add_prof;
- }
- status = ice_update_prof_masking(hw, blk, prof_id, es, masks);
- if (status)
- goto err_ice_add_prof;
-
- /* and write new es */
- ice_write_es(hw, blk, prof_id, es);
- }
-
- ice_prof_inc_ref(hw, blk, prof_id);
-
- /* add profile info */
-
- prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
- if (!prof)
- goto err_ice_add_prof;
-
- prof->profile_cookie = id;
- prof->prof_id = prof_id;
- prof->ptg_cnt = 0;
- prof->context = 0;
-
- /* build list of ptgs */
- while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u32 bit;
-
- if (!ptypes[byte]) {
- bytes--;
- byte++;
- continue;
- }
- /* Examine 8 bits per byte */
- for (bit = 0; bit < 8; bit++) {
- if (ptypes[byte] & BIT(bit)) {
- u16 ptype;
- u8 ptg;
- u8 m;
-
- ptype = byte * BITS_PER_BYTE + bit;
-
- /* The package should place all ptypes in a
- * non-zero PTG, so the following call should
- * never fail.
- */
- if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
- continue;
+ *info = ice_ptype_attributes[type];
+}
- /* If PTG is already added, skip and continue */
- if (ice_is_bit_set(ptgs_used, ptg))
- continue;
+/**
+ * ice_add_prof_attrib - add any PTG with attributes to profile
+ * @prof: pointer to the profile to which PTG entries will be added
+ * @ptg: PTG to be added
+ * @ptype: PTYPE that needs to be looked up
+ * @attr: array of attributes that will be considered
+ * @attr_cnt: number of elements in the attribute array
+ */
+static enum ice_status
+ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
+ const struct ice_ptype_attributes *attr, u16 attr_cnt)
+{
+ bool found = false;
+ u16 i;
- ice_set_bit(ptg, ptgs_used);
- prof->ptg[prof->ptg_cnt] = ptg;
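+ /* a PTYPE may have more than one attribute entry; each match below
+ * consumes one PTG slot in the profile
+ */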
+ for (i = 0; i < attr_cnt; i++) {
+ if (attr[i].ptype == ptype) {
+ found = true;
- if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
- break;
+ prof->ptg[prof->ptg_cnt] = ptg;
+ ice_get_ptype_attrib_info(attr[i].attrib,
+ &prof->attr[prof->ptg_cnt]);
- /* nothing left in byte, then exit */
- m = ~((1 << (bit + 1)) - 1);
- if (!(ptypes[byte] & m))
- break;
- }
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ return ICE_ERR_MAX_LIMIT;
}
-
- bytes--;
- byte++;
}
- LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
- status = ICE_SUCCESS;
+ if (!found)
+ return ICE_ERR_DOES_NOT_EXIST;
-err_ice_add_prof:
- ice_release_lock(&hw->blk[blk].es.prof_map_lock);
- return status;
+ return ICE_SUCCESS;
}
/**
* @blk: hardware block
* @id: profile tracking ID
* @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @attr: array of attributes
+ * @attr_cnt: number of elements in the attr array
* @es: extraction sequence (length of array is determined by the block)
+ * @masks: mask for extraction sequence
*
- * This function registers a profile, which matches a set of PTGs with a
+ * This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
* it will not be written until the first call to ice_add_flow that specifies
* the ID value used here.
*/
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- struct ice_fv_word *es)
+ const struct ice_ptype_attributes *attr, u16 attr_cnt,
+ struct ice_fv_word *es, u16 *masks)
{
u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
enum ice_status status;
- u32 byte = 0;
+ u8 byte = 0;
u8 prof_id;
ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
/* search for existing profile */
- status = ice_find_prof_id(hw, blk, es, &prof_id);
+ status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
if (status) {
/* allocate profile ID */
status = ice_alloc_prof_id(hw, blk, &prof_id);
if (status)
goto err_ice_add_prof;
}
+ status = ice_update_prof_masking(hw, blk, prof_id, masks);
+ if (status)
+ goto err_ice_add_prof;
/* and write new es */
ice_write_es(hw, blk, prof_id, es);
/* build list of ptgs */
while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u32 bit;
+ u8 bit;
if (!ptypes[byte]) {
bytes--;
byte++;
continue;
}
+
/* Examine 8 bits per byte */
- for (bit = 0; bit < 8; bit++) {
- if (ptypes[byte] & BIT(bit)) {
- u16 ptype;
- u8 ptg;
- u8 m;
+ ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
+ BITS_PER_BYTE) {
+ u16 ptype;
+ u8 ptg;
- ptype = byte * BITS_PER_BYTE + bit;
+ ptype = byte * BITS_PER_BYTE + bit;
- /* The package should place all ptypes in a
- * non-zero PTG, so the following call should
- * never fail.
- */
- if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
- continue;
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
+ continue;
- /* If PTG is already added, skip and continue */
- if (ice_is_bit_set(ptgs_used, ptg))
- continue;
+ /* If PTG is already added, skip and continue */
+ if (ice_is_bit_set(ptgs_used, ptg))
+ continue;
- ice_set_bit(ptg, ptgs_used);
+ ice_set_bit(ptg, ptgs_used);
+ /* Check to see if there are any attributes for this
+ * ptype, and add them if found.
+ */
+ status = ice_add_prof_attrib(prof, ptg, ptype, attr,
+ attr_cnt);
+ if (status == ICE_ERR_MAX_LIMIT)
+ break;
+ if (status) {
+ /* This is simply a ptype/PTG with no
+ * attribute
+ */
prof->ptg[prof->ptg_cnt] = ptg;
+ prof->attr[prof->ptg_cnt].flags = 0;
+ prof->attr[prof->ptg_cnt].mask = 0;
if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
break;
-
- /* nothing left in byte, then exit */
- m = ~((1 << (bit + 1)) - 1);
- if (!(ptypes[byte] & m))
- break;
}
}
}
/**
- * ice_search_prof_id_low - Search for a profile tracking ID low level
+ * ice_search_prof_id - Search for a profile tracking ID
* @hw: pointer to the HW struct
* @blk: hardware block
* @id: profile tracking ID
*
- * This will search for a profile tracking ID which was previously added. This
- * version assumes that the caller has already acquired the prof map lock.
+ * This will search for a profile tracking ID which was previously added.
+ * The profile map lock should be held before calling this function.
*/
-static struct ice_prof_map *
-ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *entry = NULL;
struct ice_prof_map *map;
- LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map,
- list) {
+ LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
if (map->profile_cookie == id) {
entry = map;
break;
}
- }
-
- return entry;
-}
-
-/**
- * ice_search_prof_id - Search for a profile tracking ID
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- *
- * This will search for a profile tracking ID which was previously added.
- */
-struct ice_prof_map *
-ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
-{
- struct ice_prof_map *entry;
-
- ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
- entry = ice_search_prof_id_low(hw, blk, id);
- ice_release_lock(&hw->blk[blk].es.prof_map_lock);
return entry;
}
struct ice_vsig_prof *p;
LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
- ice_vsig_prof, list) {
+ ice_vsig_prof, list)
count++;
- }
return count;
}
enum ice_status status;
u16 i;
- for (i = 0; i < prof->tcam_count; i++) {
+ for (i = 0; i < prof->tcam_count; i++)
if (prof->tcam[i].in_use) {
prof->tcam[i].in_use = false;
status = ice_rel_tcam_idx(hw, blk,
if (status)
return ICE_ERR_HW_TABLE;
}
- }
return ICE_SUCCESS;
}
/* If the VSIG has at least 1 VSI then iterate through the list
* and remove the VSIs before deleting the group.
*/
- if (vsi_cur) {
+ if (vsi_cur)
do {
struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
struct ice_chs_chg *p;
vsi_cur = tmp;
} while (vsi_cur);
- }
-
- status = ice_vsig_free(hw, blk, vsig);
- return status;
+ return ice_vsig_free(hw, blk, vsig);
}
/**
LIST_FOR_EACH_ENTRY_SAFE(p, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
- ice_vsig_prof, list) {
+ ice_vsig_prof, list)
if (p->profile_cookie == hdl) {
if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
/* this is the last profile, remove the VSIG */
}
return status;
}
- }
return ICE_ERR_DOES_NOT_EXIST;
}
INIT_LIST_HEAD(&chg);
- for (i = 1; i < ICE_MAX_VSIGS; i++) {
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
if (ice_has_prof_vsig(hw, blk, i, id)) {
status = ice_rem_prof_id_vsig(hw, blk, i, id,
goto err_ice_rem_flow_all;
}
}
- }
status = ice_upd_prof_hw(hw, blk, &chg);
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
- pmap = ice_search_prof_id_low(hw, blk, id);
+ pmap = ice_search_prof_id(hw, blk, id);
if (!pmap) {
status = ICE_ERR_DOES_NOT_EXIST;
goto err_ice_rem_prof;
LIST_DEL(&pmap->list);
ice_free(hw, pmap);
- status = ICE_SUCCESS;
-
err_ice_rem_prof:
ice_release_lock(&hw->blk[blk].es.prof_map_lock);
return status;
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct LIST_HEAD_TYPE *chg)
{
+ enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
struct ice_chs_chg *p;
u16 i;
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_get_prof;
+ }
- for (i = 0; i < map->ptg_cnt; i++) {
+ for (i = 0; i < map->ptg_cnt; i++)
if (!hw->blk[blk].es.written[map->prof_id]) {
/* add ES to change list */
p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_get_prof;
+ }
p->type = ICE_PTG_ES_ADD;
p->ptype = 0;
p->ptg = map->ptg[i];
+ p->attr = map->attr[i];
p->add_ptg = 0;
p->add_prof = 1;
LIST_ADD(&p->list_entry, chg);
}
- }
-
- return ICE_SUCCESS;
err_ice_get_prof:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct LIST_HEAD_TYPE *lst, u64 hdl)
{
- struct ice_vsig_prof *p;
+ enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
+ struct ice_vsig_prof *p;
u16 i;
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_add_prof_to_lst;
+ }
p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
- if (!p)
- return ICE_ERR_NO_MEMORY;
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_prof_to_lst;
+ }
p->profile_cookie = map->profile_cookie;
p->prof_id = map->prof_id;
p->tcam[i].prof_id = map->prof_id;
p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
p->tcam[i].ptg = map->ptg[i];
+ p->tcam[i].attr = map->attr[i];
}
LIST_ADD(&p->list, lst);
- return ICE_SUCCESS;
+err_ice_add_prof_to_lst:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
}
/**
return ICE_SUCCESS;
}
+/**
+ * ice_set_tcam_flags - set TCAM flag don't care mask
+ * @mask: mask for flags
+ * @dc_mask: pointer to the don't care mask
+ */
+static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ])
+{
+ u16 *flag_word;
+
+ /* flags are lowest u16 */
+ flag_word = (u16 *)dc_mask;
+ *flag_word = ~mask;
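+ /* e.g. a mask of 0x0003 yields a don't care word of 0xFFFC, so only
+ * the two lowest flag bits participate in the TCAM match
+ */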
+}
+
+/**
+ * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
+ * @hw: pointer to the HW struct
+ * @idx: the index of the TCAM entry to remove
+ * @chg: the list of change structures to search
+ */
+static void
+ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
+{
+ struct ice_chs_chg *pos, *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry)
+ if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
+ LIST_DEL(&tmp->list_entry);
+ ice_free(hw, tmp);
+ }
+}
+
/**
* ice_prof_tcam_ena_dis - add enable or disable TCAM change
* @hw: pointer to the HW struct
* @blk: hardware block
* @enable: true to enable, false to disable
- * @vsig: the vsig of the TCAM entry
+ * @vsig: the VSIG of the TCAM entry
* @tcam: pointer the TCAM info structure of the TCAM to disable
* @chg: the change list
*
enum ice_status status;
struct ice_chs_chg *p;
- /* Default: enable means change the low flag bit to don't care */
- u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
- u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
- /* if disabling, free the tcam */
+ /* if disabling, free the TCAM */
if (!enable) {
- status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx);
+ status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
+
+ /* if we have already created a change for this TCAM entry, then
+ * we need to remove that entry, in order to prevent writing to
+ * a TCAM entry we will no longer own.
+ */
+ ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
tcam->tcam_idx = 0;
tcam->in_use = 0;
return status;
}
- /* for re-enabling, reallocate a tcam */
- status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
+ /* for re-enabling, reallocate a TCAM */
+ /* for entries with empty attribute masks, allocate entry from
+ * the bottom of the TCAM table; otherwise, allocate from the
+ * top of the table in order to give it higher priority
+ */
+ status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
+ &tcam->tcam_idx);
if (status)
return status;
if (!p)
return ICE_ERR_NO_MEMORY;
+ /* set don't care masks for TCAM flags */
+ ice_set_tcam_flags(tcam->attr.mask, dc_msk);
+
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
- tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
- nm_msk);
+ tcam->ptg, vsig, 0, tcam->attr.flags,
+ vl_msk, dc_msk, nm_msk);
if (status)
goto err_ice_prof_tcam_ena_dis;
return status;
}
+/**
+ * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use
+ * @ptg_attr: pointer to the PTG and attribute pair to check
+ * @ptgs_used: bitmap that denotes which PTGs are in use
+ * @attr_used: array of PTG and attribute pairs already used
+ * @attr_cnt: count of entries in the attr_used array
+ */
+static bool
+ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, ice_bitmap_t *ptgs_used,
+ struct ice_tcam_inf *attr_used[], u16 attr_cnt)
+{
+ u16 i;
+
+ if (!ice_is_bit_set(ptgs_used, ptg_attr->ptg))
+ return false;
+
+ /* the PTG is used, so now look for correct attributes */
+ for (i = 0; i < attr_cnt; i++)
+ if (attr_used[i]->ptg == ptg_attr->ptg &&
+ attr_used[i]->attr.flags == ptg_attr->attr.flags &&
+ attr_used[i]->attr.mask == ptg_attr->attr.mask)
+ return true;
+
+ return false;
+}
+
/**
* ice_adj_prof_priorities - adjust profile based on priorities
* @hw: pointer to the HW struct
struct LIST_HEAD_TYPE *chg)
{
ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
+ struct ice_tcam_inf **attr_used;
+ enum ice_status status = ICE_SUCCESS;
struct ice_vsig_prof *t;
- enum ice_status status;
+ u16 attr_used_cnt = 0;
u16 idx;
+#define ICE_MAX_PTG_ATTRS 1024
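+ /* scratch list of the PTG/attribute pairs already claimed by newer
+ * (higher priority) profiles while walking this VSIG's profile list
+ */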
+ attr_used = (struct ice_tcam_inf **)ice_calloc(hw, ICE_MAX_PTG_ATTRS,
+ sizeof(*attr_used));
+ if (!attr_used)
+ return ICE_ERR_NO_MEMORY;
+
ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
idx = vsig & ICE_VSIG_IDX_M;
u16 i;
for (i = 0; i < t->tcam_count; i++) {
+ bool used;
+
/* Scan the priorities from newest to oldest.
* Make sure that the newest profiles take priority.
*/
- if (ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
- t->tcam[i].in_use) {
+ used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used,
+ attr_used, attr_used_cnt);
+
+ if (used && t->tcam[i].in_use) {
/* need to mark this PTG as never match, as it
* was already in use and is therefore a duplicate
* (and lower priority)
&t->tcam[i],
chg);
if (status)
- return status;
- } else if (!ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
- !t->tcam[i].in_use) {
+ goto err_ice_adj_prof_priorities;
+ } else if (!used && !t->tcam[i].in_use) {
/* need to enable this PTG, as it is not in use
* and not enabled (highest priority)
*/
&t->tcam[i],
chg);
if (status)
- return status;
+ goto err_ice_adj_prof_priorities;
}
/* keep track of used ptgs */
ice_set_bit(t->tcam[i].ptg, ptgs_used);
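+ /* also record the PTG/attribute pair so older profiles using the
+ * same pair are treated as duplicates on later iterations
+ */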
+ if (attr_used_cnt < ICE_MAX_PTG_ATTRS)
+ attr_used[attr_used_cnt++] = &t->tcam[i];
+ else
+ ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n");
}
}
- return ICE_SUCCESS;
+err_ice_adj_prof_priorities:
+ ice_free(hw, attr_used);
+ return status;
}
/**
* @blk: hardware block
* @vsig: the VSIG to which this profile is to be added
* @hdl: the profile handle indicating the profile to add
+ * @rev: true to add entries to the end of the list
* @chg: the change list
*/
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
- struct LIST_HEAD_TYPE *chg)
+ bool rev, struct LIST_HEAD_TYPE *chg)
{
/* Masks that ignore flags */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
- u16 i;
-
- /* Get the details on the profile specified by the handle ID */
- map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ u16 vsig_idx, i;
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
/* new VSIG profile structure */
t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
if (!t)
+ return ICE_ERR_NO_MEMORY;
+
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
goto err_ice_add_prof_id_vsig;
+ }
t->profile_cookie = map->profile_cookie;
t->prof_id = map->prof_id;
/* create TCAM entries */
for (i = 0; i < map->ptg_cnt; i++) {
- enum ice_status status;
u16 tcam_idx;
/* add TCAM to change list */
p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_add_prof_id_vsig;
+ }
/* allocate the TCAM entry index */
- status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
+ /* for entries with empty attribute masks, allocate entry from
+ * the bottom of the TCAM table; otherwise, allocate from the
+ * top of the table in order to give it higher priority
+ */
+ status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
+ &tcam_idx);
if (status) {
ice_free(hw, p);
goto err_ice_add_prof_id_vsig;
t->tcam[i].ptg = map->ptg[i];
t->tcam[i].prof_id = map->prof_id;
t->tcam[i].tcam_idx = tcam_idx;
+ t->tcam[i].attr = map->attr[i];
t->tcam[i].in_use = true;
p->type = ICE_TCAM_ADD;
p->vsig = vsig;
p->tcam_idx = t->tcam[i].tcam_idx;
+ /* set don't care masks for TCAM flags */
+ ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk);
+
/* write the TCAM entry */
status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
t->tcam[i].prof_id,
- t->tcam[i].ptg, vsig, 0, 0,
- vl_msk, dc_msk, nm_msk);
- if (status)
+ t->tcam[i].ptg, vsig, 0,
+ t->tcam[i].attr.flags, vl_msk,
+ dc_msk, nm_msk);
+ if (status) {
+ ice_free(hw, p);
goto err_ice_add_prof_id_vsig;
+ }
/* log change */
LIST_ADD(&p->list_entry, chg);
}
/* add profile to VSIG */
- LIST_ADD(&t->list,
- &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
+ vsig_idx = vsig & ICE_VSIG_IDX_M;
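+ /* callers copying an existing profile list pass rev == true so the
+ * original ordering, and thus relative priority, is preserved
+ */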
+ if (rev)
+ LIST_ADD_TAIL(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
+ else
+ LIST_ADD(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
- return ICE_SUCCESS;
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
err_ice_add_prof_id_vsig:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
ice_free(hw, t);
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
if (status)
goto err_ice_create_prof_id_vsig;
- status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
+ status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
if (status)
goto err_ice_create_prof_id_vsig;
}
/**
- * ice_create_vsig_from_list - create a new VSIG with a list of profiles
+ * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
* @hw: pointer to the HW struct
* @blk: hardware block
* @vsi: the initial VSI that will be in VSIG
* @lst: the list of profiles that will be added to the VSIG
+ * @new_vsig: returns the new VSIG ID
* @chg: the change list
*/
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
- struct LIST_HEAD_TYPE *lst, struct LIST_HEAD_TYPE *chg)
+ struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
+ struct LIST_HEAD_TYPE *chg)
{
struct ice_vsig_prof *t;
enum ice_status status;
return status;
LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
+ /* Reverse the order here since we are copying the list */
status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
- chg);
+ true, chg);
if (status)
return status;
}
+ *new_vsig = vsig;
+
return ICE_SUCCESS;
}
struct ice_vsig_prof *tmp1, *del1;
struct LIST_HEAD_TYPE union_lst;
struct ice_chs_chg *tmp, *del;
- struct LIST_HEAD_TYPE chrs;
struct LIST_HEAD_TYPE chg;
enum ice_status status;
- u16 vsig, or_vsig = 0;
+ u16 vsig;
INIT_LIST_HEAD(&union_lst);
- INIT_LIST_HEAD(&chrs);
INIT_LIST_HEAD(&chg);
/* Get profile */
status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
if (!status && vsig) {
bool only_vsi;
+ u16 or_vsig;
u16 ref;
- /* found in vsig */
+ /* found in VSIG */
or_vsig = vsig;
/* make sure that there is no overlap/conflict between the new
* not sharing entries and we can simply add the new
* profile to the VSIG.
*/
- status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
+ status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
+ &chg);
if (status)
goto err_ice_add_prof_id_flow;
} else {
/* No match, so we need a new VSIG */
status = ice_create_vsig_from_lst(hw, blk, vsi,
- &union_lst, &chg);
+ &union_lst, &vsig,
+ &chg);
if (status)
goto err_ice_add_prof_id_flow;
ice_free(hw, del1);
}
- LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &chrs, ice_vsig_prof, list) {
- LIST_DEL(&del1->list);
- ice_free(hw, del1);
- }
-
return status;
}
{
struct ice_vsig_prof *ent, *tmp;
- LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) {
+ LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
if (ent->profile_cookie == hdl) {
LIST_DEL(&ent->list);
ice_free(hw, ent);
return ICE_SUCCESS;
}
- }
return ICE_ERR_DOES_NOT_EXIST;
}
if (last_profile) {
/* If there are no profiles left for this VSIG,
- * then simply remove the the VSIG.
+ * then simply remove the VSIG.
*/
status = ice_rem_vsig(hw, blk, vsig, &chg);
if (status)
* new VSIG and TCAM entries
*/
status = ice_create_vsig_from_lst(hw, blk, vsi,
- ©, &chg);
+ ©, &vsig,
+ &chg);
if (status)
goto err_ice_rem_prof_id_flow;