nvms = (struct ice_nvm_table *)(ice_seg->device_table +
LE32_TO_CPU(ice_seg->device_table_count));
- return (struct ice_buf_table *)
+ return (_FORCE_ struct ice_buf_table *)
(nvms->vers + LE32_TO_CPU(nvms->table_count));
}
* Handles enumeration of individual label entries.
*/
static void *
-ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
+ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
u32 *offset)
{
struct ice_label_section *labels;
* since the first call to ice_enum_labels requires a pointer to an actual
* ice_seg structure.
*/
-void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_pkg_enum state;
char *label_name;
* @size: the size of the complete key in bytes (must be even)
* @val: array of 8-bit values that makes up the value portion of the key
* @upd: array of 8-bit masks that determine what key portion to update
- * @dc: array of 8-bit masks that make up the dont' care mask
+ * @dc: array of 8-bit masks that make up the don't care mask
* @nm: array of 8-bit masks that make up the never match mask
* @off: the offset of the first byte in the key to update
* @len: the number of bytes in the key update
{
enum ice_status status;
- ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_global_cfg_lock");
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
- ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_change_lock");
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
ICE_CHANGE_LOCK_TIMEOUT);
*/
void ice_release_change_lock(struct ice_hw *hw)
{
- ice_debug(hw, ICE_DBG_TRACE, "ice_release_change_lock");
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}
struct ice_aq_desc desc;
enum ice_status status;
- ice_debug(hw, ICE_DBG_TRACE, "ice_aq_download_pkg");
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
if (error_offset)
*error_offset = 0;
return status;
}
-/**
- * ice_aq_upload_section
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer which will receive the section
- * @buf_size: the size of the package buffer
- * @cd: pointer to command details structure or NULL
- *
- * Upload Section (0x0C41)
- */
-enum ice_status
-ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, struct ice_sq_cd *cd)
-{
- struct ice_aq_desc desc;
-
- ice_debug(hw, ICE_DBG_TRACE, "ice_aq_upload_section");
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
- desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-
- return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
-}
/**
* ice_aq_update_pkg
struct ice_aq_desc desc;
enum ice_status status;
- ice_debug(hw, ICE_DBG_TRACE, "ice_aq_update_pkg");
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
if (error_offset)
*error_offset = 0;
* success it returns a pointer to the segment header, otherwise it will
* return NULL.
*/
-struct ice_generic_seg_hdr *
+static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
struct ice_pkg_hdr *pkg_hdr)
{
u32 i;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- ice_debug(hw, ICE_DBG_PKG, "Package version: %d.%d.%d.%d\n",
+ ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
bh = (struct ice_buf_hdr *)(bufs + i);
- status = ice_aq_download_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
- last, &offset, &info, NULL);
-
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
+ &offset, &info, NULL);
if (status) {
ice_debug(hw, ICE_DBG_PKG,
"Pkg download failed: err %d off %d inf %d\n",
{
struct ice_aq_desc desc;
- ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_pkg_info_list");
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
*
* Handles the download of a complete package.
*/
-enum ice_status ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
+static enum ice_status
+ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
*
* Saves off the package details into the HW structure.
*/
-enum ice_status
+static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
struct ice_global_metadata_seg *meta_seg;
*
* Store details of the package currently loaded in HW into the HW structure.
*/
-enum ice_status
-ice_get_pkg_info(struct ice_hw *hw)
+static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
struct ice_aqc_get_pkg_info_resp *pkg_info;
enum ice_status status;
u16 size;
u32 i;
- ice_debug(hw, ICE_DBG_TRACE, "ice_init_pkg_info\n");
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
(ICE_PKG_CNT - 1));
return status;
}
-/**
- * ice_find_label_value
- * @ice_seg: pointer to the ice segment (non-NULL)
- * @name: name of the label to search for
- * @type: the section type that will contain the label
- * @value: pointer to a value that will return the label's value if found
- *
- * Finds a label's value given the label name and the section type to search.
- * The ice_seg parameter must not be NULL since the first call to
- * ice_enum_labels requires a pointer to an actual ice_seg structure.
- */
-enum ice_status
-ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
- u16 *value)
-{
- struct ice_pkg_enum state;
- char *label_name;
- u16 val;
-
- if (!ice_seg)
- return ICE_ERR_PARAM;
-
- do {
- label_name = ice_enum_labels(ice_seg, type, &state, &val);
- if (label_name && !strcmp(label_name, name)) {
- *value = val;
- return ICE_SUCCESS;
- }
-
- ice_seg = NULL;
- } while (label_name);
-
- return ICE_ERR_CFG;
-}
/**
* ice_verify_pkg - verify package
* Allocates a package buffer and returns a pointer to the buffer header.
* Note: all package contents must be in Little Endian form.
*/
-struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
{
struct ice_buf_build *bld;
struct ice_buf_hdr *buf;
}
/**
- * ice_pkg_buf_alloc_single_section
+ * ice_pkg_buf_free
* @hw: pointer to the HW structure
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
- * @section: returns pointer to the section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
*
- * Allocates a package buffer with a single section.
- * Note: all package contents must be in Little Endian form.
+ * Frees a package buffer
*/
-static struct ice_buf_build *
-ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
- void **section)
+static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
- struct ice_buf_build *buf;
-
- if (!section)
- return NULL;
-
- buf = ice_pkg_buf_alloc(hw);
- if (!buf)
- return NULL;
-
- if (ice_pkg_buf_reserve_section(buf, 1))
- goto ice_pkg_buf_alloc_single_section_err;
-
- *section = ice_pkg_buf_alloc_section(buf, type, size);
- if (!*section)
- goto ice_pkg_buf_alloc_single_section_err;
-
- return buf;
-
-ice_pkg_buf_alloc_single_section_err:
- ice_pkg_buf_free(hw, buf);
- return NULL;
+ ice_free(hw, bld);
}
/**
* result in some wasted space in the buffer.
* Note: all package contents must be in Little Endian form.
*/
-enum ice_status
+static enum ice_status
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
struct ice_buf_hdr *buf;
return ICE_SUCCESS;
}
-/**
- * ice_pkg_buf_unreserve_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @count: the number of sections to unreserve
- *
- * Unreserves one or more section table entries in a package buffer, releasing
- * space that can be used for section data. This routine can be called
- * multiple times as long as they are made before calling
- * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
- * is called once, the number of sections that can be allocated will not be able
- * to be increased; not using all reserved sections is fine, but this will
- * result in some wasted space in the buffer.
- * Note: all package contents must be in Little Endian form.
- */
-enum ice_status
-ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
-{
- struct ice_buf_hdr *buf;
- u16 section_count;
- u16 data_end;
-
- if (!bld)
- return ICE_ERR_PARAM;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
-
- /* already an active section, can't decrease table size */
- section_count = LE16_TO_CPU(buf->section_count);
- if (section_count > 0)
- return ICE_ERR_CFG;
-
- if (count > bld->reserved_section_table_entries)
- return ICE_ERR_CFG;
- bld->reserved_section_table_entries -= count;
-
- data_end = LE16_TO_CPU(buf->data_end) -
- (count * sizeof(buf->section_entry[0]));
- buf->data_end = CPU_TO_LE16(data_end);
-
- return ICE_SUCCESS;
-}
-
/**
* ice_pkg_buf_alloc_section
* @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
* section contents.
* Note: all package contents must be in Little Endian form.
*/
-void *
+static void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
struct ice_buf_hdr *buf;
return NULL;
}
-/**
- * ice_pkg_buf_get_free_space
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Returns the number of free bytes remaining in the buffer.
- * Note: all package contents must be in Little Endian form.
- */
-u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
-{
- struct ice_buf_hdr *buf;
-
- if (!bld)
- return 0;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
- return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
-}
-
/**
* ice_pkg_buf_get_active_sections
* @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
* not be used.
* Note: all package contents must be in Little Endian form.
*/
-u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
{
struct ice_buf_hdr *buf;
*
* Return a pointer to the buffer's header
*/
-struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
if (!bld)
return NULL;
}
/**
- * ice_pkg_buf_free
+ * ice_tunnel_port_in_use
* @hw: pointer to the HW structure
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @port: port to search for
+ * @index: optionally returns index
*
- * Frees a package buffer
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
*/
-void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
{
- ice_free(hw, bld);
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_tunnel_get_type
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @type: returns tunnel index
+ *
+ * For a given port number, will return the type of tunnel.
+ */
+bool
+ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+ *type = hw->tnl.tbl[i].type;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_find_free_tunnel_entry
+ * @hw: pointer to the HW structure
+ * @type: tunnel type
+ * @index: optionally returns index
+ *
+ * Returns whether there is a free tunnel entry, and optionally its index
+ */
+static bool
+ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *index)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
+ hw->tnl.tbl[i].type == type) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_create_tunnel
+ * @hw: pointer to the HW structure
+ * @type: type of tunnel
+ * @port: port to use for vxlan tunnel
+ *
+ * Creates a tunnel
+ */
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 index;
+
+ if (ice_tunnel_port_in_use(hw, port, NULL))
+ return ICE_ERR_ALREADY_EXISTS;
+
+ if (!ice_find_free_tunnel_entry(hw, type, &index))
+ return ICE_ERR_OUT_OF_RANGE;
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld)
+ return ICE_ERR_NO_MEMORY;
+
+ /* allocate 2 sections, one for RX parser, one for TX parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_create_tunnel_err;
+
+ sect_rx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ sizeof(*sect_rx));
+ if (!sect_rx)
+ goto ice_create_tunnel_err;
+ sect_rx->count = CPU_TO_LE16(1);
+
+ sect_tx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ sizeof(*sect_tx));
+ if (!sect_tx)
+ goto ice_create_tunnel_err;
+ sect_tx->count = CPU_TO_LE16(1);
+
+ /* copy original boost entry to update package buffer */
+ ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
+
+ /* over-write the never-match dest port key bits with the encoded port
+ * bits
+ */
+	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ (u8 *)&port, NULL, NULL, NULL,
+ offsetof(struct ice_boost_key_value, hv_dst_port_key),
+ sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
+
+ /* exact copy of entry to TX section entry */
+ ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
+ ICE_NONDMA_TO_NONDMA);
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status) {
+ hw->tnl.tbl[index].port = port;
+ hw->tnl.tbl[index].in_use = true;
+ }
+
+ice_create_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_destroy_tunnel
+ * @hw: pointer to the HW structure
+ * @port: port of tunnel to destroy (ignored if the all parameter is true)
+ * @all: flag that states to destroy all tunnels
+ *
+ * Destroys a tunnel or all tunnels by creating an update package buffer
+ * targeting the specific updates requested and then performing an update
+ * package.
+ */
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 count = 0;
+ u16 size;
+ u16 i;
+
+ /* determine count */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port))
+ count++;
+
+ if (!count)
+ return ICE_ERR_PARAM;
+
+ /* size of section - there is at least one entry */
+ size = (count - 1) * sizeof(*sect_rx->tcam) + sizeof(*sect_rx);
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld)
+ return ICE_ERR_NO_MEMORY;
+
+ /* allocate 2 sections, one for RX parser, one for TX parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_destroy_tunnel_err;
+
+ sect_rx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_rx)
+ goto ice_destroy_tunnel_err;
+ sect_rx->count = CPU_TO_LE16(1);
+
+ sect_tx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_tx)
+ goto ice_destroy_tunnel_err;
+ sect_tx->count = CPU_TO_LE16(1);
+
+ /* copy original boost entry to update package buffer, one copy to RX
+ * section, another copy to the TX section
+ */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port)) {
+ ice_memcpy(sect_rx->tcam + i,
+ hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_rx->tcam),
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(sect_tx->tcam + i,
+ hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_tx->tcam),
+ ICE_NONDMA_TO_NONDMA);
+ hw->tnl.tbl[i].marked = true;
+ }
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status)
+ for (i = 0; i < hw->tnl.count &&
+ i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].marked) {
+ hw->tnl.tbl[i].port = 0;
+ hw->tnl.tbl[i].in_use = false;
+ hw->tnl.tbl[i].marked = false;
+ }
+
+ice_destroy_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
}
/**
/* PTG Management */
-/**
- * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table
- * @hw: pointer to the hardware structure
- * @blk: HW block
- *
- * This function will update the XLT1 hardware table to reflect the new
- * packet type group configuration.
- */
-enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
-{
- struct ice_xlt1_section *sect;
- struct ice_buf_build *bld;
- enum ice_status status;
- u16 index;
-
- bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
- ICE_XLT1_SIZE(ICE_XLT1_CNT),
-				       (void **)&sect);
- if (!bld)
- return ICE_ERR_NO_MEMORY;
-
- sect->count = CPU_TO_LE16(ICE_XLT1_CNT);
- sect->offset = CPU_TO_LE16(0);
- for (index = 0; index < ICE_XLT1_CNT; index++)
- sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg;
-
- status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
-
- ice_pkg_buf_free(hw, bld);
-
- return status;
-}
/**
* ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
* PTG ID that contains it through the ptg parameter, with the value of
* ICE_DEFAULT_PTG (0) meaning it is part the default PTG.
*/
-enum ice_status
+static enum ice_status
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
if (ptype >= ICE_XLT1_CNT || !ptg)
* that 0 is the default packet type group, so successfully created PTGs will
* have a non-zero ID value; which means a 0 return value indicates an error.
*/
-u8 ice_ptg_alloc(struct ice_hw *hw, enum ice_block blk)
+static u8 ice_ptg_alloc(struct ice_hw *hw, enum ice_block blk)
{
u16 i;
return 0;
}
-/**
- * ice_ptg_free - Frees a packet type group
- * @hw: pointer to the hardware structure
- * @blk: HW block
- * @ptg: the ptg ID to free
- *
- * This function frees a packet type group, and returns all the current ptypes
- * within it to the default PTG.
- */
-void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
-{
- struct ice_ptg_ptype *p, *temp;
-
- hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false;
- p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
- while (p) {
- p->ptg = ICE_DEFAULT_PTG;
- temp = p->next_ptype;
- p->next_ptype = NULL;
- p = temp;
- }
-
- hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL;
-}
-
/**
* ice_ptg_remove_ptype - Removes ptype from a particular packet type group
* @hw: pointer to the hardware structure
* a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
-enum ice_status
+static enum ice_status
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
enum ice_status status;
/* VSIG Management */
-/**
- * ice_vsig_update_xlt2_sect - update one section of XLT2 table
- * @hw: pointer to the hardware structure
- * @blk: HW block
- * @vsi: HW VSI number to program
- * @vsig: vsig for the VSI
- *
- * This function will update the XLT2 hardware table with the input VSI
- * group configuration.
- */
-static enum ice_status
-ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
- u16 vsig)
-{
- struct ice_xlt2_section *sect;
- struct ice_buf_build *bld;
- enum ice_status status;
-
- bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
- sizeof(struct ice_xlt2_section),
-				       (void **)&sect);
- if (!bld)
- return ICE_ERR_NO_MEMORY;
-
- sect->count = CPU_TO_LE16(1);
- sect->offset = CPU_TO_LE16(vsi);
- sect->value[0] = CPU_TO_LE16(vsig);
-
- status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
-
- ice_pkg_buf_free(hw, bld);
-
- return status;
-}
-
-/**
- * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration
- * @hw: pointer to the hardware structure
- * @blk: HW block
- *
- * This function will update the XLT2 hardware table with the input VSI
- * group configuration of used vsis.
- */
-enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
-{
- u16 vsi;
-
- for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
- /* update only vsis that have been changed */
- if (hw->blk[blk].xlt2.vsis[vsi].changed) {
- enum ice_status status;
- u16 vsig;
-
- vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
- status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig);
- if (status)
- return status;
-
- hw->blk[blk].xlt2.vsis[vsi].changed = 0;
- }
- }
-
- return ICE_SUCCESS;
-}
/**
* ice_vsig_find_vsi - find a VSIG that contains a specified VSI
* for, the list must match exactly, including the order in which the
* characteristics are listed.
*/
-enum ice_status
+static enum ice_status
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
struct LIST_HEAD_TYPE *chs, u16 *vsig)
{
* The function will remove all VSIs associated with the input VSIG and move
* them to the DEFAULT_VSIG and mark the VSIG available.
*/
-enum ice_status
+static enum ice_status
ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
struct ice_vsig_prof *dtmp, *del;
ice_free(hw, del);
}
+ /* if VSIG characteristic list was cleared for reset
+ * re-initialize the list head
+ */
+ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_remove_vsi - remove VSI from VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: VSI to remove
+ * @vsig: VSI group to remove from
+ *
+ * The function will remove the input VSI from its VSI group and move it
+ * to the DEFAULT_VSIG.
+ */
+static enum ice_status
+ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+ struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
+ u16 idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* entry already in default VSIG, don't have to remove */
+ if (idx == ICE_DEFAULT_VSIG)
+ return ICE_SUCCESS;
+
+ vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ if (!(*vsi_head))
+ return ICE_ERR_CFG;
+
+ vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
+ vsi_cur = (*vsi_head);
+
+ /* iterate the VSI list, skip over the entry to be removed */
+ while (vsi_cur) {
+ if (vsi_tgt == vsi_cur) {
+ (*vsi_head) = vsi_cur->next_vsi;
+ break;
+ }
+ vsi_head = &vsi_cur->next_vsi;
+ vsi_cur = vsi_cur->next_vsi;
+ }
+
+ /* verify if VSI was removed from group list */
+ if (!vsi_cur)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ vsi_cur->vsig = ICE_DEFAULT_VSIG;
+ vsi_cur->changed = 1;
+ vsi_cur->next_vsi = NULL;
+
return ICE_SUCCESS;
}
* move the entry to the DEFAULT_VSIG, update the original VSIG and
* then move entry to the new VSIG.
*/
-enum ice_status
+static enum ice_status
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi *tmp;
return ICE_SUCCESS;
}
-/**
- * ice_vsig_remove_vsi - remove VSI from VSIG
- * @hw: pointer to the hardware structure
- * @blk: HW block
- * @vsi: VSI to remove
- * @vsig: VSI group to remove from
- *
- * The function will remove the input VSI from its VSI group and move it
- * to the DEFAULT_VSIG.
- */
-enum ice_status
-ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
-{
- struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
- u16 idx;
-
- idx = vsig & ICE_VSIG_IDX_M;
-
- if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
-
- if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
-
- /* entry already in default VSIG, don't have to remove */
- if (idx == ICE_DEFAULT_VSIG)
- return ICE_SUCCESS;
-
- vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
- if (!(*vsi_head))
- return ICE_ERR_CFG;
-
- vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
- vsi_cur = (*vsi_head);
-
- /* iterate the VSI list, skip over the entry to be removed */
- while (vsi_cur) {
- if (vsi_tgt == vsi_cur) {
- (*vsi_head) = vsi_cur->next_vsi;
- break;
- }
- vsi_head = &vsi_cur->next_vsi;
- vsi_cur = vsi_cur->next_vsi;
- }
-
- /* verify if VSI was removed from group list */
- if (!vsi_cur)
- return ICE_ERR_DOES_NOT_EXIST;
-
- vsi_cur->vsig = ICE_DEFAULT_VSIG;
- vsi_cur->changed = 1;
- vsi_cur->next_vsi = NULL;
-
- return ICE_SUCCESS;
-}
-
/**
* ice_find_prof_id - find profile ID for a given field vector
* @hw: pointer to the hardware structure
case ICE_SID_XLT2_ACL:
case ICE_SID_XLT2_PE:
xlt2 = (struct ice_xlt2_section *)sect;
- src = (u8 *)xlt2->value;
+ src = (_FORCE_ u8 *)xlt2->value;
sect_len = LE16_TO_CPU(xlt2->count) *
sizeof(*hw->blk[block_id].xlt2.t);
dst = (u8 *)hw->blk[block_id].xlt2.t;
ice_init_sw_db(hw);
}
+/**
+ * ice_free_prof_map - free profile map
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
+{
+ struct ice_es *es = &hw->blk[blk_idx].es;
+ struct ice_prof_map *del, *tmp;
+
+ ice_acquire_lock(&es->prof_map_lock);
+ LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
+ ice_prof_map, list) {
+ LIST_DEL(&del->list);
+ ice_free(hw, del);
+ }
+ INIT_LIST_HEAD(&es->prof_map);
+ ice_release_lock(&es->prof_map_lock);
+}
+
/**
* ice_free_flow_profs - free flow profile entries
* @hw: pointer to the hardware structure
{
struct ice_flow_prof *p, *tmp;
- /* This call is being made as part of resource deallocation
- * during unload. Lock acquire and release will not be
- * necessary here.
- */
+ ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
ice_flow_prof, l_entry) {
struct ice_flow_entry *e, *t;
ice_free(hw, p->acts);
ice_free(hw, p);
}
+ ice_release_lock(&hw->fl_profs_locks[blk_idx]);
/* if driver is in reset and tables are being cleared
* re-initialize the flow profile list heads
for (i = 0; i < ICE_BLK_COUNT; i++) {
if (hw->blk[i].is_list_init) {
struct ice_es *es = &hw->blk[i].es;
- struct ice_prof_map *del, *tmp;
-
- LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
- ice_prof_map, list) {
- LIST_DEL(&del->list);
- ice_free(hw, del);
- }
+ ice_free_prof_map(hw, i);
ice_destroy_lock(&es->prof_map_lock);
ice_free_flow_profs(hw, i);
ice_destroy_lock(&hw->fl_profs_locks[i]);
+
hw->blk[i].is_list_init = false;
}
ice_free_vsig_tbl(hw, (enum ice_block)i);
u32 mask_sel = 0;
u8 i, j, k;
- ice_memset(pair_list, 0, sizeof(pair_list), ICE_NONDMA_MEM);
+ ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
ice_init_fd_mask_regs(hw);
/* fill in the swap array */
si = hw->blk[ICE_BLK_FD].es.fvw - 1;
- do {
+ while (si >= 0) {
u8 indexes_used = 1;
/* assume flat at this index */
}
si -= indexes_used;
- } while (si >= 0);
+ }
/* for each set of 4 swap indexes, write the appropriate register */
for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
idx = (j * 4) + k;
if (used[idx])
- raw_entry |= used[idx] << (k * 8);
+ raw_entry |= used[idx] << (k * BITS_PER_BYTE);
}
/* write the appropriate register set, based on HW block */
u16 ptype;
u8 m;
- ptype = byte * 8 + bit;
+ ptype = byte * BITS_PER_BYTE + bit;
if (ptype < ICE_FLOW_PTYPE_MAX) {
prof->ptype[prof->ptype_count] = ptype;
return entry;
}
-/**
- * ice_set_prof_context - Set context for a given profile
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- * @cntxt: context
- */
-struct ice_prof_map *
-ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
-{
- struct ice_prof_map *entry;
-
- entry = ice_search_prof_id(hw, blk, id);
- if (entry)
- entry->context = cntxt;
-
- return entry;
-}
-
-/**
- * ice_get_prof_context - Get context for a given profile
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- * @cntxt: pointer to variable to receive the context
- */
-struct ice_prof_map *
-ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
-{
- struct ice_prof_map *entry;
-
- entry = ice_search_prof_id(hw, blk, id);
- if (entry)
- *cntxt = entry->context;
-
- return entry;
-}
-
/**
* ice_vsig_prof_id_count - count profiles in a VSIG
* @hw: pointer to the HW struct
p->tcam[i].prof_id = map->prof_id;
p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
- ice_ptg_find_ptype(hw, blk, map->ptype[i], &ptg);
+ if (ice_ptg_find_ptype(hw, blk, map->ptype[i], &ptg))
+ return ICE_ERR_CFG;
p->tcam[i].ptg = ptg;
}
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (!status)
status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+
if (status) {
ice_free(hw, p);
return status;
enum ice_status status;
u16 idx;
- ice_memset(ptgs_used, 0, sizeof(ptgs_used), ICE_NONDMA_MEM);
+ ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
idx = vsig & ICE_VSIG_IDX_M;
/* Priority is based on the order in which the profiles are added. The
return status;
}
-/**
- * ice_add_flow - add flow
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @vsi: array of VSIs to enable with the profile specified by ID
- * @count: number of elements in the VSI array
- * @id: profile tracking ID
- *
- * Calling this function will update the hardware tables to enable the
- * profile indicated by the ID parameter for the VSIs specified in the VSI
- * array. Once successfully called, the flow will be enabled.
- */
-enum ice_status
-ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
- u64 id)
-{
- enum ice_status status;
- u16 i;
-
- for (i = 0; i < count; i++) {
- status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
- if (status)
- return status;
- }
-
- return ICE_SUCCESS;
-}
-
/**
* ice_rem_prof_from_list - remove a profile from list
* @hw: pointer to the HW struct
return status;
}
-
-/**
- * ice_rem_flow - remove flow
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @vsi: array of VSIs from which to remove the profile specified by ID
- * @count: number of elements in the VSI array
- * @id: profile tracking ID
- *
- * The function will remove flows from the specified VSIs that were enabled
- * using ice_add_flow. The ID value will indicated which profile will be
- * removed. Once successfully called, the flow will be disabled.
- */
-enum ice_status
-ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
- u64 id)
-{
- enum ice_status status;
- u16 i;
-
- for (i = 0; i < count; i++) {
- status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
- if (status)
- return status;
- }
-
- return ICE_SUCCESS;
-}