{
struct ice_nvm_table *nvms;
- nvms = (struct ice_nvm_table *)(ice_seg->device_table +
- LE32_TO_CPU(ice_seg->device_table_count));
+ nvms = (struct ice_nvm_table *)
+ (ice_seg->device_table +
+ LE32_TO_CPU(ice_seg->device_table_count));
return (_FORCE_ struct ice_buf_table *)
(nvms->vers + LE32_TO_CPU(nvms->table_count));
int i;
ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
if (!ice_seg)
return;
return status;
}
-
/**
* ice_aq_update_pkg
* @hw: pointer to the hardware structure
return status;
}
-
/**
* ice_verify_pkg - verify package
* @pkg: pointer to the package buffer
hw->seg = NULL;
}
-/**
- * ice_init_fd_mask_regs - initialize Flow Director mask registers
- * @hw: pointer to the HW struct
- *
- * This function sets up the Flow Director mask registers to allow for complete
- * masking off of any of the 24 Field Vector words. After this call, mask 0 will
- * mask off all of FV index 0, mask 1 will mask off all of FV index 1, etc.
- */
-static void ice_init_fd_mask_regs(struct ice_hw *hw)
-{
- u16 i;
-
- for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
- wr32(hw, GLQF_FDMASK(i), i);
- ice_debug(hw, ICE_DBG_INIT, "init fd mask(%d): %x = %x\n", i,
- GLQF_FDMASK(i), i);
- }
-}
-
/**
* ice_init_pkg_regs - initialize additional package registers
* @hw: pointer to the hardware structure
/* setup Switch block input mask, which is 48-bits in two parts */
wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
- /* setup default flow director masks */
- ice_init_fd_mask_regs(hw);
}
/**
* ice_chk_pkg_version - check package version for compatibility with driver
- * @hw: pointer to the hardware structure
* @pkg_ver: pointer to a version structure to check
*
* Check to make sure that the package about to be downloaded is compatible with
* version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
* definitions.
*/
-static enum ice_status
-ice_chk_pkg_version(struct ice_hw *hw, struct ice_pkg_ver *pkg_ver)
+static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
- pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) {
- ice_info(hw, "ERROR: Incompatible package: %d.%d.%d.%d - requires package version: %d.%d.*.*\n",
- pkg_ver->major, pkg_ver->minor, pkg_ver->update,
- pkg_ver->draft, ICE_PKG_SUPP_VER_MAJ,
- ICE_PKG_SUPP_VER_MNR);
-
+ pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
return ICE_ERR_NOT_SUPPORTED;
- }
return ICE_SUCCESS;
}
/* before downloading the package, check package version for
* compatibility with driver
*/
- status = ice_chk_pkg_version(hw, &hw->pkg_ver);
+ status = ice_chk_pkg_version(&hw->pkg_ver);
if (status)
return status;
if (!status) {
status = ice_get_pkg_info(hw);
if (!status)
- status = ice_chk_pkg_version(hw, &hw->active_pkg_ver);
+ status = ice_chk_pkg_version(&hw->active_pkg_ver);
}
if (!status) {
struct ice_seg *ice_seg;
struct ice_fv *fv;
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
if (type == ICE_PROF_ALL) {
u16 i;
struct ice_fv *fv;
u32 offset;
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
if (!ids_cnt || !hw->seg)
return ICE_ERR_PARAM;
return ICE_ERR_NO_MEMORY;
}
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ *
+ * Enumerates the switch field vector entries (ICE_SID_FLD_VEC_SW) of the
+ * loaded package segment and, for each profile, records in
+ * hw->switch_info->prof_res_bm which FV words are empty (invalid protocol ID
+ * and offset) and can therefore be used for recipe results. Does nothing if
+ * no package segment (hw->seg) is loaded.
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+	struct ice_pkg_enum state;
+	struct ice_seg *ice_seg;
+	struct ice_fv *fv;
+
+	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+	if (!hw->seg)
+		return;
+
+	ice_seg = hw->seg;
+	do {
+		u32 off;
+		u16 i;
+
+		fv = (struct ice_fv *)
+			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+					   &off, ice_sw_fv_handler);
+		/* only pass the segment on the first iteration; the
+		 * enumerator keeps its own state afterwards
+		 */
+		ice_seg = NULL;
+		if (!fv)
+			break;
+
+		ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
+				ICE_MAX_FV_WORDS);
+
+		/* Determine empty field vector indices; these can be
+		 * used for recipe results. Skip index 0, since it is
+		 * always used for Switch ID.
+		 */
+		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+				ice_set_bit(i,
+					    hw->switch_info->prof_res_bm[off]);
+	} while (fv);
+}
+
/**
* ice_pkg_buf_free
* @hw: pointer to the HW structure
return false;
}
+/**
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
+ * @hw: pointer to the HW structure
+ * @type: tunnel type (TNL_ALL will return any open port)
+ * @port: returns open port
+ *
+ * Returns true and writes the first valid, in-use tunnel entry of the
+ * requested type to @port; returns false (leaving @port untouched) if no
+ * matching entry exists.
+ */
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
+			 u16 *port)
+{
+	u16 i;
+
+	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+		    (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
+			*port = hw->tnl.tbl[i].port;
+			return true;
+		}
+
+	return false;
+}
+
/**
* ice_create_tunnel
* @hw: pointer to the HW structure
if (!bld)
return ICE_ERR_NO_MEMORY;
- /* allocate 2 sections, one for RX parser, one for TX parser */
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
if (ice_pkg_buf_reserve_section(bld, 2))
goto ice_create_tunnel_err;
offsetof(struct ice_boost_key_value, hv_dst_port_key),
sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
- /* exact copy of entry to TX section entry */
+ /* exact copy of entry to Tx section entry */
ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
ICE_NONDMA_TO_NONDMA);
if (!bld)
return ICE_ERR_NO_MEMORY;
- /* allocate 2 sections, one for RX parser, one for TX parser */
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
if (ice_pkg_buf_reserve_section(bld, 2))
goto ice_destroy_tunnel_err;
goto ice_destroy_tunnel_err;
sect_tx->count = CPU_TO_LE16(1);
- /* copy original boost entry to update package buffer, one copy to RX
- * section, another copy to the TX section
+ /* copy original boost entry to update package buffer, one copy to Rx
+ * section, another copy to the Tx section
*/
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
/* PTG Management */
-
/**
* ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
* @hw: pointer to the hardware structure
hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
}
-/**
- * ice_ptg_alloc - Find a free entry and allocates a new packet type group ID
- * @hw: pointer to the hardware structure
- * @blk: HW block
- *
- * This function allocates and returns a new packet type group ID. Note
- * that 0 is the default packet type group, so successfully created PTGs will
- * have a non-zero ID value; which means a 0 return value indicates an error.
- */
-static u8 ice_ptg_alloc(struct ice_hw *hw, enum ice_block blk)
-{
- u16 i;
-
- /* Skip the default PTG of 0 */
- for (i = 1; i < ICE_MAX_PTGS; i++)
- if (!hw->blk[blk].xlt1.ptg_tbl[i].in_use) {
- /* found a free PTG ID */
- ice_ptg_alloc_val(hw, blk, i);
- return (u8)i;
- }
-
- return 0;
-}
-
/**
* ice_ptg_remove_ptype - Removes ptype from a particular packet type group
* @hw: pointer to the hardware structure
/* VSIG Management */
-
/**
* ice_vsig_find_vsi - find a VSIG that contains a specified VSI
* @hw: pointer to the hardware structure
expect_no_mask = true;
/* Scan the enabled masks on this profile, for the specified idx */
- for (i = 0; i < ICE_PROFILE_MASK_COUNT; i++)
+ for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
+ hw->blk[blk].masks.count; i++)
if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
if (hw->blk[blk].masks.masks[i].in_use &&
hw->blk[blk].masks.masks[i].idx == idx) {
for (i = 0; i < es->count; i++) {
u16 off = i * es->fvw;
- u16 j;
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
continue;
/* check if masks settings are the same for this profile */
- if (!ice_prof_has_mask(hw, blk, i, masks))
- continue;
-
- *prof_id = i;
- return ICE_SUCCESS;
- }
-
- return ICE_ERR_DOES_NOT_EXIST;
-}
-
-/**
- * ice_find_prof_id - find profile ID for a given field vector
- * @hw: pointer to the hardware structure
- * @blk: HW block
- * @fv: field vector to search for
- * @prof_id: receives the profile ID
- */
-static enum ice_status
-ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
- struct ice_fv_word *fv, u8 *prof_id)
-{
- struct ice_es *es = &hw->blk[blk].es;
- u16 off, i;
-
- for (i = 0; i < es->count; i++) {
- off = i * es->fvw;
-
- if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
+ if (masks && !ice_prof_has_mask(hw, blk, i, masks))
continue;
*prof_id = i;
* ice_write_prof_mask_enable_res - write profile mask enable register
* @hw: pointer to the HW struct
* @blk: hardware block
- * @prof_id: profile id
+ * @prof_id: profile ID
* @enable_mask: enable mask
*/
static void
*/
static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
{
-#define MAX_NUM_PORTS 8
- u16 num_ports = MAX_NUM_PORTS;
+ u16 per_pf;
u16 i;
ice_init_lock(&hw->blk[blk].masks.lock);
- hw->blk[blk].masks.count = ICE_PROFILE_MASK_COUNT / num_ports;
- hw->blk[blk].masks.first = hw->pf_id * hw->blk[blk].masks.count;
+ per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
+
+ hw->blk[blk].masks.count = per_pf;
+ hw->blk[blk].masks.first = hw->pf_id * per_pf;
ice_memset(hw->blk[blk].masks.masks, 0,
sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM);
* ice_free_prof_masks - free all profile masks for a profile
* @hw: pointer to the HW struct
* @blk: hardware block
- * @prof_id: profile id
+ * @prof_id: profile ID
*/
static enum ice_status
ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
* ice_update_prof_masking - set registers according to masking
* @hw: pointer to the HW struct
* @blk: hardware block
- * @prof_id: profile id
+ * @prof_id: profile ID
* @es: field vector
* @masks: masks
*/
void *sect;
/* if the HW segment pointer is null then the first iteration of
- * ice_pkg_enum_section() will fail. In this case the Hw tables will
+ * ice_pkg_enum_section() will fail. In this case the HW tables will
* not be filled and return success.
*/
if (!hw->seg) {
return;
/* if the sum of section size and offset exceed destination size
- * then we are out of bounds of the Hw table size for that PF.
+ * then we are out of bounds of the HW table size for that PF.
* Changing section length to fill the remaining table space
* of that PF.
*/
*
* Reads the current package contents and populates the driver
* database with the data iteratively for all advanced feature
- * blocks. Assume that the Hw tables have been allocated.
+ * blocks. Assume that the HW tables have been allocated.
*/
void ice_fill_blk_tbls(struct ice_hw *hw)
{
return ICE_SUCCESS;
}
-/**
- * ice_get_ptg - get or allocate a ptg for a ptype
- * @hw: pointer to the hardware structure
- * @blk: HW block
- * @ptype: the ptype to retrieve the PTG for
- * @ptg: receives the PTG of the ptype
- * @add: receive boolean indicating whether PTG was added or not
- */
-static enum ice_status
-ice_get_ptg(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg,
- bool *add)
-{
- enum ice_status status;
-
- *ptg = ICE_DEFAULT_PTG;
- *add = false;
-
- status = ice_ptg_find_ptype(hw, blk, ptype, ptg);
- if (status)
- return status;
-
- if (*ptg == ICE_DEFAULT_PTG) {
- /* need to allocate a PTG, and add ptype to it */
- *ptg = ice_ptg_alloc(hw, blk);
- if (*ptg == ICE_DEFAULT_PTG)
- return ICE_ERR_HW_TABLE;
-
- status = ice_ptg_add_mv_ptype(hw, blk, ptype, *ptg);
- if (status)
- return ICE_ERR_HW_TABLE;
-
- *add = true;
- }
-
- return ICE_SUCCESS;
-};
-
/**
* ice_has_prof_vsig - check to see if VSIG has a specific profile
* @hw: pointer to the hardware structure
ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
- ice_init_fd_mask_regs(hw);
-
/* This code assumes that the Flow Director field vectors are assigned
* from the end of the FV indexes working towards the zero index, that
* only complete fields will be included and will be consecutive, and
return ICE_ERR_OUT_OF_RANGE;
/* keep track of non-relevant fields */
- mask_sel |= 1 << (first_free - k);
+ mask_sel |= BIT(first_free - k);
}
pair_start[index] = first_free;
si -= indexes_used;
}
- /* for each set of 4 swap indexes, write the appropriate register */
+ /* for each set of 4 swap and 4 inset indexes, write the appropriate
+ * register
+ */
for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
- u32 raw_entry = 0;
+ u32 raw_swap = 0;
+ u32 raw_in = 0;
for (k = 0; k < 4; k++) {
u8 idx;
idx = (j * 4) + k;
- if (used[idx])
- raw_entry |= used[idx] << (k * BITS_PER_BYTE);
+ if (used[idx] && !(mask_sel & BIT(idx))) {
+ raw_swap |= used[idx] << (k * BITS_PER_BYTE);
+#define ICE_INSET_DFLT 0x9f
+ raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
+ }
}
- /* write the appropriate register set, based on HW block */
- wr32(hw, GLQF_FDSWAP(prof_id, j), raw_entry);
+ /* write the appropriate swap register set */
+ wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+ ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
- ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %x\n",
- prof_id, j, GLQF_FDSWAP(prof_id, j), raw_entry);
+ /* write the appropriate inset register set */
+ wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
+
+ ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
}
- /* update the masks for this profile to be sure we ignore fields that
- * are not relevant to our match criteria
- */
- ice_update_fd_mask(hw, prof_id, mask_sel);
+ /* initially clear the mask select for this profile */
+ ice_update_fd_mask(hw, prof_id, 0);
return ICE_SUCCESS;
}
+/* The entries here need to match the order of enum ice_ptype_attrib */
+static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
+	{ ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
+	{ ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
+	{ ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
+	{ ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
+};
+
/**
- * ice_add_prof_with_mask - add profile
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
- * @es: extraction sequence (length of array is determined by the block)
- * @masks: extraction sequence (length of array is determined by the block)
- *
- * This function registers a profile, which matches a set of PTYPES with a
- * particular extraction sequence. While the hardware profile is allocated
- * it will not be written until the first call to ice_add_flow that specifies
- * the ID value used here.
+ * ice_get_ptype_attrib_info - get ptype attribute information
+ * @type: attribute type
+ * @info: pointer to variable to the attribute information
*/
-enum ice_status
-ice_add_prof_with_mask(struct ice_hw *hw, enum ice_block blk, u64 id,
- u8 ptypes[], struct ice_fv_word *es, u16 *masks)
+static void
+ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
+ struct ice_ptype_attrib_info *info)
{
- u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
- struct ice_prof_map *prof;
- enum ice_status status;
- u32 byte = 0;
- u8 prof_id;
-
- ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
-
- /* search for existing profile */
- status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
- if (status) {
- /* allocate profile ID */
- status = ice_alloc_prof_id(hw, blk, &prof_id);
- if (status)
- goto err_ice_add_prof;
- if (blk == ICE_BLK_FD) {
- /* For Flow Director block, the extraction sequence may
- * need to be altered in the case where there are paired
- * fields that have no match. This is necessary because
- * for Flow Director, src and dest fields need to paired
- * for filter programming and these values are swapped
- * during Tx.
- */
- status = ice_update_fd_swap(hw, prof_id, es);
- if (status)
- goto err_ice_add_prof;
- }
- status = ice_update_prof_masking(hw, blk, prof_id, es, masks);
- if (status)
- goto err_ice_add_prof;
-
- /* and write new es */
- ice_write_es(hw, blk, prof_id, es);
- }
-
- ice_prof_inc_ref(hw, blk, prof_id);
-
- /* add profile info */
-
- prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
- if (!prof)
- goto err_ice_add_prof;
-
- prof->profile_cookie = id;
- prof->prof_id = prof_id;
- prof->ptype_count = 0;
- prof->context = 0;
-
- /* build list of ptgs */
- while (bytes && prof->ptype_count < ICE_MAX_PTYPE_PER_PROFILE) {
- u32 bit;
+ *info = ice_ptype_attributes[type];
+}
- if (!ptypes[byte]) {
- bytes--;
- byte++;
- continue;
- }
- /* Examine 8 bits per byte */
- for (bit = 0; bit < 8; bit++) {
- if (ptypes[byte] & BIT(bit)) {
- u16 ptype;
- u8 m;
+/**
+ * ice_add_prof_attrib - add any ptg with attributes to profile
+ * @prof: pointer to the profile to which ptg entries will be added
+ * @ptg: PTG to be added
+ * @ptype: PTYPE that needs to be looked up
+ * @attr: array of attributes that will be considered
+ * @attr_cnt: number of elements in the attribute array
+ */
+static enum ice_status
+ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
+ const struct ice_ptype_attributes *attr, u16 attr_cnt)
+{
+ bool found = false;
+ u16 i;
- ptype = byte * BITS_PER_BYTE + bit;
- if (ptype < ICE_FLOW_PTYPE_MAX) {
- prof->ptype[prof->ptype_count] = ptype;
+ for (i = 0; i < attr_cnt; i++) {
+ if (attr[i].ptype == ptype) {
+ found = true;
- if (++prof->ptype_count >=
- ICE_MAX_PTYPE_PER_PROFILE)
- break;
- }
+ prof->ptg[prof->ptg_cnt] = ptg;
+ ice_get_ptype_attrib_info(attr[i].attrib,
+ &prof->attr[prof->ptg_cnt]);
- /* nothing left in byte, then exit */
- m = ~((1 << (bit + 1)) - 1);
- if (!(ptypes[byte] & m))
- break;
- }
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ return ICE_ERR_MAX_LIMIT;
}
-
- bytes--;
- byte++;
}
- LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
- status = ICE_SUCCESS;
+ if (!found)
+ return ICE_ERR_DOES_NOT_EXIST;
-err_ice_add_prof:
- ice_release_lock(&hw->blk[blk].es.prof_map_lock);
- return status;
+ return ICE_SUCCESS;
}
/**
* @blk: hardware block
* @id: profile tracking ID
* @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @attr: array of attributes
+ * @attr_cnt: number of elements in attrib array
* @es: extraction sequence (length of array is determined by the block)
+ * @masks: mask for extraction sequence
*
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
*/
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- struct ice_fv_word *es)
+ const struct ice_ptype_attributes *attr, u16 attr_cnt,
+ struct ice_fv_word *es, u16 *masks)
{
u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
+ ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
enum ice_status status;
u32 byte = 0;
u8 prof_id;
+ ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
+
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
/* search for existing profile */
- status = ice_find_prof_id(hw, blk, es, &prof_id);
+ status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
if (status) {
/* allocate profile ID */
status = ice_alloc_prof_id(hw, blk, &prof_id);
if (status)
goto err_ice_add_prof;
}
+ status = ice_update_prof_masking(hw, blk, prof_id, es, masks);
+ if (status)
+ goto err_ice_add_prof;
/* and write new es */
ice_write_es(hw, blk, prof_id, es);
prof->profile_cookie = id;
prof->prof_id = prof_id;
- prof->ptype_count = 0;
+ prof->ptg_cnt = 0;
prof->context = 0;
/* build list of ptgs */
- while (bytes && prof->ptype_count < ICE_MAX_PTYPE_PER_PROFILE) {
+ while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
u32 bit;
if (!ptypes[byte]) {
}
/* Examine 8 bits per byte */
for (bit = 0; bit < 8; bit++) {
- if (ptypes[byte] & 1 << bit) {
+ if (ptypes[byte] & BIT(bit)) {
u16 ptype;
+ u8 ptg;
u8 m;
ptype = byte * BITS_PER_BYTE + bit;
- if (ptype < ICE_FLOW_PTYPE_MAX) {
- prof->ptype[prof->ptype_count] = ptype;
- if (++prof->ptype_count >=
- ICE_MAX_PTYPE_PER_PROFILE)
+ /* The package should place all ptypes in a
+ * non-zero PTG, so the following call should
+ * never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
+ continue;
+
+ /* If PTG is already added, skip and continue */
+ if (ice_is_bit_set(ptgs_used, ptg))
+ continue;
+
+ ice_set_bit(ptg, ptgs_used);
+ /* Check to see there are any attributes for
+ * this ptype, and add them if found.
+ */
+ status = ice_add_prof_attrib(prof, ptg, ptype,
+ attr, attr_cnt);
+ if (status == ICE_ERR_MAX_LIMIT)
+ break;
+ if (status) {
+				/* This is simply a ptype/ptg with no
+ * attribute
+ */
+ prof->ptg[prof->ptg_cnt] = ptg;
+ prof->attr[prof->ptg_cnt].flags = 0;
+ prof->attr[prof->ptg_cnt].mask = 0;
+
+ if (++prof->ptg_cnt >=
+ ICE_MAX_PTG_PER_PROFILE)
break;
}
u16 i;
for (i = 0; i < prof->tcam_count; i++) {
- prof->tcam[i].in_use = false;
- status = ice_rel_tcam_idx(hw, blk, prof->tcam[i].tcam_idx);
- if (status)
- return ICE_ERR_HW_TABLE;
+ if (prof->tcam[i].in_use) {
+ prof->tcam[i].in_use = false;
+ status = ice_rel_tcam_idx(hw, blk,
+ prof->tcam[i].tcam_idx);
+ if (status)
+ return ICE_ERR_HW_TABLE;
+ }
}
return ICE_SUCCESS;
}
/**
- * ice_get_prof_ptgs - get ptgs for profile
+ * ice_get_prof - get profile
* @hw: pointer to the HW struct
* @blk: hardware block
* @hdl: profile handle
* @chg: change list
*/
static enum ice_status
-ice_get_prof_ptgs(struct ice_hw *hw, enum ice_block blk, u64 hdl,
- struct LIST_HEAD_TYPE *chg)
+ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
+ struct LIST_HEAD_TYPE *chg)
{
struct ice_prof_map *map;
struct ice_chs_chg *p;
if (!map)
return ICE_ERR_DOES_NOT_EXIST;
- for (i = 0; i < map->ptype_count; i++) {
- enum ice_status status;
- bool add;
- u8 ptg;
-
- status = ice_get_ptg(hw, blk, map->ptype[i], &ptg, &add);
- if (status)
- goto err_ice_get_prof_ptgs;
-
- if (add || !hw->blk[blk].es.written[map->prof_id]) {
- /* add PTG to change list */
+ for (i = 0; i < map->ptg_cnt; i++) {
+ if (!hw->blk[blk].es.written[map->prof_id]) {
+ /* add ES to change list */
p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
if (!p)
- goto err_ice_get_prof_ptgs;
+ goto err_ice_get_prof;
p->type = ICE_PTG_ES_ADD;
- p->ptype = map->ptype[i];
- p->ptg = ptg;
- p->add_ptg = add;
+ p->ptype = 0;
+ p->ptg = map->ptg[i];
+ p->attr = map->attr[i];
+ p->add_ptg = 0;
- p->add_prof = !hw->blk[blk].es.written[map->prof_id];
+ p->add_prof = 1;
p->prof_id = map->prof_id;
hw->blk[blk].es.written[map->prof_id] = true;
return ICE_SUCCESS;
-err_ice_get_prof_ptgs:
+err_ice_get_prof:
/* let caller clean up the change list */
return ICE_ERR_NO_MEMORY;
}
struct ice_vsig_prof *p;
/* copy to the input list */
- p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
+ p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
+ ICE_NONDMA_TO_NONDMA);
if (!p)
goto err_ice_get_profs_vsig;
- ice_memcpy(p, ent1, sizeof(*p), ICE_NONDMA_TO_NONDMA);
-
LIST_ADD_TAIL(&p->list, lst);
}
p->profile_cookie = map->profile_cookie;
p->prof_id = map->prof_id;
- p->tcam_count = map->ptype_count;
-
- for (i = 0; i < map->ptype_count; i++) {
- u8 ptg;
+ p->tcam_count = map->ptg_cnt;
+ for (i = 0; i < map->ptg_cnt; i++) {
p->tcam[i].prof_id = map->prof_id;
p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
-
- if (ice_ptg_find_ptype(hw, blk, map->ptype[i], &ptg)) {
- ice_free(hw, p);
- return ICE_ERR_CFG;
- }
-
- p->tcam[i].ptg = ptg;
+ p->tcam[i].ptg = map->ptg[i];
+ p->tcam[i].attr = map->attr[i];
}
LIST_ADD(&p->list, lst);
return ICE_SUCCESS;
}
+/**
+ * ice_set_tcam_flags - set TCAM flag don't care mask
+ * @mask: mask for flags
+ * @dc_mask: pointer to the don't care mask
+ *
+ * Writes the bitwise complement of @mask into the low 16 bits (the flag
+ * word) of @dc_mask; the remaining bytes of @dc_mask are left unchanged.
+ */
+static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ])
+{
+	u16 *flag_word;
+
+	/* flags are lowest u16 */
+	/* NOTE(review): casts a u8 array to u16 * — assumes the buffer is
+	 * suitably aligned and that flag byte order matches host endianness;
+	 * confirm against the TCAM key layout.
+	 */
+	flag_word = (u16 *)dc_mask;
+	*flag_word = ~mask;
+}
/**
* ice_prof_tcam_ena_dis - add enable or disable TCAM change
* @hw: pointer to the HW struct
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
- /* If disabled, change the low flag bit to never match */
+ /* if disabling, free the tcam */
if (!enable) {
- dc_msk[0] = 0x00;
- nm_msk[0] = 0x01;
+ status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx);
+ tcam->tcam_idx = 0;
+ tcam->in_use = 0;
+ return status;
}
+ /* for re-enabling, reallocate a tcam */
+ status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
+ if (status)
+ return status;
+
/* add TCAM to change list */
p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
if (!p)
return ICE_ERR_NO_MEMORY;
+ /* set don't care masks for tcam flags */
+ ice_set_tcam_flags(tcam->attr.mask, dc_msk);
+
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
- tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
- nm_msk);
+ tcam->ptg, vsig, 0, tcam->attr.flags,
+ vl_msk, dc_msk, nm_msk);
if (status)
goto err_ice_prof_tcam_ena_dis;
- tcam->in_use = enable;
+ tcam->in_use = 1;
p->type = ICE_TCAM_ADD;
p->add_tcam_idx = true;
t->profile_cookie = map->profile_cookie;
t->prof_id = map->prof_id;
- t->tcam_count = map->ptype_count;
+ t->tcam_count = map->ptg_cnt;
/* create TCAM entries */
- for (i = 0; i < map->ptype_count; i++) {
+ for (i = 0; i < map->ptg_cnt; i++) {
enum ice_status status;
u16 tcam_idx;
- bool add;
- u8 ptg;
-
- /* If properly sequenced, we should never have to allocate new
- * PTGs
- */
- status = ice_get_ptg(hw, blk, map->ptype[i], &ptg, &add);
- if (status)
- goto err_ice_add_prof_id_vsig;
/* add TCAM to change list */
p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
goto err_ice_add_prof_id_vsig;
}
- t->tcam[i].ptg = ptg;
+ t->tcam[i].ptg = map->ptg[i];
t->tcam[i].prof_id = map->prof_id;
t->tcam[i].tcam_idx = tcam_idx;
+ t->tcam[i].attr = map->attr[i];
t->tcam[i].in_use = true;
p->type = ICE_TCAM_ADD;
p->vsig = vsig;
p->tcam_idx = t->tcam[i].tcam_idx;
+ /* set don't care masks for tcam flags */
+ ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk);
+
/* write the TCAM entry */
status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
t->tcam[i].prof_id,
- t->tcam[i].ptg, vsig, 0, 0,
- vl_msk, dc_msk, nm_msk);
+ t->tcam[i].ptg, vsig, 0,
+ t->tcam[i].attr.flags, vl_msk,
+ dc_msk, nm_msk);
if (status)
goto err_ice_add_prof_id_vsig;
INIT_LIST_HEAD(&chrs);
INIT_LIST_HEAD(&chg);
- status = ice_get_prof_ptgs(hw, blk, hdl, &chg);
+ /* Get profile */
+ status = ice_get_prof(hw, blk, hdl, &chg);
if (status)
return status;