Correct abbreviation issues found by running abbrevcheck.
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Acked-by: Xiaolong Ye <xiaolong.ye@intel.com>
- * ice_get_itr_intrl_gran - determine int/intrl granularity
+ * ice_get_itr_intrl_gran
* @hw: pointer to the HW struct
*
- * Determines the itr/intrl granularities based on the maximum aggregate
+ * Determines the ITR/INTRL granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on.
*/
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
/* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \
ice_free(hw, (qi)->ring.cmd_buf); \
ice_free(hw, (qi)->ring.dma_head); \
} while (0)
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
struct ice_ctl_q_ring {
- void *dma_head; /* Virtual address to dma head */
+ void *dma_head; /* Virtual address to DMA head */
struct ice_dma_mem desc_buf; /* descriptor ring memory */
void *cmd_buf; /* command buffer memory */
if (!bld)
return ICE_ERR_NO_MEMORY;
- /* allocate 2 sections, one for RX parser, one for TX parser */
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
if (ice_pkg_buf_reserve_section(bld, 2))
goto ice_create_tunnel_err;
offsetof(struct ice_boost_key_value, hv_dst_port_key),
sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
- /* exact copy of entry to TX section entry */
+ /* exact copy of entry to Tx section entry */
ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
ICE_NONDMA_TO_NONDMA);
if (!bld)
return ICE_ERR_NO_MEMORY;
- /* allocate 2 sections, one for RX parser, one for TX parser */
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
if (ice_pkg_buf_reserve_section(bld, 2))
goto ice_destroy_tunnel_err;
goto ice_destroy_tunnel_err;
sect_tx->count = CPU_TO_LE16(1);
- /* copy original boost entry to update package buffer, one copy to RX
- * section, another copy to the TX section
+ /* copy original boost entry to update package buffer, one copy to Rx
+ * section, another copy to the Tx section
*/
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
* ice_write_prof_mask_enable_res - write profile mask enable register
* @hw: pointer to the HW struct
* @blk: hardware block
* @enable_mask: enable mask
*/
static void
* ice_free_prof_masks - free all profile masks for a profile
* @hw: pointer to the HW struct
* @blk: hardware block
*/
static enum ice_status
ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
* ice_update_prof_masking - set registers according to masking
* @hw: pointer to the HW struct
* @blk: hardware block
* @es: field vector
* @masks: masks
*/
void *sect;
/* if the HW segment pointer is null then the first iteration of
- * ice_pkg_enum_section() will fail. In this case the Hw tables will
+ * ice_pkg_enum_section() will fail. In this case the HW tables will
* not be filled and return success.
*/
if (!hw->seg) {
return;
/* if the sum of section size and offset exceed destination size
- * then we are out of bounds of the Hw table size for that PF.
+ * then we are out of bounds of the HW table size for that PF.
* Changing section length to fill the remaining table space
* of that PF.
*/
*
* Reads the current package contents and populates the driver
* database with the data iteratively for all advanced feature
- * blocks. Assume that the Hw tables have been allocated.
+ * blocks. Assume that the HW tables have been allocated.
*/
void ice_fill_blk_tbls(struct ice_hw *hw)
{
/* Vsig bit layout:
* [0:12]: incremental vsig index 1 to ICE_MAX_VSIGS
- * [13:15]: pf number of device
+ * [13:15]: PF number of device
*/
#define ICE_VSIG_IDX_M (0x1FFF)
#define ICE_PF_NUM_S 13
* ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
- * @flags: The value of pkt_flags[x:x] in RX/TX MDID metadata.
+ * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
*
* This function will allocate an extraction sequence entries for a DWORD size
* chunk of the packet flags.
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
- * register. Used for determining the itr/intrl granularity during
+ * register. Used for determining the ITR/INTRL granularity during
* initialization.
*/
#define ICE_MAX_AGG_BW_200G 0x0