net/ice/base: add ACL module
authorQi Zhang <qi.z.zhang@intel.com>
Mon, 23 Mar 2020 07:17:48 +0000 (15:17 +0800)
committerFerruh Yigit <ferruh.yigit@intel.com>
Tue, 21 Apr 2020 11:57:05 +0000 (13:57 +0200)
Add all ACL-related code.
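
Below is an illustrative sketch (not part of this patch) of how a caller
might drive the new API; the parameter values are hypothetical:

    struct ice_acl_tbl_params params = { 0 };
    u16 scen_id;

    params.width = 40;          /* select/match bytes */
    params.depth = 128;         /* entries, a multiple of 64 */
    params.entry_act_pairs = 1;

    if (!ice_acl_create_tbl(hw, &params) &&
        !ice_acl_create_scen(hw, params.width, params.depth, &scen_id)) {
            /* add rules with ice_acl_add_entry(), remove them with
             * ice_acl_rem_entry(), then tear down via
             * ice_acl_destroy_scen() and ice_acl_destroy_tbl()
             */
    }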

Signed-off-by: Real Valiquette <real.valiquette@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Acked-by: Qiming Yang <qiming.yang@intel.com>
13 files changed:
drivers/net/ice/Makefile
drivers/net/ice/base/ice_acl.c [new file with mode: 0644]
drivers/net/ice/base/ice_acl.h [new file with mode: 0644]
drivers/net/ice/base/ice_acl_ctrl.c [new file with mode: 0644]
drivers/net/ice/base/ice_adminq_cmd.h
drivers/net/ice/base/ice_fdir.c
drivers/net/ice/base/ice_fdir.h
drivers/net/ice/base/ice_flex_pipe.c
drivers/net/ice/base/ice_flow.c
drivers/net/ice/base/ice_flow.h
drivers/net/ice/base/ice_type.h
drivers/net/ice/base/meson.build
drivers/net/ice/ice_fdir_filter.c

index 54a90a8..dc3b557 100644 (file)
@@ -51,6 +51,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_flex_pipe.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_flow.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_dcb.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_acl.c
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_acl_ctrl.c
 
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx.c
diff --git a/drivers/net/ice/base/ice_acl.c b/drivers/net/ice/base/ice_acl.c
new file mode 100644 (file)
index 0000000..26e03aa
--- /dev/null
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#include "ice_acl.h"
+#include "ice_adminq_cmd.h"
+
+/**
+ * ice_aq_alloc_acl_tbl - allocate ACL table
+ * @hw: pointer to the HW struct
+ * @tbl: pointer to ice_acl_alloc_tbl struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Allocate ACL table (indirect 0x0C10)
+ */
+enum ice_status
+ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl,
+                    struct ice_sq_cd *cd)
+{
+       struct ice_aqc_acl_alloc_table *cmd;
+       struct ice_aq_desc desc;
+
+       if (!tbl->act_pairs_per_entry)
+               return ICE_ERR_PARAM;
+
+       if (tbl->act_pairs_per_entry > ICE_AQC_MAX_ACTION_MEMORIES)
+               return ICE_ERR_MAX_LIMIT;
+
+       /* If this is a concurrent table, the buffer must be valid and
+        * contain the DependentAllocIDs, and 'num_dependent_alloc_ids'
+        * must be valid and within limit.
+        */
+       if (tbl->concurr) {
+               if (!tbl->num_dependent_alloc_ids)
+                       return ICE_ERR_PARAM;
+               if (tbl->num_dependent_alloc_ids >
+                   ICE_AQC_MAX_CONCURRENT_ACL_TBL)
+                       return ICE_ERR_INVAL_SIZE;
+       }
+
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_alloc_acl_tbl);
+       desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+       cmd = &desc.params.alloc_table;
+       cmd->table_width = CPU_TO_LE16(tbl->width * BITS_PER_BYTE);
+       cmd->table_depth = CPU_TO_LE16(tbl->depth);
+       cmd->act_pairs_per_entry = tbl->act_pairs_per_entry;
+       if (tbl->concurr)
+               cmd->table_type = tbl->num_dependent_alloc_ids;
+
+       return ice_aq_send_cmd(hw, &desc, &tbl->buf, sizeof(tbl->buf), cd);
+}
+
+/**
+ * ice_aq_dealloc_acl_tbl - deallocate ACL table
+ * @hw: pointer to the HW struct
+ * @alloc_id: allocation ID of the table being released
+ * @buf: address of indirect data buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Deallocate ACL table (indirect 0x0C11)
+ *
+ * NOTE: This command has no data buffer format of its own, but the
+ * response format is 'struct ice_aqc_acl_generic'; pass a pointer to that
+ * struct as 'buf' and its size as 'buf_size'.
+ */
+enum ice_status
+ice_aq_dealloc_acl_tbl(struct ice_hw *hw, u16 alloc_id,
+                      struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd)
+{
+       struct ice_aqc_acl_tbl_actpair *cmd;
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dealloc_acl_tbl);
+       cmd = &desc.params.tbl_actpair;
+       cmd->alloc_id = CPU_TO_LE16(alloc_id);
+
+       return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd);
+}
+
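+/* Helper function to program/query ACL entry */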
+static enum ice_status
+ice_aq_acl_entry(struct ice_hw *hw, u16 opcode, u8 tcam_idx, u16 entry_idx,
+                struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd)
+{
+       struct ice_aqc_acl_entry *cmd;
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+
+       if (opcode == ice_aqc_opc_program_acl_entry)
+               desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+       cmd = &desc.params.program_query_entry;
+       cmd->tcam_index = tcam_idx;
+       cmd->entry_index = CPU_TO_LE16(entry_idx);
+
+       return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd);
+}
+
+/**
+ * ice_aq_program_acl_entry - program ACL entry
+ * @hw: pointer to the HW struct
+ * @tcam_idx: TCAM block index to be programmed/updated
+ * @entry_idx: entry index to be programmed/updated
+ * @buf: address of indirect data buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Program ACL entry (direct 0x0C20)
+ */
+enum ice_status
+ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
+                        struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd)
+{
+       return ice_aq_acl_entry(hw, ice_aqc_opc_program_acl_entry, tcam_idx,
+                               entry_idx, buf, cd);
+}
+
+/**
+ * ice_aq_query_acl_entry - query ACL entry
+ * @hw: pointer to the HW struct
+ * @tcam_idx: TCAM block index to be queried
+ * @entry_idx: entry index to be queried
+ * @buf: address of indirect data buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query ACL entry (direct 0x0C24)
+ *
+ * NOTE: The caller of this API must parse 'buf' appropriately since it
+ * contains the response (key and key invert).
+ */
+enum ice_status
+ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
+                      struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd)
+{
+       return ice_aq_acl_entry(hw, ice_aqc_opc_query_acl_entry, tcam_idx,
+                               entry_idx, buf, cd);
+}
+
+/* Helper function to alloc/dealloc ACL action pair */
+static enum ice_status
+ice_aq_actpair_a_d(struct ice_hw *hw, u16 opcode, u16 alloc_id,
+                  struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd)
+{
+       struct ice_aqc_acl_tbl_actpair *cmd;
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+       cmd = &desc.params.tbl_actpair;
+       cmd->alloc_id = CPU_TO_LE16(alloc_id);
+
+       return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd);
+}
+
+/**
+ * ice_aq_alloc_actpair - allocate actionpair for specified ACL table
+ * @hw: pointer to the HW struct
+ * @alloc_id: allocation ID of the table being associated with the actionpair
+ * @buf: address of indirect data buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Allocate ACL actionpair (direct 0x0C12)
+ *
+ * This command doesn't need and doesn't have its own command buffer, but
+ * the response format is as specified in 'struct ice_aqc_acl_generic'.
+ */
+enum ice_status
+ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id,
+                    struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd)
+{
+       return ice_aq_actpair_a_d(hw, ice_aqc_opc_alloc_acl_actpair, alloc_id,
+                                 buf, cd);
+}
+
+/**
+ * ice_aq_dealloc_actpair - dealloc actionpair for specified ACL table
+ * @hw: pointer to the HW struct
+ * @alloc_id: allocation ID of the table being associated with the actionpair
+ * @buf: address of indirect data buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Deallocate ACL actionpair (direct 0x0C13)
+ */
+enum ice_status
+ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id,
+                      struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd)
+{
+       return ice_aq_actpair_a_d(hw, ice_aqc_opc_dealloc_acl_actpair, alloc_id,
+                                 buf, cd);
+}
+
+/* Helper function to program/query ACL action pair */
+static enum ice_status
+ice_aq_actpair_p_q(struct ice_hw *hw, u16 opcode, u8 act_mem_idx,
+                  u16 act_entry_idx, struct ice_aqc_actpair *buf,
+                  struct ice_sq_cd *cd)
+{
+       struct ice_aqc_acl_actpair *cmd;
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+
+       if (opcode == ice_aqc_opc_program_acl_actpair)
+               desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+       cmd = &desc.params.program_query_actpair;
+       cmd->act_mem_index = act_mem_idx;
+       cmd->act_entry_index = CPU_TO_LE16(act_entry_idx);
+
+       return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd);
+}
+
+/**
+ * ice_aq_program_actpair - program ACL actionpair
+ * @hw: pointer to the HW struct
+ * @act_mem_idx: action memory index to program/update/query
+ * @act_entry_idx: the entry index in action memory to be programmed/updated
+ * @buf: address of indirect data buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Program action entries (indirect 0x0C1C)
+ */
+enum ice_status
+ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
+                      struct ice_aqc_actpair *buf, struct ice_sq_cd *cd)
+{
+       return ice_aq_actpair_p_q(hw, ice_aqc_opc_program_acl_actpair,
+                                 act_mem_idx, act_entry_idx, buf, cd);
+}
+
+/**
+ * ice_aq_query_actpair - query ACL actionpair
+ * @hw: pointer to the HW struct
+ * @act_mem_idx: action memory index to program/update/query
+ * @act_entry_idx: the entry index in action memory to be programmed/updated
+ * @buf: address of indirect data buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query ACL actionpair (indirect 0x0C25)
+ */
+enum ice_status
+ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
+                    struct ice_aqc_actpair *buf, struct ice_sq_cd *cd)
+{
+       return ice_aq_actpair_p_q(hw, ice_aqc_opc_query_acl_actpair,
+                                 act_mem_idx, act_entry_idx, buf, cd);
+}
+
+/**
+ * ice_aq_dealloc_acl_res - deallocate ACL resources
+ * @hw: pointer to the HW struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * ACL - de-allocate (direct 0x0C1A) resources. Used by SW to release all the
+ * resources allocated for it using a single command
+ */
+enum ice_status ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd)
+{
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dealloc_acl_res);
+
+       return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_acl_prof_aq_send - send ACL profile AQ command
+ * @hw: pointer to the HW struct
+ * @opc: command opcode
+ * @prof_id: profile ID
+ * @buf: ptr to buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function sends ACL profile commands
+ */
+static enum ice_status
+ice_acl_prof_aq_send(struct ice_hw *hw, u16 opc, u8 prof_id,
+                    struct ice_aqc_acl_prof_generic_frmt *buf,
+                    struct ice_sq_cd *cd)
+{
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, opc);
+       desc.params.profile.profile_id = prof_id;
+       if (opc == ice_aqc_opc_program_acl_prof_extraction ||
+           opc == ice_aqc_opc_program_acl_prof_ranges)
+               desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+       return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd);
+}
+
+/**
+ * ice_prgm_acl_prof_extrt - program ACL profile extraction sequence
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @buf: ptr to buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * ACL - program ACL profile extraction (indirect 0x0C1D)
+ */
+enum ice_status
+ice_prgm_acl_prof_extrt(struct ice_hw *hw, u8 prof_id,
+                       struct ice_aqc_acl_prof_generic_frmt *buf,
+                       struct ice_sq_cd *cd)
+{
+       return ice_acl_prof_aq_send(hw, ice_aqc_opc_program_acl_prof_extraction,
+                                   prof_id, buf, cd);
+}
+
+/**
+ * ice_query_acl_prof - query ACL profile
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @buf: ptr to buffer (which will contain response of this command)
+ * @cd: pointer to command details structure or NULL
+ *
+ * ACL - query ACL profile (indirect 0x0C21)
+ */
+enum ice_status
+ice_query_acl_prof(struct ice_hw *hw, u8 prof_id,
+                  struct ice_aqc_acl_prof_generic_frmt *buf,
+                  struct ice_sq_cd *cd)
+{
+       return ice_acl_prof_aq_send(hw, ice_aqc_opc_query_acl_prof, prof_id,
+                                   buf, cd);
+}
+
+/**
+ * ice_aq_acl_cntrs_chk_params - check ACL counter parameters
+ * @cntrs: ptr to buffer describing input and output params
+ *
+ * This function checks the counter bank range for counter type and returns
+ * success or failure.
+ */
+static enum ice_status ice_aq_acl_cntrs_chk_params(struct ice_acl_cntrs *cntrs)
+{
+       enum ice_status status = ICE_SUCCESS;
+
+       if (!cntrs || !cntrs->amount)
+               return ICE_ERR_PARAM;
+
+       switch (cntrs->type) {
+       case ICE_AQC_ACL_CNT_TYPE_SINGLE:
+               /* Single counter type - configured to count either bytes
+                * or packets, the valid values for byte or packet counters
+                * shall be 0-3.
+                */
+               if (cntrs->bank > ICE_AQC_ACL_MAX_CNT_SINGLE)
+                       status = ICE_ERR_OUT_OF_RANGE;
+               break;
+       case ICE_AQC_ACL_CNT_TYPE_DUAL:
+               /* Pair counter type - counts number of bytes and packets
+                * The valid values for byte/packet counter duals shall be 0-1
+                */
+               if (cntrs->bank > ICE_AQC_ACL_MAX_CNT_DUAL)
+                       status = ICE_ERR_OUT_OF_RANGE;
+               break;
+       default:
+               /* Unspecified counter type - invalid or error */
+               status = ICE_ERR_PARAM;
+       }
+
+       return status;
+}
+
+/**
+ * ice_aq_alloc_acl_cntrs - allocate ACL counters
+ * @hw: pointer to the HW struct
+ * @cntrs: ptr to buffer describing input and output params
+ * @cd: pointer to command details structure or NULL
+ *
+ * ACL - allocate (indirect 0x0C16) counters. This function attempts to
+ * allocate a contiguous block of counters. In case of failure, the caller
+ * can retry with a smaller block. The allocation is considered unsuccessful
+ * if a returned counter value is invalid; in that case this function
+ * returns an error, otherwise success.
+ */
+enum ice_status
+ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
+                      struct ice_sq_cd *cd)
+{
+       struct ice_aqc_acl_alloc_counters *cmd;
+       u16 first_cntr, last_cntr;
+       struct ice_aq_desc desc;
+       enum ice_status status;
+
+       /* check for invalid params */
+       status = ice_aq_acl_cntrs_chk_params(cntrs);
+       if (status)
+               return status;
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_alloc_acl_counters);
+       cmd = &desc.params.alloc_counters;
+       cmd->counter_amount = cntrs->amount;
+       cmd->counters_type = cntrs->type;
+       cmd->bank_alloc = cntrs->bank;
+       status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+       if (!status) {
+               first_cntr = LE16_TO_CPU(cmd->ops.resp.first_counter);
+               last_cntr = LE16_TO_CPU(cmd->ops.resp.last_counter);
+               if (first_cntr == ICE_AQC_ACL_ALLOC_CNT_INVAL ||
+                   last_cntr == ICE_AQC_ACL_ALLOC_CNT_INVAL)
+                       return ICE_ERR_OUT_OF_RANGE;
+               cntrs->first_cntr = first_cntr;
+               cntrs->last_cntr = last_cntr;
+       }
+       return status;
+}
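+
+/* Illustrative usage (not part of this file): as noted above, a caller
+ * that needs 'n' contiguous counters can halve the request on failure:
+ *
+ *     cntrs.amount = n;
+ *     while (ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL) && cntrs.amount > 1)
+ *             cntrs.amount /= 2;
+ */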
+
+/**
+ * ice_aq_dealloc_acl_cntrs - deallocate ACL counters
+ * @hw: pointer to the HW struct
+ * @cntrs: ptr to buffer describing input and output params
+ * @cd: pointer to command details structure or NULL
+ *
+ * ACL - de-allocate (direct 0x0C17) counters.
+ * This function deallocates ACL counters.
+ */
+enum ice_status
+ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
+                        struct ice_sq_cd *cd)
+{
+       struct ice_aqc_acl_dealloc_counters *cmd;
+       struct ice_aq_desc desc;
+       enum ice_status status;
+
+       /* check for invalid params */
+       status = ice_aq_acl_cntrs_chk_params(cntrs);
+       if (status)
+               return status;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dealloc_acl_counters);
+       cmd = &desc.params.dealloc_counters;
+       cmd->first_counter = CPU_TO_LE16(cntrs->first_cntr);
+       cmd->last_counter = CPU_TO_LE16(cntrs->last_cntr);
+       cmd->counters_type = cntrs->type;
+       cmd->bank_alloc = cntrs->bank;
+       return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_query_acl_cntrs - query ACL counter
+ * @hw: pointer to the HW struct
+ * @bank: queried counter bank
+ * @index: queried counter index
+ * @cntr_val: pointer to the returned counter value (byte or packet count)
+ * @cd: pointer to command details structure or NULL
+ *
+ * ACL - query ACL counter (direct 0x0C27)
+ */
+enum ice_status
+ice_aq_query_acl_cntrs(struct ice_hw *hw, u8 bank, u16 index, u64 *cntr_val,
+                      struct ice_sq_cd *cd)
+{
+       struct ice_aqc_acl_query_counter *cmd;
+       struct ice_aq_desc desc;
+       enum ice_status status;
+
+       if (!cntr_val)
+               return ICE_ERR_PARAM;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_acl_counter);
+       cmd = &desc.params.query_counter;
+       cmd->counter_index = CPU_TO_LE16(index);
+       cmd->counter_bank = bank;
+       status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+       if (!status) {
+               __le64 resp_val = 0;
+
+               ice_memcpy(&resp_val, cmd->ops.resp.val,
+                          sizeof(cmd->ops.resp.val), ICE_NONDMA_TO_NONDMA);
+               *cntr_val = LE64_TO_CPU(resp_val);
+       }
+       return status;
+}
+
+/**
+ * ice_prog_acl_prof_ranges - program ACL profile ranges
+ * @hw: pointer to the HW struct
+ * @prof_id: programmed or updated profile ID
+ * @buf: pointer to input buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * ACL - program ACL profile ranges (indirect 0x0C1E)
+ */
+enum ice_status
+ice_prog_acl_prof_ranges(struct ice_hw *hw, u8 prof_id,
+                        struct ice_aqc_acl_profile_ranges *buf,
+                        struct ice_sq_cd *cd)
+{
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc,
+                                     ice_aqc_opc_program_acl_prof_ranges);
+       desc.params.profile.profile_id = prof_id;
+       desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+       return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd);
+}
+
+/**
+ * ice_query_acl_prof_ranges - query ACL profile ranges
+ * @hw: pointer to the HW struct
+ * @prof_id: programmed or updated profile ID
+ * @buf: pointer to response buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * ACL - query ACL profile ranges (indirect 0x0C22)
+ */
+enum ice_status
+ice_query_acl_prof_ranges(struct ice_hw *hw, u8 prof_id,
+                         struct ice_aqc_acl_profile_ranges *buf,
+                         struct ice_sq_cd *cd)
+{
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc,
+                                     ice_aqc_opc_query_acl_prof_ranges);
+       desc.params.profile.profile_id = prof_id;
+       return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd);
+}
+
+/**
+ * ice_aq_alloc_acl_scen - allocate ACL scenario
+ * @hw: pointer to the HW struct
+ * @scen_id: memory location to receive allocated scenario ID
+ * @buf: address of indirect data buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Allocate ACL scenario (indirect 0x0C14)
+ */
+enum ice_status
+ice_aq_alloc_acl_scen(struct ice_hw *hw, u16 *scen_id,
+                     struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd)
+{
+       struct ice_aqc_acl_alloc_scen *cmd;
+       struct ice_aq_desc desc;
+       enum ice_status status;
+
+       if (!scen_id)
+               return ICE_ERR_PARAM;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_alloc_acl_scen);
+       desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+       cmd = &desc.params.alloc_scen;
+
+       status = ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd);
+       if (!status)
+               *scen_id = LE16_TO_CPU(cmd->ops.resp.scen_id);
+
+       return status;
+}
+
+/**
+ * ice_aq_dealloc_acl_scen - deallocate ACL scenario
+ * @hw: pointer to the HW struct
+ * @scen_id: scen_id to be deallocated
+ * @cd: pointer to command details structure or NULL
+ *
+ * Deallocate ACL scenario (direct 0x0C15)
+ */
+enum ice_status
+ice_aq_dealloc_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_sq_cd *cd)
+{
+       struct ice_aqc_acl_dealloc_scen *cmd;
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dealloc_acl_scen);
+       cmd = &desc.params.dealloc_scen;
+       cmd->scen_id = CPU_TO_LE16(scen_id);
+
+       return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_update_query_scen - update or query ACL scenario
+ * @hw: pointer to the HW struct
+ * @opcode: AQ command opcode for either query or update scenario
+ * @scen_id: scen_id to be updated or queried
+ * @buf: address of indirect data buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Calls update or query ACL scenario
+ */
+static enum ice_status
+ice_aq_update_query_scen(struct ice_hw *hw, u16 opcode, u16 scen_id,
+                        struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd)
+{
+       struct ice_aqc_acl_update_query_scen *cmd;
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+       if (opcode == ice_aqc_opc_update_acl_scen)
+               desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+       cmd = &desc.params.update_query_scen;
+       cmd->scen_id = CPU_TO_LE16(scen_id);
+
+       return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd);
+}
+
+/**
+ * ice_aq_update_acl_scen - update ACL scenario
+ * @hw: pointer to the HW struct
+ * @scen_id: scen_id to be updated
+ * @buf: address of indirect data buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update ACL scenario (indirect 0x0C1B)
+ */
+enum ice_status
+ice_aq_update_acl_scen(struct ice_hw *hw, u16 scen_id,
+                      struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd)
+{
+       return ice_aq_update_query_scen(hw, ice_aqc_opc_update_acl_scen,
+                                       scen_id, buf, cd);
+}
+
+/**
+ * ice_aq_query_acl_scen - query ACL scenario
+ * @hw: pointer to the HW struct
+ * @scen_id: scen_id to be queried
+ * @buf: address of indirect data buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query ACL scenario (indirect 0x0C23)
+ */
+enum ice_status
+ice_aq_query_acl_scen(struct ice_hw *hw, u16 scen_id,
+                     struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd)
+{
+       return ice_aq_update_query_scen(hw, ice_aqc_opc_query_acl_scen,
+                                       scen_id, buf, cd);
+}
diff --git a/drivers/net/ice/base/ice_acl.h b/drivers/net/ice/base/ice_acl.h
new file mode 100644 (file)
index 0000000..0029630
--- /dev/null
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _ICE_ACL_H_
+#define _ICE_ACL_H_
+
+#include "ice_common.h"
+#include "ice_adminq_cmd.h"
+
+struct ice_acl_tbl_params {
+       u16 width;      /* Select/match bytes */
+       u16 depth;      /* Number of entries */
+
+#define ICE_ACL_TBL_MAX_DEP_TBLS       15
+       u16 dep_tbls[ICE_ACL_TBL_MAX_DEP_TBLS];
+
+       u8 entry_act_pairs;     /* Action pairs per entry */
+       u8 concurr;             /* Concurrent table lookup enable */
+};
+
+struct ice_acl_act_mem {
+       u8 act_mem;
+#define ICE_ACL_ACT_PAIR_MEM_INVAL     0xff
+       u8 member_of_tcam;
+};
+
+struct ice_acl_tbl {
+       /* TCAM configuration */
+       u8 first_tcam;  /* Index of the first TCAM block */
+       u8 last_tcam;   /* Index of the last TCAM block */
+       /* Index of the first entry in the first TCAM */
+       u16 first_entry;
+       /* Index of the last entry in the last TCAM */
+       u16 last_entry;
+
+       /* List of active scenarios */
+       struct LIST_HEAD_TYPE scens;
+
+       struct ice_acl_tbl_params info;
+       struct ice_acl_act_mem act_mems[ICE_AQC_MAX_ACTION_MEMORIES];
+
+       /* Keep track of available 64-entry chunks in TCAMs */
+       ice_declare_bitmap(avail, ICE_AQC_ACL_ALLOC_UNITS);
+
+       u16 id;
+};
+
+#define ICE_MAX_ACL_TCAM_ENTRY (ICE_AQC_ACL_TCAM_DEPTH * ICE_AQC_ACL_SLICES)
+enum ice_acl_entry_prior {
+       ICE_LOW = 0,
+       ICE_NORMAL,
+       ICE_HIGH,
+       ICE_MAX_PRIOR
+};
+
+/* Scenario structure
+ * A scenario is a logical partition within an ACL table. It can span more
+ * than one TCAM in cascade mode to support select/mask key widths larger
+ * than the width of a TCAM. It can also span more than one TCAM in stacked
+ * mode to support larger number of entries than what a TCAM can hold. It is
+ * used to select values from selection bases (field vectors holding extract
+ * protocol header fields) to form lookup keys, and to associate action memory
+ * banks to the TCAMs used.
+ */
+struct ice_acl_scen {
+       struct LIST_ENTRY_TYPE list_entry;
+       /* If nth bit of act_mem_bitmap is set, then nth action memory will
+        * participate in this scenario
+        */
+       ice_declare_bitmap(act_mem_bitmap, ICE_AQC_MAX_ACTION_MEMORIES);
+
+       /* If nth bit of entry_bitmap is set, then nth entry will
+        * be available in this scenario
+        */
+       ice_declare_bitmap(entry_bitmap, ICE_MAX_ACL_TCAM_ENTRY);
+       u16 first_idx[ICE_MAX_PRIOR];
+       u16 last_idx[ICE_MAX_PRIOR];
+
+       u16 id;
+       u16 start;      /* Entry offset from the start of the parent table */
+#define ICE_ACL_SCEN_MIN_WIDTH 0x3
+       u16 width;      /* Number of select/mask bytes */
+       u16 num_entry;  /* Number of scenario entries */
+       u16 end;        /* Last addressable entry from start of table */
+       u8 eff_width;   /* Available width in bytes to match */
+#define ICE_ACL_SCEN_PKT_DIR_IDX_IN_TCAM       0x2
+#define ICE_ACL_SCEN_PID_IDX_IN_TCAM           0x3
+#define ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM       0x4
+       u8 pid_idx;     /* Byte index used to match profile ID */
+       u8 rng_chk_idx; /* Byte index used to match range checkers result */
+       u8 pkt_dir_idx; /* Byte index used to match packet direction */
+};
+
+/* This structure represents input fields needed to allocate ACL table */
+struct ice_acl_alloc_tbl {
+       /* Table's width in number of bytes matched */
+       u16 width;
+       /* Table's depth in number of entries. */
+       u16 depth;
+       u8 num_dependent_alloc_ids;     /* number of dependent alloc IDs */
+       u8 concurr;                     /* true for concurrent table type */
+
+       /* Number of action pairs per table entry. The minimum valid
+        * value for this field is 1 (i.e. a single pair of actions)
+        */
+       u8 act_pairs_per_entry;
+       union {
+               struct ice_aqc_acl_alloc_table_data data_buf;
+               struct ice_aqc_acl_generic resp_buf;
+       } buf;
+};
+
+/* This structure is used to communicate input and output params for
+ * [de]allocate_acl_counters
+ */
+struct ice_acl_cntrs {
+       u8 amount;
+       u8 type;
+       u8 bank;
+
+       /* The next two fields are used as output in case of
+        * alloc_acl_counters and as input in case of deallocate_acl_counters
+        */
+       u16 first_cntr;
+       u16 last_cntr;
+};
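+
+/* Illustrative example (hypothetical values): requesting four single-type
+ * counters from bank 0 could look like:
+ *
+ *     struct ice_acl_cntrs cntrs = { 0 };
+ *
+ *     cntrs.amount = 4;
+ *     cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
+ *     cntrs.bank = 0;
+ *
+ * On success, ice_aq_alloc_acl_cntrs() fills 'first_cntr' and 'last_cntr'.
+ */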
+
+enum ice_status
+ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params);
+enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw);
+enum ice_status
+ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries,
+                   u16 *scen_id);
+enum ice_status ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id);
+enum ice_status
+ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl,
+                    struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_dealloc_acl_tbl(struct ice_hw *hw, u16 alloc_id,
+                      struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
+                        struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
+                      struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id,
+                    struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id,
+                      struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
+                      struct ice_aqc_actpair *buf, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
+                    struct ice_aqc_actpair *buf, struct ice_sq_cd *cd);
+enum ice_status ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd);
+enum ice_status
+ice_prgm_acl_prof_extrt(struct ice_hw *hw, u8 prof_id,
+                       struct ice_aqc_acl_prof_generic_frmt *buf,
+                       struct ice_sq_cd *cd);
+enum ice_status
+ice_query_acl_prof(struct ice_hw *hw, u8 prof_id,
+                  struct ice_aqc_acl_prof_generic_frmt *buf,
+                  struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
+                      struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs,
+                        struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_query_acl_cntrs(struct ice_hw *hw, u8 bank, u16 index, u64 *cntr_val,
+                      struct ice_sq_cd *cd);
+enum ice_status
+ice_prog_acl_prof_ranges(struct ice_hw *hw, u8 prof_id,
+                        struct ice_aqc_acl_profile_ranges *buf,
+                        struct ice_sq_cd *cd);
+enum ice_status
+ice_query_acl_prof_ranges(struct ice_hw *hw, u8 prof_id,
+                         struct ice_aqc_acl_profile_ranges *buf,
+                         struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_alloc_acl_scen(struct ice_hw *hw, u16 *scen_id,
+                     struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_dealloc_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_update_acl_scen(struct ice_hw *hw, u16 scen_id,
+                      struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_query_acl_scen(struct ice_hw *hw, u16 scen_id,
+                     struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd);
+enum ice_status
+ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen,
+                 enum ice_acl_entry_prior prior, u8 *keys, u8 *inverts,
+                 struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx);
+enum ice_status
+ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
+                struct ice_acl_act_entry *acts, u8 acts_cnt, u16 entry_idx);
+enum ice_status
+ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx);
+#endif /* _ICE_ACL_H_ */
diff --git a/drivers/net/ice/base/ice_acl_ctrl.c b/drivers/net/ice/base/ice_acl_ctrl.c
new file mode 100644 (file)
index 0000000..7dfe0ed
--- /dev/null
@@ -0,0 +1,1185 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#include "ice_acl.h"
+#include "ice_flow.h"
+
+/* Determine the TCAM index of entry 'e' within the ACL table */
+#define ICE_ACL_TBL_TCAM_IDX(e) ((e) / ICE_AQC_ACL_TCAM_DEPTH)
+
+/* Determine the entry index within the TCAM */
+#define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((e) % ICE_AQC_ACL_TCAM_DEPTH)
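+
+/* For example (illustrative, assuming a TCAM depth of 512 entries): entry
+ * 700 resolves to TCAM index 1 (700 / 512) and entry index 188 (700 % 512).
+ */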
+
+#define ICE_ACL_SCEN_ENTRY_INVAL 0xFFFF
+
+/**
+ * ice_acl_init_entry
+ * @scen: pointer to the scenario struct
+ *
+ * Initialize the scenario control structure.
+ */
+static void ice_acl_init_entry(struct ice_acl_scen *scen)
+{
+       /* Low priority: start from the highest index, 25% of total entries
+        * Normal priority: start from the highest index, 50% of total entries
+        * High priority: start from the lowest index, 25% of total entries
+        */
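+       /* For example (illustrative), with num_entry = 512 these ranges
+        * resolve to high: 0..127 (filled upward), normal: 383..128 (filled
+        * downward) and low: 511..384 (filled downward).
+        */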
+       scen->first_idx[ICE_LOW] = scen->num_entry - 1;
+       scen->first_idx[ICE_NORMAL] = scen->num_entry - scen->num_entry / 4 - 1;
+       scen->first_idx[ICE_HIGH] = 0;
+
+       scen->last_idx[ICE_LOW] = scen->num_entry - scen->num_entry / 4;
+       scen->last_idx[ICE_NORMAL] = scen->num_entry / 4;
+       scen->last_idx[ICE_HIGH] = scen->num_entry / 4 - 1;
+}
+
+/**
+ * ice_acl_scen_assign_entry_idx
+ * @scen: pointer to the scenario struct
+ * @prior: the priority of the flow entry being allocated
+ *
+ * Find the index of an available entry in the scenario
+ *
+ * Returns ICE_ACL_SCEN_ENTRY_INVAL on failure
+ * Returns the entry index on success
+ */
+static u16 ice_acl_scen_assign_entry_idx(struct ice_acl_scen *scen,
+                                        enum ice_acl_entry_prior prior)
+{
+       u16 first_idx, last_idx, i;
+       s8 step;
+
+       if (prior >= ICE_MAX_PRIOR)
+               return ICE_ACL_SCEN_ENTRY_INVAL;
+
+       first_idx = scen->first_idx[prior];
+       last_idx = scen->last_idx[prior];
+       step = first_idx <= last_idx ? 1 : -1;
+
+       for (i = first_idx; i != last_idx + step; i += step)
+               if (!ice_test_and_set_bit(i, scen->entry_bitmap))
+                       return i;
+
+       return ICE_ACL_SCEN_ENTRY_INVAL;
+}
+
+/**
+ * ice_acl_scen_free_entry_idx
+ * @scen: pointer to the scenario struct
+ * @idx: the index of the flow entry being de-allocated
+ *
+ * Mark an entry as available in the scenario
+ */
+static enum ice_status
+ice_acl_scen_free_entry_idx(struct ice_acl_scen *scen, u16 idx)
+{
+       if (idx >= scen->num_entry)
+               return ICE_ERR_MAX_LIMIT;
+
+       if (!ice_test_and_clear_bit(idx, scen->entry_bitmap))
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       return ICE_SUCCESS;
+}
+
+/**
+ * ice_acl_tbl_calc_end_idx
+ * @start: start index of the TCAM entry of this partition
+ * @num_entries: number of entries in this partition
+ * @width: width of a partition in number of TCAMs
+ *
+ * Calculate the end entry index for a partition with starting entry index
+ * 'start', entries 'num_entries', and width 'width'.
+ */
+static u16 ice_acl_tbl_calc_end_idx(u16 start, u16 num_entries, u16 width)
+{
+       u16 end_idx, add_entries = 0;
+
+       end_idx = start + (num_entries - 1);
+
+       /* In case that our ACL partition requires cascading TCAMs */
+       if (width > 1) {
+               u16 num_stack_level;
+
+               /* Figure out the TCAM stacked level in this ACL scenario */
+               num_stack_level = (start % ICE_AQC_ACL_TCAM_DEPTH) +
+                       num_entries;
+               num_stack_level = DIVIDE_AND_ROUND_UP(num_stack_level,
+                                                     ICE_AQC_ACL_TCAM_DEPTH);
+
+                * In this case, each entry in our ACL partition spans
+                * multiple TCAMs. Thus, we will need to add
+                * ((width - 1) * num_stack_level) TCAM's entries to
+                * end_idx.
+                *
+                * For example : In our case, our scenario is 2x2:
+                *      [TCAM 0]        [TCAM 1]
+                *      [TCAM 2]        [TCAM 3]
+                * Assuming that a TCAM will have 512 entries. If "start"
+                * is 500, "num_entries" is 3 and "width" = 2, then end_idx
+                * should be 1014 (belongs to TCAM 2).
+                * Before going to this if statement, end_idx will have the
+                * value of 502. If "width" is 1, then the final value of
+                * end_idx is 502. However, in our case, width is 2, so we
+                * need to add (2 - 1) * 1 * 512. As a result, end_idx will
+                * have the value of 1014.
+                */
+               add_entries = (width - 1) * num_stack_level *
+                       ICE_AQC_ACL_TCAM_DEPTH;
+       }
+
+       return end_idx + add_entries;
+}
+
+/**
+ * ice_acl_init_tbl
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the ACL table by invalidating TCAM entries and action pairs.
+ */
+static enum ice_status ice_acl_init_tbl(struct ice_hw *hw)
+{
+       struct ice_aqc_actpair act_buf;
+       struct ice_aqc_acl_data buf;
+       enum ice_status status = ICE_SUCCESS;
+       struct ice_acl_tbl *tbl;
+       u8 tcam_idx, i;
+       u16 idx;
+
+       tbl = hw->acl_tbl;
+       if (!tbl) {
+               status = ICE_ERR_CFG;
+               return status;
+       }
+
+       ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
+       ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM);
+
+       tcam_idx = tbl->first_tcam;
+       idx = tbl->first_entry;
+       while (tcam_idx < tbl->last_tcam ||
+              (tcam_idx == tbl->last_tcam && idx <= tbl->last_entry)) {
+               /* Use the same value for entry_key and entry_key_inv since
+                * we are initializing the fields to 0
+                */
+               status = ice_aq_program_acl_entry(hw, tcam_idx, idx, &buf,
+                                                 NULL);
+               if (status)
+                       return status;
+
+               if (++idx > tbl->last_entry) {
+                       tcam_idx++;
+                       idx = tbl->first_entry;
+               }
+       }
+
+       for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++) {
+               u16 act_entry_idx, start, end;
+
+               if (tbl->act_mems[i].act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL)
+                       continue;
+
+               start = tbl->first_entry;
+               end = tbl->last_entry;
+
+               for (act_entry_idx = start; act_entry_idx <= end;
+                    act_entry_idx++) {
+                       /* Invalidate all allocated action pairs */
+                       status = ice_aq_program_actpair(hw, i, act_entry_idx,
+                                                       &act_buf, NULL);
+                       if (status)
+                               return status;
+               }
+       }
+
+       return status;
+}
+
+/**
+ * ice_acl_assign_act_mems_to_tcam
+ * @tbl: pointer to ACL table structure
+ * @cur_tcam: Index of current TCAM. Value = 0 to (ICE_AQC_ACL_SLICES - 1)
+ * @cur_mem_idx: Index of current action memory bank. Value = 0 to
+ *              (ICE_AQC_MAX_ACTION_MEMORIES - 1)
+ * @num_mem: Number of action memory banks for this TCAM
+ *
+ * Assign "num_mem" valid action memory banks from "curr_mem_idx" to
+ * "curr_tcam" TCAM.
+ */
+static void
+ice_acl_assign_act_mems_to_tcam(struct ice_acl_tbl *tbl, u8 cur_tcam,
+                               u8 *cur_mem_idx, u8 num_mem)
+{
+       u8 mem_cnt;
+
+       for (mem_cnt = 0;
+            *cur_mem_idx < ICE_AQC_MAX_ACTION_MEMORIES && mem_cnt < num_mem;
+            (*cur_mem_idx)++) {
+               struct ice_acl_act_mem *p_mem = &tbl->act_mems[*cur_mem_idx];
+
+               if (p_mem->act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL)
+                       continue;
+
+               p_mem->member_of_tcam = cur_tcam;
+
+               mem_cnt++;
+       }
+}
+
+/**
+ * ice_acl_divide_act_mems_to_tcams
+ * @tbl: pointer to ACL table structure
+ *
+ * Figure out how to divide the given action memory banks among the given
+ * TCAMs. This division is for SW bookkeeping only; at scenario creation
+ * time, an action memory bank can be associated with a different TCAM.
+ *
+ * For example, given a 2x2 ACL table where each table entry has 2 action
+ * memory pairs, we will have 4 TCAMs (T1,T2,T3,T4) and 4 action memory
+ * banks (A1,A2,A3,A4):
+ *     [T1 - T2] { A1 - A2 }
+ *     [T3 - T4] { A3 - A4 }
+ * When we need to create a scenario, for example a 2x1 scenario, we will
+ * use [T3,T4] in a cascaded layout. Since all action memory banks in a
+ * cascaded TCAM row must be associated with the last TCAM, we will
+ * associate action memory banks [A3] and [A4] with TCAM [T4].
+ * For SW bookkeeping purposes, we keep a theoretical map between TCAM
+ * [Tn] and action memory bank [An].
+ */
+static void ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl *tbl)
+{
+       u16 num_cscd, stack_level, stack_idx, min_act_mem;
+       u8 tcam_idx = tbl->first_tcam;
+       u16 max_idx_to_get_extra;
+       u8 mem_idx = 0;
+
+       /* Determine number of stacked TCAMs */
+       stack_level = DIVIDE_AND_ROUND_UP(tbl->info.depth,
+                                         ICE_AQC_ACL_TCAM_DEPTH);
+
+       /* Determine number of cascaded TCAMs */
+       num_cscd = DIVIDE_AND_ROUND_UP(tbl->info.width,
+                                      ICE_AQC_ACL_KEY_WIDTH_BYTES);
+
+       /* In a line of cascaded TCAM, given the number of action memory
+        * banks per ACL table entry, we want to fairly divide these action
+        * memory banks between these TCAMs.
+        *
+        * For example, there are 3 TCAMs (TCAM 3,4,5) in a line of
+        * cascaded TCAM, and there are 7 act_mems for each ACL table entry.
+        * The result is:
+        *      [TCAM_3 will have 3 act_mems]
+        *      [TCAM_4 will have 2 act_mems]
+        *      [TCAM_5 will have 2 act_mems]
+        */
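+       /* With the example above: min_act_mem = 7 / 3 = 2 and
+        * max_idx_to_get_extra = 7 % 3 = 1, so only the first TCAM of the
+        * cascade row receives an extra bank.
+        */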
+       min_act_mem = tbl->info.entry_act_pairs / num_cscd;
+       max_idx_to_get_extra = tbl->info.entry_act_pairs % num_cscd;
+
+       for (stack_idx = 0; stack_idx < stack_level; stack_idx++) {
+               u16 i;
+
+               for (i = 0; i < num_cscd; i++) {
+                       u8 total_act_mem = min_act_mem;
+
+                       if (i < max_idx_to_get_extra)
+                               total_act_mem++;
+
+                       ice_acl_assign_act_mems_to_tcam(tbl, tcam_idx,
+                                                       &mem_idx,
+                                                       total_act_mem);
+
+                       tcam_idx++;
+               }
+       }
+}
+
+/**
+ * ice_acl_create_tbl
+ * @hw: pointer to the HW struct
+ * @params: parameters for the table to be created
+ *
+ * Create a LEM table for ACL usage. We are currently starting with some fixed
+ * values for the size of the table, but this will need to grow as more flow
+ * entries are added by the user level.
+ */
+enum ice_status
+ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params)
+{
+       u16 width, depth, first_e, last_e, i;
+       struct ice_aqc_acl_generic *resp_buf;
+       struct ice_acl_alloc_tbl tbl_alloc;
+       struct ice_acl_tbl *tbl;
+       enum ice_status status;
+
+       if (hw->acl_tbl)
+               return ICE_ERR_ALREADY_EXISTS;
+
+       if (!params)
+               return ICE_ERR_PARAM;
+
+       /* Round up the width to the next TCAM width boundary */
+       width = ROUND_UP(params->width, (u16)ICE_AQC_ACL_KEY_WIDTH_BYTES);
+       /* depth should be provided in chunk (64 entry) increments */
+       depth = ICE_ALIGN(params->depth, ICE_ACL_ENTRY_ALLOC_UNIT);
+
+       if (params->entry_act_pairs < width / ICE_AQC_ACL_KEY_WIDTH_BYTES) {
+               params->entry_act_pairs = width / ICE_AQC_ACL_KEY_WIDTH_BYTES;
+
+               if (params->entry_act_pairs > ICE_AQC_TBL_MAX_ACTION_PAIRS)
+                       params->entry_act_pairs = ICE_AQC_TBL_MAX_ACTION_PAIRS;
+       }
+
+       /* Validate that width*depth will not exceed the TCAM limit */
+       if ((DIVIDE_AND_ROUND_UP(depth, ICE_AQC_ACL_TCAM_DEPTH) *
+            (width / ICE_AQC_ACL_KEY_WIDTH_BYTES)) > ICE_AQC_ACL_SLICES)
+               return ICE_ERR_MAX_LIMIT;
+
+       ice_memset(&tbl_alloc, 0, sizeof(tbl_alloc), ICE_NONDMA_MEM);
+       tbl_alloc.width = width;
+       tbl_alloc.depth = depth;
+       tbl_alloc.act_pairs_per_entry = params->entry_act_pairs;
+       tbl_alloc.concurr = params->concurr;
+       /* Set dependent_alloc_id only for concurrent table type */
+       if (params->concurr) {
+               tbl_alloc.num_dependent_alloc_ids =
+                       ICE_AQC_MAX_CONCURRENT_ACL_TBL;
+
+               for (i = 0; i < ICE_AQC_MAX_CONCURRENT_ACL_TBL; i++)
+                       tbl_alloc.buf.data_buf.alloc_ids[i] =
+                               CPU_TO_LE16(params->dep_tbls[i]);
+       }
+
+       /* call the aq command to create the ACL table with these values */
+       status = ice_aq_alloc_acl_tbl(hw, &tbl_alloc, NULL);
+
+       if (status) {
+               if (LE16_TO_CPU(tbl_alloc.buf.resp_buf.alloc_id) <
+                   ICE_AQC_ALLOC_ID_LESS_THAN_4K)
+                       ice_debug(hw, ICE_DBG_ACL,
+                                 "Alloc ACL table failed. Unavailable resource.\n");
+               else
+                       ice_debug(hw, ICE_DBG_ACL,
+                                 "AQ allocation of ACL failed with error. status: %d\n",
+                                  status);
+               return status;
+       }
+
+       tbl = (struct ice_acl_tbl *)ice_malloc(hw, sizeof(*tbl));
+       if (!tbl) {
+               status = ICE_ERR_NO_MEMORY;
+
+               goto out;
+       }
+
+       resp_buf = &tbl_alloc.buf.resp_buf;
+
+       /* Retrieve information of the allocated table */
+       tbl->id = LE16_TO_CPU(resp_buf->alloc_id);
+       tbl->first_tcam = resp_buf->ops.table.first_tcam;
+       tbl->last_tcam = resp_buf->ops.table.last_tcam;
+       tbl->first_entry = LE16_TO_CPU(resp_buf->first_entry);
+       tbl->last_entry = LE16_TO_CPU(resp_buf->last_entry);
+
+       tbl->info = *params;
+       tbl->info.width = width;
+       tbl->info.depth = depth;
+       hw->acl_tbl = tbl;
+
+       for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++)
+               tbl->act_mems[i].act_mem = resp_buf->act_mem[i];
+
+       /* Figure out which TCAMs that these newly allocated action memories
+        * belong to.
+        */
+       ice_acl_divide_act_mems_to_tcams(tbl);
+
+       /* Initialize the resources allocated by invalidating all TCAM entries
+        * and all the action pairs
+        */
+       status = ice_acl_init_tbl(hw);
+       if (status) {
+               ice_free(hw, tbl);
+               hw->acl_tbl = NULL;
+               ice_debug(hw, ICE_DBG_ACL,
+                         "Initialization of TCAM entries failed. status: %d\n",
+                         status);
+               goto out;
+       }
+
+       first_e = (tbl->first_tcam * ICE_AQC_MAX_TCAM_ALLOC_UNITS) +
+               (tbl->first_entry / ICE_ACL_ENTRY_ALLOC_UNIT);
+       last_e = (tbl->last_tcam * ICE_AQC_MAX_TCAM_ALLOC_UNITS) +
+               (tbl->last_entry / ICE_ACL_ENTRY_ALLOC_UNIT);
+
+       /* Indicate available entries in the table */
+       for (i = first_e; i <= last_e; i++)
+               ice_set_bit(i, tbl->avail);
+
+       INIT_LIST_HEAD(&tbl->scens);
+out:
+
+       return status;
+}
+
+/**
+ * ice_acl_alloc_partition - Allocate a partition from the ACL table
+ * @hw: pointer to the hardware structure
+ * @req: info of partition being allocated
+ */
+static enum ice_status
+ice_acl_alloc_partition(struct ice_hw *hw, struct ice_acl_scen *req)
+{
+       u16 start = 0, cnt = 0, off = 0;
+       u16 width, r_entries, row;
+       bool done = false;
+       int dir;
+
+       /* Determine the number of TCAMs each entry overlaps */
+       width = DIVIDE_AND_ROUND_UP(req->width, ICE_AQC_ACL_KEY_WIDTH_BYTES);
+
+       /* Check if we have enough TCAMs to accommodate the width */
+       if (width > hw->acl_tbl->last_tcam - hw->acl_tbl->first_tcam + 1)
+               return ICE_ERR_MAX_LIMIT;
+
+       /* Number of entries must be multiple of ICE_ACL_ENTRY_ALLOC_UNIT's */
+       r_entries = ICE_ALIGN(req->num_entry, ICE_ACL_ENTRY_ALLOC_UNIT);
+
+       /* To look for an available partition that can accommodate the request,
+        * the process first logically arranges available TCAMs in rows such
+        * that each row produces entries with the requested width. It then
+        * scans the TCAMs' available bitmap, one bit at a time, and
+        * accumulates contiguous available 64-entry chunks until there are
+        * enough of them or when all TCAM configurations have been checked.
+        *
+        * For width of 1 TCAM, the scanning process starts from the top most
+        * TCAM, and goes downward. Available bitmaps are examined from LSB
+        * to MSB.
+        *
+        * For width of multiple TCAMs, the process starts from the bottom-most
+        * row of TCAMs, and goes upward. Available bitmaps are examined from
+        * the MSB to the LSB.
+        *
+        * To make sure that adjacent TCAMs can be logically arranged in the
+        * same row, the scanning process may have multiple passes. In each
+        * pass, the first TCAM of the bottom-most row is displaced by one
+        * additional TCAM. The width of the row and the number of the TCAMs
+        * available determine the number of passes. When the displacement is
+        * more than the size of width, the TCAM row configurations will
+        * repeat. The process will terminate when the configurations repeat.
+        *
+        * Available partitions can span more than one row of TCAMs.
+        */
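+       /* Illustrative trace (assuming first_tcam = 0, last_tcam = 5 and
+        * width = 2): the first pass scans rows {4,5}, {2,3}, {0,1}; the
+        * second pass (off = 1) scans rows {3,4}, {1,2}; 'off' then reaches
+        * 'width' and the scan stops.
+        */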
+       if (width == 1) {
+               row = hw->acl_tbl->first_tcam;
+               dir = 1;
+       } else {
+               /* Start with the bottom-most row, and scan for available
+                * entries upward
+                */
+               row = hw->acl_tbl->last_tcam + 1 - width;
+               dir = -1;
+       }
+
+       do {
+               u16 i;
+
+               /* Scan all 64-entry chunks, one chunk at a time, in the
+                * current TCAM row
+                */
+               for (i = 0;
+                    i < ICE_AQC_MAX_TCAM_ALLOC_UNITS && cnt < r_entries;
+                    i++) {
+                       bool avail = true;
+                       u16 w, p;
+
+                       /* Compute the cumulative available mask across the
+                        * TCAM row to determine if the current 64-entry chunk
+                        * is available.
+                        */
+                       p = dir > 0 ? i : ICE_AQC_MAX_TCAM_ALLOC_UNITS - i - 1;
+                       for (w = row; w < row + width && avail; w++) {
+                               u16 b;
+
+                               b = (w * ICE_AQC_MAX_TCAM_ALLOC_UNITS) + p;
+                               avail &= ice_is_bit_set(hw->acl_tbl->avail, b);
+                       }
+
+                       if (!avail) {
+                               cnt = 0;
+                       } else {
+                               /* Compute the starting index of the newly
+                                * found partition. When 'dir' is negative, the
+                                * scan process is going upward. If so, the
+                                * starting index needs to be updated for every
+                                * available 64-entry chunk found.
+                                */
+                               if (!cnt || dir < 0)
+                                       start = (row * ICE_AQC_ACL_TCAM_DEPTH) +
+                                               (p * ICE_ACL_ENTRY_ALLOC_UNIT);
+                               cnt += ICE_ACL_ENTRY_ALLOC_UNIT;
+                       }
+               }
+
+               if (cnt >= r_entries) {
+                       req->start = start;
+                       req->num_entry = r_entries;
+                       req->end = ice_acl_tbl_calc_end_idx(start, r_entries,
+                                                           width);
+                       break;
+               }
+
+               row = (dir > 0) ? (row + width) : (row - width);
+               if (row > hw->acl_tbl->last_tcam ||
+                   row < hw->acl_tbl->first_tcam) {
+                       /* All rows have been checked. Increment 'off' that
+                        * will help yield a different TCAM configuration in
+                        * which adjacent TCAMs can be alternatively in the
+                        * same row.
+                        */
+                       off++;
+
+                       /* However, if the new 'off' value yields previously
+                        * checked configurations, then exit.
+                        */
+                       if (off >= width)
+                               done = true;
+                       else
+                               row = dir > 0 ? off :
+                                       hw->acl_tbl->last_tcam + 1 - off -
+                                       width;
+               }
+       } while (!done);
+
+       return cnt >= r_entries ? ICE_SUCCESS : ICE_ERR_MAX_LIMIT;
+}
+
+/**
+ * ice_acl_fill_tcam_select
+ * @scen_buf: Pointer to the scenario buffer that needs to be populated
+ * @scen: Pointer to the available space for the scenario
+ * @tcam_idx: Index of the TCAM used for this scenario
+ * @tcam_idx_in_cascade: local index of the TCAM in the cascade scenario
+ *
+ * For all TCAM that participate in this scenario, fill out the tcam_select
+ * value.
+ */
+static void
+ice_acl_fill_tcam_select(struct ice_aqc_acl_scen *scen_buf,
+                        struct ice_acl_scen *scen, u16 tcam_idx,
+                        u16 tcam_idx_in_cascade)
+{
+       u16 cascade_cnt, idx;
+       u8 j;
+
+       idx = tcam_idx_in_cascade * ICE_AQC_ACL_KEY_WIDTH_BYTES;
+       cascade_cnt = DIVIDE_AND_ROUND_UP(scen->width,
+                                         ICE_AQC_ACL_KEY_WIDTH_BYTES);
+
+       /* For each scenario, the last three bytes of the scenario width are
+        * reserved for the profile ID, range checker and packet direction.
+        * Thus, the last three bytes of the last cascaded TCAM will hold the
+        * values of the 1st, 31st and 32nd byte locations of the byte
+        * selection base.
+        *
+        * For other bytes in the TCAMs:
+        * For non-cascade mode (1 TCAM wide) scenario, TCAM[x]'s Select {0-1}
+        * select indices 0-1 of the Byte Selection Base
+        * For cascade mode, the leftmost TCAM of the first cascade row selects
+        * indices 0-4 of the Byte Selection Base; the second TCAM in the
+        * cascade row selects indices starting with 5-n
+        */
+       for (j = 0; j < ICE_AQC_ACL_KEY_WIDTH_BYTES; j++) {
+               /* PKT DIR uses the 1st location of Byte Selection Base: + 1 */
+               u8 val = ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx;
+
+               if (tcam_idx_in_cascade == cascade_cnt - 1) {
+                       if (j == ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM)
+                               val = ICE_AQC_ACL_BYTE_SEL_BASE_RNG_CHK;
+                       else if (j == ICE_ACL_SCEN_PID_IDX_IN_TCAM)
+                               val = ICE_AQC_ACL_BYTE_SEL_BASE_PID;
+                       else if (j == ICE_ACL_SCEN_PKT_DIR_IDX_IN_TCAM)
+                               val = ICE_AQC_ACL_BYTE_SEL_BASE_PKT_DIR;
+               }
+
+               /* In case that scenario's width is greater than the width of
+                * the Byte selection base, we will not assign a value to the
+                * tcam_select[j]. As a result, the tcam_select[j] will have
+                * default value which is zero.
+                */
+               if (val > ICE_AQC_ACL_BYTE_SEL_BASE_RNG_CHK)
+                       continue;
+
+               scen_buf->tcam_cfg[tcam_idx].tcam_select[j] = val;
+
+               idx++;
+       }
+}
+
+/**
+ * ice_acl_set_scen_chnk_msk
+ * @scen_buf: Pointer to the scenario buffer that needs to be populated
+ * @scen: pointer to the available space for the scenario
+ *
+ * Set the chunk mask for the entries that will be used by this scenario
+ */
+static void
+ice_acl_set_scen_chnk_msk(struct ice_aqc_acl_scen *scen_buf,
+                         struct ice_acl_scen *scen)
+{
+       u16 tcam_idx, num_cscd, units, cnt;
+       u8 chnk_offst;
+
+       /* Determine the starting TCAM index and offset of the start entry */
+       tcam_idx = ICE_ACL_TBL_TCAM_IDX(scen->start);
+       chnk_offst = (u8)((scen->start % ICE_AQC_ACL_TCAM_DEPTH) /
+                         ICE_ACL_ENTRY_ALLOC_UNIT);
+
+       /* Entries are allocated and tracked in multiples of 64 */
+       units = scen->num_entry / ICE_ACL_ENTRY_ALLOC_UNIT;
+
+       /* Determine number of cascaded TCAMs */
+       num_cscd = scen->width / ICE_AQC_ACL_KEY_WIDTH_BYTES;
+
+       for (cnt = 0; cnt < units; cnt++) {
+               u16 i;
+
+               /* Set the corresponding bit in chnk_msk for each TCAM that
+                * this 64-entry chunk spans in the cascade.
+                * Each TCAM holds (ICE_AQC_ACL_TCAM_DEPTH /
+                * ICE_ACL_ENTRY_ALLOC_UNIT), i.e. 8, chunks.
+                */
+               for (i = tcam_idx; i < tcam_idx + num_cscd; i++)
+                       scen_buf->tcam_cfg[i].chnk_msk |= BIT(chnk_offst);
+
+               chnk_offst = (chnk_offst + 1) % ICE_AQC_MAX_TCAM_ALLOC_UNITS;
+               if (!chnk_offst)
+                       tcam_idx += num_cscd;
+       }
+}
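+
+/* Worked example (a sketch, not driver code): a scenario with start == 128
+ * and num_entry == 128 on a two-TCAM cascade gives
+ * chnk_offst == (128 % 512) / 64 == 2 and units == 2, so the loop sets
+ * BIT(2) and BIT(3) in the chnk_msk of both cascaded TCAMs.
+ */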
+
+/**
+ * ice_acl_assign_act_mem_for_scen
+ * @tbl: pointer to the ACL table structure
+ * @scen: pointer to the scenario struct
+ * @scen_buf: pointer to the scenario buffer to be populated
+ * @current_tcam_idx: theoretical index of the TCAM that the action memory
+ *                   banks were associated with at table creation time
+ * @target_tcam_idx: index of the TCAM to associate those action memory
+ *                  banks with
+ */
+static void
+ice_acl_assign_act_mem_for_scen(struct ice_acl_tbl *tbl,
+                               struct ice_acl_scen *scen,
+                               struct ice_aqc_acl_scen *scen_buf,
+                               u8 current_tcam_idx,
+                               u8 target_tcam_idx)
+{
+       u8 i;
+
+       for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++) {
+               struct ice_acl_act_mem *p_mem = &tbl->act_mems[i];
+
+               if (p_mem->act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL ||
+                   p_mem->member_of_tcam != current_tcam_idx)
+                       continue;
+
+               scen_buf->act_mem_cfg[i] = target_tcam_idx;
+               scen_buf->act_mem_cfg[i] |= ICE_AQC_ACL_SCE_ACT_MEM_EN;
+               ice_set_bit(i, scen->act_mem_bitmap);
+       }
+}
+
+/**
+ * ice_acl_commit_partition - mark whether the specified partition is active
+ * @hw: pointer to the hardware structure
+ * @scen: pointer to the scenario struct
+ * @commit: true if the partition is being committed
+ */
+static void
+ice_acl_commit_partition(struct ice_hw *hw, struct ice_acl_scen *scen,
+                        bool commit)
+{
+       u16 tcam_idx, off, num_cscd, units, cnt;
+
+       /* Determine the starting TCAM index and offset of the start entry */
+       tcam_idx = ICE_ACL_TBL_TCAM_IDX(scen->start);
+       off = (scen->start % ICE_AQC_ACL_TCAM_DEPTH) /
+               ICE_ACL_ENTRY_ALLOC_UNIT;
+
+       /* Entries are allocated and tracked in multiples of 64 */
+       units = scen->num_entry / ICE_ACL_ENTRY_ALLOC_UNIT;
+
+       /* Determine the number of cascaded TCAMs */
+       num_cscd = scen->width / ICE_AQC_ACL_KEY_WIDTH_BYTES;
+
+       for (cnt = 0; cnt < units; cnt++) {
+               u16 w;
+
+               /* Set/clear the corresponding bit of each TCAM in the row
+                * that this 64-entry chunk spans
+                */
+               for (w = 0; w < num_cscd; w++) {
+                       u16 b;
+
+                       b = ((tcam_idx + w) * ICE_AQC_MAX_TCAM_ALLOC_UNITS) +
+                               off;
+                       if (commit)
+                               ice_set_bit(b, hw->acl_tbl->avail);
+                       else
+                               ice_clear_bit(b, hw->acl_tbl->avail);
+               }
+
+               off = (off + 1) % ICE_AQC_MAX_TCAM_ALLOC_UNITS;
+               if (!off)
+                       tcam_idx += num_cscd;
+       }
+}
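+
+/* Note: the avail bitmap is indexed flat across all TCAMs. Chunk 'off' of
+ * TCAM 't' lives at bit t * ICE_AQC_MAX_TCAM_ALLOC_UNITS + off, since each
+ * 512-entry TCAM holds eight 64-entry chunks.
+ */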
+
+/**
+ * ice_acl_create_scen
+ * @hw: pointer to the hardware structure
+ * @match_width: number of bytes to be matched in this scenario
+ * @num_entries: number of entries to be allocated for the scenario
+ * @scen_id: holds returned scenario ID if successful
+ */
+enum ice_status
+ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries,
+                   u16 *scen_id)
+{
+       u8 cascade_cnt, first_tcam, last_tcam, i, k;
+       struct ice_aqc_acl_scen scen_buf;
+       struct ice_acl_scen *scen;
+       enum ice_status status;
+
+       if (!hw->acl_tbl)
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       scen = (struct ice_acl_scen *)ice_malloc(hw, sizeof(*scen));
+       if (!scen)
+               return ICE_ERR_NO_MEMORY;
+
+       scen->start = hw->acl_tbl->first_entry;
+       scen->width = ICE_AQC_ACL_KEY_WIDTH_BYTES *
+               DIVIDE_AND_ROUND_UP(match_width, ICE_AQC_ACL_KEY_WIDTH_BYTES);
+       scen->num_entry = num_entries;
+
+       status = ice_acl_alloc_partition(hw, scen);
+       if (status) {
+               ice_free(hw, scen);
+               return status;
+       }
+
+       ice_memset(&scen_buf, 0, sizeof(scen_buf), ICE_NONDMA_MEM);
+
+       /* Determine the number of cascaded TCAMs, given the scenario's width */
+       cascade_cnt = DIVIDE_AND_ROUND_UP(scen->width,
+                                         ICE_AQC_ACL_KEY_WIDTH_BYTES);
+       first_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
+       last_tcam = ICE_ACL_TBL_TCAM_IDX(scen->end);
+
+       /* For each scenario, the last three bytes of the scenario width are
+        * reserved for the packet direction flag, profile ID and range
+        * checker. Thus, we return the eff_width, pkt_dir_idx, rng_chk_idx
+        * and pid_idx to the caller.
+        */
+       scen->eff_width = cascade_cnt * ICE_AQC_ACL_KEY_WIDTH_BYTES -
+               ICE_ACL_SCEN_MIN_WIDTH;
+       scen->rng_chk_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES +
+               ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM;
+       scen->pid_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES +
+               ICE_ACL_SCEN_PID_IDX_IN_TCAM;
+       scen->pkt_dir_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES +
+               ICE_ACL_SCEN_PKT_DIR_IDX_IN_TCAM;
+
+       /* set the chunk mask for the TCAMs */
+       ice_acl_set_scen_chnk_msk(&scen_buf, scen);
+
+       /* set the TCAM select and start_cmp and start_set bits */
+       k = first_tcam;
+       /* set the START_SET bit at the beginning of the stack */
+       scen_buf.tcam_cfg[k].start_cmp_set |= ICE_AQC_ACL_ALLOC_SCE_START_SET;
+       while (k <= last_tcam) {
+               u8 last_tcam_idx_cascade = cascade_cnt + k - 1;
+
+               /* set start_cmp for the first cascaded TCAM */
+               scen_buf.tcam_cfg[k].start_cmp_set |=
+                       ICE_AQC_ACL_ALLOC_SCE_START_CMP;
+
+               /* cascade TCAMs up to the width of the scenario */
+               for (i = k; i < cascade_cnt + k; i++) {
+                       ice_acl_fill_tcam_select(&scen_buf, scen, i, i - k);
+                       ice_acl_assign_act_mem_for_scen(hw->acl_tbl, scen,
+                                                       &scen_buf,
+                                                       i,
+                                                       last_tcam_idx_cascade);
+               }
+
+               k = i;
+       }
+
+       /* We need to set the start_cmp bit for the unused TCAMs. */
+       i = 0;
+       while (i < first_tcam)
+               scen_buf.tcam_cfg[i++].start_cmp_set =
+                                       ICE_AQC_ACL_ALLOC_SCE_START_CMP;
+
+       i = last_tcam + 1;
+       while (i < ICE_AQC_ACL_SLICES)
+               scen_buf.tcam_cfg[i++].start_cmp_set =
+                                       ICE_AQC_ACL_ALLOC_SCE_START_CMP;
+
+       status = ice_aq_alloc_acl_scen(hw, scen_id, &scen_buf, NULL);
+       if (status) {
+               ice_debug(hw, ICE_DBG_ACL,
+                         "AQ allocation of ACL scenario failed. status: %d\n",
+                         status);
+               ice_free(hw, scen);
+               return status;
+       }
+
+       scen->id = *scen_id;
+       ice_acl_commit_partition(hw, scen, false);
+       ice_acl_init_entry(scen);
+       LIST_ADD(&scen->list_entry, &hw->acl_tbl->scens);
+
+       return status;
+}
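+
+/* Usage sketch (illustrative only; assumes an ACL table was created
+ * beforehand via the module's table-creation helper, so hw->acl_tbl is
+ * valid):
+ *
+ *     u16 scen_id;
+ *     enum ice_status status;
+ *
+ *     status = ice_acl_create_scen(hw, 12, 512, &scen_id);
+ *
+ * A match_width of 12 is rounded up to three cascaded 5-byte TCAM keys
+ * (width 15), leaving an eff_width of 12 after the three reserved bytes.
+ */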
+
+/**
+ * ice_acl_destroy_tbl - Destroy a previously created LEM table for ACL
+ * @hw: pointer to the HW struct
+ */
+enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw)
+{
+       struct ice_acl_scen *pos_scen, *tmp_scen;
+       struct ice_aqc_acl_generic resp_buf;
+       struct ice_aqc_acl_scen buf;
+       enum ice_status status;
+       u8 i;
+
+       if (!hw->acl_tbl)
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       /* Mark each created scenario's TCAMs to stop packet lookup, then
+        * delete the scenarios
+        */
+       LIST_FOR_EACH_ENTRY_SAFE(pos_scen, tmp_scen, &hw->acl_tbl->scens,
+                                ice_acl_scen, list_entry) {
+               status = ice_aq_query_acl_scen(hw, pos_scen->id, &buf, NULL);
+               if (status) {
+                       ice_debug(hw, ICE_DBG_ACL, "ice_aq_query_acl_scen() failed. status: %d\n",
+                                 status);
+                       return status;
+               }
+
+               for (i = 0; i < ICE_AQC_ACL_SLICES; i++) {
+                       buf.tcam_cfg[i].chnk_msk = 0;
+                       buf.tcam_cfg[i].start_cmp_set =
+                                       ICE_AQC_ACL_ALLOC_SCE_START_CMP;
+               }
+
+               for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++)
+                       buf.act_mem_cfg[i] = 0;
+
+               status = ice_aq_update_acl_scen(hw, pos_scen->id, &buf, NULL);
+               if (status) {
+                       ice_debug(hw, ICE_DBG_ACL, "ice_aq_update_acl_scen() failed. status: %d\n",
+                                 status);
+                       return status;
+               }
+
+               status = ice_acl_destroy_scen(hw, pos_scen->id);
+               if (status) {
+                       ice_debug(hw, ICE_DBG_ACL, "deletion of scenario failed. status: %d\n",
+                                 status);
+                       return status;
+               }
+       }
+
+       /* call the aq command to destroy the ACL table */
+       status = ice_aq_dealloc_acl_tbl(hw, hw->acl_tbl->id, &resp_buf, NULL);
+
+       if (status) {
+               ice_debug(hw, ICE_DBG_ACL,
+                         "AQ de-allocation of ACL failed. status: %d\n",
+                         status);
+               return status;
+       }
+
+       ice_free(hw, hw->acl_tbl);
+       hw->acl_tbl = NULL;
+
+       return ICE_SUCCESS;
+}
+
+/**
+ * ice_acl_add_entry - Add a flow entry to an ACL scenario
+ * @hw: pointer to the HW struct
+ * @scen: scenario to add the entry to
+ * @prior: priority level of the entry being added
+ * @keys: buffer holding the key value to be programmed into the ACL entry
+ * @inverts: buffer holding the key invert values to be programmed
+ * @acts: pointer to a buffer containing formatted actions
+ * @acts_cnt: indicates the number of actions stored in "acts"
+ * @entry_idx: returned scenario-relative index of the added flow entry
+ *
+ * Given an ACL table and a scenario, add the specified key and key invert
+ * to an available entry in the specified scenario.
+ * The "keys" and "inverts" buffers must be the same size as the scenario's
+ * width.
+ */
+enum ice_status
+ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen,
+                 enum ice_acl_entry_prior prior, u8 *keys, u8 *inverts,
+                 struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx)
+{
+       u8 i, entry_tcam, num_cscd, offset;
+       struct ice_aqc_acl_data buf;
+       enum ice_status status = ICE_SUCCESS;
+       u16 idx;
+
+       if (!scen)
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       *entry_idx = ice_acl_scen_assign_entry_idx(scen, prior);
+       if (*entry_idx >= scen->num_entry) {
+               *entry_idx = 0;
+               return ICE_ERR_MAX_LIMIT;
+       }
+
+       /* Determine number of cascaded TCAMs */
+       num_cscd = DIVIDE_AND_ROUND_UP(scen->width,
+                                      ICE_AQC_ACL_KEY_WIDTH_BYTES);
+
+       entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
+       idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + *entry_idx);
+
+       ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
+       for (i = 0; i < num_cscd; i++) {
+               /* If the key spans more than one TCAM in the case of cascaded
+                * TCAMs, the key and key inverts need to be properly split
+                * among the TCAMs, e.g. bytes 0-4 go to an index in the first
+                * TCAM and bytes 5-9 go to the same index in the next TCAM, etc.
+                * If the entry spans more than one TCAM in a cascaded TCAM
+                * mode, the programming of the entries in the TCAMs must be in
+                * reversed order - the TCAM entry of the rightmost TCAM should
+                * be programmed first; the TCAM entry of the leftmost TCAM
+                * should be programmed last.
+                */
+               offset = num_cscd - i - 1;
+               ice_memcpy(&buf.entry_key.val,
+                          &keys[offset * sizeof(buf.entry_key.val)],
+                          sizeof(buf.entry_key.val), ICE_NONDMA_TO_NONDMA);
+               ice_memcpy(&buf.entry_key_invert.val,
+                          &inverts[offset * sizeof(buf.entry_key_invert.val)],
+                          sizeof(buf.entry_key_invert.val),
+                          ICE_NONDMA_TO_NONDMA);
+               status = ice_aq_program_acl_entry(hw, entry_tcam + offset, idx,
+                                                 &buf, NULL);
+               if (status) {
+                       ice_debug(hw, ICE_DBG_ACL,
+                                 "aq program acl entry failed status: %d\n",
+                                 status);
+                       goto out;
+               }
+       }
+
+       /* Program the action memory */
+       status = ice_acl_prog_act(hw, scen, acts, acts_cnt, *entry_idx);
+
+out:
+       if (status) {
+               ice_acl_rem_entry(hw, scen, *entry_idx);
+               *entry_idx = 0;
+       }
+
+       return status;
+}
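+
+/* Usage sketch (illustrative only): for a two-TCAM scenario the caller
+ * supplies key/invert buffers of scen->width (10) bytes; the priority is
+ * assumed to be one of the enum ice_acl_entry_prior values:
+ *
+ *     u8 keys[2 * ICE_AQC_ACL_KEY_WIDTH_BYTES] = { ... };
+ *     u8 inverts[2 * ICE_AQC_ACL_KEY_WIDTH_BYTES] = { ... };
+ *     u16 eidx;
+ *
+ *     status = ice_acl_add_entry(hw, scen, prior, keys, inverts,
+ *                                acts, acts_cnt, &eidx);
+ */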
+
+/**
+ * ice_acl_prog_act - Program a scenario's action memory
+ * @hw: pointer to the HW struct
+ * @scen: scenario to add the entry to
+ * @acts: pointer to a buffer containing formatted actions
+ * @acts_cnt: indicates the number of actions stored in "acts"
+ * @entry_idx: scenario relative index of the added flow entry
+ *
+ * Program a scenario's action memory
+ */
+enum ice_status
+ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
+                struct ice_acl_act_entry *acts, u8 acts_cnt,
+                u16 entry_idx)
+{
+       u8 entry_tcam, num_cscd, i, actx_idx = 0;
+       struct ice_aqc_actpair act_buf;
+       enum ice_status status = ICE_SUCCESS;
+       u16 idx;
+
+       if (entry_idx >= scen->num_entry)
+               return ICE_ERR_MAX_LIMIT;
+
+       ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM);
+
+       /* Determine number of cascaded TCAMs */
+       num_cscd = DIVIDE_AND_ROUND_UP(scen->width,
+                                      ICE_AQC_ACL_KEY_WIDTH_BYTES);
+
+       entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
+       idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + entry_idx);
+
+       i = ice_find_first_bit(scen->act_mem_bitmap,
+                              ICE_AQC_MAX_ACTION_MEMORIES);
+       while (i < ICE_AQC_MAX_ACTION_MEMORIES) {
+               struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i];
+
+               if (actx_idx >= acts_cnt)
+                       break;
+               if (mem->member_of_tcam >= entry_tcam &&
+                   mem->member_of_tcam < entry_tcam + num_cscd) {
+                       ice_memcpy(&act_buf.act[0], &acts[actx_idx],
+                                  sizeof(struct ice_acl_act_entry),
+                                  ICE_NONDMA_TO_NONDMA);
+
+                       if (++actx_idx < acts_cnt) {
+                               ice_memcpy(&act_buf.act[1], &acts[actx_idx],
+                                          sizeof(struct ice_acl_act_entry),
+                                          ICE_NONDMA_TO_NONDMA);
+                       }
+
+                       status = ice_aq_program_actpair(hw, i, idx, &act_buf,
+                                                       NULL);
+                       if (status) {
+                               ice_debug(hw, ICE_DBG_ACL,
+                                         "program actpair failed status: %d\n",
+                                         status);
+                               break;
+                       }
+                       actx_idx++;
+               }
+
+               i = ice_find_next_bit(scen->act_mem_bitmap,
+                                     ICE_AQC_MAX_ACTION_MEMORIES, i + 1);
+       }
+
+       if (!status && actx_idx < acts_cnt)
+               status = ICE_ERR_MAX_LIMIT;
+
+       return status;
+}
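+
+/* Note: actions are consumed two at a time above; each ice_aqc_actpair
+ * written to action memory 'i' carries acts[n] and, when one remains,
+ * acts[n + 1] (ICE_ACL_NUM_ACT_PER_ACT_PAIR == 2).
+ */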
+
+/**
+ * ice_acl_rem_entry - Remove a flow entry from an ACL scenario
+ * @hw: pointer to the HW struct
+ * @scen: scenario to remove the entry from
+ * @entry_idx: the scenario-relative index of the flow entry being removed
+ */
+enum ice_status
+ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx)
+{
+       struct ice_aqc_actpair act_buf;
+       struct ice_aqc_acl_data buf;
+       u8 entry_tcam, num_cscd, i;
+       enum ice_status status = ICE_SUCCESS;
+       u16 idx;
+
+       if (!scen)
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       if (entry_idx >= scen->num_entry)
+               return ICE_ERR_MAX_LIMIT;
+
+       if (!ice_is_bit_set(scen->entry_bitmap, entry_idx))
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       /* Determine number of cascaded TCAMs */
+       num_cscd = DIVIDE_AND_ROUND_UP(scen->width,
+                                      ICE_AQC_ACL_KEY_WIDTH_BYTES);
+
+       entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
+       idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + entry_idx);
+
+       /* invalidate the flow entry */
+       ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
+       for (i = 0; i < num_cscd; i++) {
+               status = ice_aq_program_acl_entry(hw, entry_tcam + i, idx, &buf,
+                                                 NULL);
+               if (status)
+                       ice_debug(hw, ICE_DBG_ACL,
+                                 "aq program acl entry failed status: %d\n",
+                                 status);
+       }
+
+       ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM);
+       i = ice_find_first_bit(scen->act_mem_bitmap,
+                              ICE_AQC_MAX_ACTION_MEMORIES);
+       while (i < ICE_AQC_MAX_ACTION_MEMORIES) {
+               struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i];
+
+               if (mem->member_of_tcam >= entry_tcam &&
+                   mem->member_of_tcam < entry_tcam + num_cscd) {
+                       /* Invalidate allocated action pairs */
+                       status = ice_aq_program_actpair(hw, i, idx, &act_buf,
+                                                       NULL);
+                       if (status)
+                               ice_debug(hw, ICE_DBG_ACL,
+                                         "program actpair failed.status: %d\n",
+                                         status);
+               }
+
+               i = ice_find_next_bit(scen->act_mem_bitmap,
+                                     ICE_AQC_MAX_ACTION_MEMORIES, i + 1);
+       }
+
+       ice_acl_scen_free_entry_idx(scen, entry_idx);
+
+       return status;
+}
+
+/**
+ * ice_acl_destroy_scen - Destroy an ACL scenario
+ * @hw: pointer to the HW struct
+ * @scen_id: ID of the scenario to remove
+ */
+enum ice_status ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id)
+{
+       struct ice_acl_scen *scen, *tmp_scen;
+       struct ice_flow_prof *p, *tmp;
+       enum ice_status status;
+
+       if (!hw->acl_tbl)
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       /* Remove profiles that use "scen_id" scenario */
+       LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[ICE_BLK_ACL],
+                                ice_flow_prof, l_entry)
+               if (p->cfg.scen && p->cfg.scen->id == scen_id) {
+                       status = ice_flow_rem_prof(hw, ICE_BLK_ACL, p->id);
+                       if (status) {
+                               ice_debug(hw, ICE_DBG_ACL,
+                                         "ice_flow_rem_prof failed. status: %d\n",
+                                         status);
+                               goto exit;
+                       }
+               }
+
+       /* Call the aq command to destroy the targeted scenario */
+       status = ice_aq_dealloc_acl_scen(hw, scen_id, NULL);
+
+       if (status) {
+               ice_debug(hw, ICE_DBG_ACL,
+                         "AQ de-allocation of scenario failed. status: %d\n",
+                         status);
+               goto exit;
+       }
+
+       /* Remove scenario from hw->acl_tbl->scens */
+       LIST_FOR_EACH_ENTRY_SAFE(scen, tmp_scen, &hw->acl_tbl->scens,
+                                ice_acl_scen, list_entry)
+               if (scen->id == scen_id) {
+                       LIST_DEL(&scen->list_entry);
+                       ice_free(hw, scen);
+               }
+exit:
+       return status;
+}
index 73f5e70..2c899b9 100644 (file)
@@ -419,6 +419,7 @@ struct ice_aqc_vsi_props {
 #define ICE_AQ_VSI_PROP_RXQ_MAP_VALID          BIT(6)
 #define ICE_AQ_VSI_PROP_Q_OPT_VALID            BIT(7)
 #define ICE_AQ_VSI_PROP_OUTER_UP_VALID         BIT(8)
+#define ICE_AQ_VSI_PROP_ACL_VALID              BIT(10)
 #define ICE_AQ_VSI_PROP_FLOW_DIR_VALID         BIT(11)
 #define ICE_AQ_VSI_PROP_PASID_VALID            BIT(12)
        /* switch section */
@@ -534,8 +535,16 @@ struct ice_aqc_vsi_props {
        u8 q_opt_reserved[3];
        /* outer up section */
        __le32 outer_up_table; /* same structure and defines as ingress tbl */
-       /* section 10 */
-       __le16 sect_10_reserved;
+       /* acl section */
+       __le16 acl_def_act;
+#define ICE_AQ_VSI_ACL_DEF_RX_PROF_S   0
+#define ICE_AQ_VSI_ACL_DEF_RX_PROF_M   (0xF << ICE_AQ_VSI_ACL_DEF_RX_PROF_S)
+#define ICE_AQ_VSI_ACL_DEF_RX_TABLE_S  4
+#define ICE_AQ_VSI_ACL_DEF_RX_TABLE_M  (0xF << ICE_AQ_VSI_ACL_DEF_RX_TABLE_S)
+#define ICE_AQ_VSI_ACL_DEF_TX_PROF_S   8
+#define ICE_AQ_VSI_ACL_DEF_TX_PROF_M   (0xF << ICE_AQ_VSI_ACL_DEF_TX_PROF_S)
+#define ICE_AQ_VSI_ACL_DEF_TX_TABLE_S  12
+#define ICE_AQ_VSI_ACL_DEF_TX_TABLE_M  (0xF << ICE_AQ_VSI_ACL_DEF_TX_TABLE_S)
        /* flow director section */
        __le16 fd_options;
 #define ICE_AQ_VSI_FD_ENABLE           BIT(0)
@@ -1694,6 +1703,7 @@ struct ice_aqc_nvm {
 #define ICE_AQC_NVM_ACTIV_SEL_OROM     BIT(4)
 #define ICE_AQC_NVM_ACTIV_SEL_NETLIST  BIT(5)
 #define ICE_AQC_NVM_SPECIAL_UPDATE     BIT(6)
+#define ICE_AQC_NVM_REVERT_LAST_ACTIV  BIT(6) /* Write Activate only */
 #define ICE_AQC_NVM_ACTIV_SEL_MASK     MAKEMASK(0x7, 3)
 #define ICE_AQC_NVM_FLASH_ONLY         BIT(7)
        __le16 module_typeid;
@@ -2010,6 +2020,418 @@ struct ice_aqc_clear_fd_table {
        u8 reserved[12];
 };
 
+/* ACL - allocate (indirect 0x0C10) table */
+#define ICE_AQC_ACL_KEY_WIDTH          40
+#define ICE_AQC_ACL_KEY_WIDTH_BYTES    5
+#define ICE_AQC_ACL_TCAM_DEPTH         512
+#define ICE_ACL_ENTRY_ALLOC_UNIT       64
+#define ICE_AQC_MAX_CONCURRENT_ACL_TBL 15
+#define ICE_AQC_MAX_ACTION_MEMORIES    20
+#define ICE_AQC_MAX_ACTION_ENTRIES     512
+#define ICE_AQC_ACL_SLICES             16
+#define ICE_AQC_ALLOC_ID_LESS_THAN_4K  0x1000
+/* The ACL block supports up to 8 actions (4 action pairs) per single output. */
+#define ICE_AQC_TBL_MAX_ACTION_PAIRS   4
+
+#define ICE_AQC_MAX_TCAM_ALLOC_UNITS   (ICE_AQC_ACL_TCAM_DEPTH / \
+                                        ICE_ACL_ENTRY_ALLOC_UNIT)
+#define ICE_AQC_ACL_ALLOC_UNITS                (ICE_AQC_ACL_SLICES * \
+                                        ICE_AQC_MAX_TCAM_ALLOC_UNITS)
+
+struct ice_aqc_acl_alloc_table {
+       __le16 table_width;
+       __le16 table_depth;
+       u8 act_pairs_per_entry;
+       /* For non-concurrent table allocation, this field must be set
+        * to zero (0); otherwise it specifies the number of concurrent
+        * tables whose AllocIDs are provided in the buffer. The newly
+        * allocated table is then concurrent with the tables identified
+        * by those AllocIDs.
+        */
+#define ICE_AQC_ACL_ALLOC_TABLE_TYPE_NONCONCURR        0
+       u8 table_type;
+       __le16 reserved;
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+/* Allocate ACL table command buffer format */
+struct ice_aqc_acl_alloc_table_data {
+       /* Dependent table AllocIDs. Each word in this 15-word array specifies
+        * a dependent table AllocID according to the count given in the
+        * "table_type" field. All unused words shall be set to 0xFFFF
+        */
+#define ICE_AQC_CONCURR_ID_INVALID     0xffff
+       __le16 alloc_ids[ICE_AQC_MAX_CONCURRENT_ACL_TBL];
+};
+
+/* ACL - deallocate (indirect 0x0C11) table
+ * ACL - allocate (indirect 0x0C12) action-pair
+ * ACL - deallocate (indirect 0x0C13) action-pair
+ */
+
+/* The following structure is common and used for deallocation of both
+ * ACL tables and action-pairs
+ */
+struct ice_aqc_acl_tbl_actpair {
+       /* Alloc ID of the table being released */
+       __le16 alloc_id;
+       u8 reserved[6];
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+/* This response structure is the same for alloc/dealloc table and
+ * alloc/dealloc action-pair
+ */
+struct ice_aqc_acl_generic {
+       /* If alloc_id is below 0x1000, the allocation failed due to
+        * unavailable resources; otherwise it is set by FW to identify
+        * the table allocation
+        */
+       __le16 alloc_id;
+
+       union {
+               /* to be used only in case of alloc/dealloc table */
+               struct {
+                       /* Index of the first TCAM block, otherwise set to 0xFF
+                        * for a failed allocation
+                        */
+                       u8 first_tcam;
+                       /* Index of the last TCAM block. This index shall be
+                        * set to the value of first_tcam for single TCAM block
+                        * allocation, otherwise set to 0xFF for a failed
+                        * allocation
+                        */
+                       u8 last_tcam;
+               } table;
+               /* reserved in case of alloc/dealloc action-pair */
+               struct {
+                       __le16 reserved;
+               } act_pair;
+       } ops;
+
+       /* index of the first entry (in both TCAM and action memories);
+        * set to 0xFF for a failed allocation
+        */
+       __le16 first_entry;
+       /* index of the last entry (in both TCAM and action memories);
+        * set to 0xFF for a failed allocation
+        */
+       __le16 last_entry;
+
+       /* Each act_mem element specifies the order of the action memory;
+        * unused elements are set to 0xFF
+        */
+       u8 act_mem[ICE_AQC_MAX_ACTION_MEMORIES];
+};
+
+/* ACL - allocate (indirect 0x0C14) scenario. This command doesn't have a
+ * separate response buffer since the original command buffer is updated with
+ * 'scen_id' in case of success
+ */
+struct ice_aqc_acl_alloc_scen {
+       union {
+               struct {
+                       u8 reserved[8];
+               } cmd;
+               struct {
+                       __le16 scen_id;
+                       u8 reserved[6];
+               } resp;
+       } ops;
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+/* ACL - de-allocate (direct 0x0C15) scenario. This command doesn't need a
+ * separate response buffer since nothing is returned as a response
+ * except the status.
+ */
+struct ice_aqc_acl_dealloc_scen {
+       __le16 scen_id;
+       u8 reserved[14];
+};
+
+/* ACL - update (direct 0x0C1B) scenario */
+/* ACL - query (direct 0x0C23) scenario */
+struct ice_aqc_acl_update_query_scen {
+       __le16 scen_id;
+       u8 reserved[6];
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+/* Input buffer format for the allocate/update ACL scenario commands; the
+ * same format is used for the response buffer of the query ACL scenario
+ * command.
+ * NOTE: de-allocate ACL scenario is a direct command and doesn't require a
+ * buffer, hence no buffer format.
+ */
+struct ice_aqc_acl_scen {
+       struct {
+               /* Byte [x] selection for the TCAM key. This value must be
+                * set to 0x0 for an unused TCAM.
+                * Only bits 6..0 are used in each byte; the MSB is reserved
+                */
+#define ICE_AQC_ACL_ALLOC_SCE_SELECT_M         0x7F
+#define ICE_AQC_ACL_BYTE_SEL_BASE              0x20
+#define ICE_AQC_ACL_BYTE_SEL_BASE_PID          0x3E
+#define ICE_AQC_ACL_BYTE_SEL_BASE_PKT_DIR      ICE_AQC_ACL_BYTE_SEL_BASE
+#define ICE_AQC_ACL_BYTE_SEL_BASE_RNG_CHK      0x3F
+               u8 tcam_select[5];
+               /* TCAM block entry masking. This value should be set to 0x0
+                * for an unused TCAM.
+                * Bit 0: masks TCAM entries 0-63
+                * Bit 1: masks TCAM entries 64-127
+                * Bits 2 to 7: follow the pattern of bits 0 and 1
+                */
+               u8 chnk_msk;
+#define ICE_AQC_ACL_ALLOC_SCE_START_CMP                BIT(0)
+#define ICE_AQC_ACL_ALLOC_SCE_START_SET                BIT(1)
+               u8 start_cmp_set;
+
+       } tcam_cfg[ICE_AQC_ACL_SLICES];
+
+       /* In each byte, bits 6..0: action memory association with a TCAM
+        * block; set to 0x0 for a disabled action memory.
+        * Bit 7: action memory enable for this scenario
+        */
+#define ICE_AQC_ACL_SCE_ACT_MEM_TCAM_ASSOC_M   0x7F
+#define ICE_AQC_ACL_SCE_ACT_MEM_EN             BIT(7)
+       u8 act_mem_cfg[ICE_AQC_MAX_ACTION_MEMORIES];
+};
+
+/* ACL - allocate (indirect 0x0C16) counters */
+struct ice_aqc_acl_alloc_counters {
+       /* Number of contiguous counters requested. Min value is 1 and
+        * max value is 255
+        */
+#define ICE_AQC_ACL_ALLOC_CNT_MIN_AMT  0x1
+#define ICE_AQC_ACL_ALLOC_CNT_MAX_AMT  0xFF
+       u8 counter_amount;
+
+       /* Counter type: 'single counter' which can be configured to count
+        * either bytes or packets
+        */
+#define ICE_AQC_ACL_CNT_TYPE_SINGLE    0x0
+
+       /* Counter type: 'counter pair' which counts number of bytes and number
+        * of packets.
+        */
+#define ICE_AQC_ACL_CNT_TYPE_DUAL      0x1
+       /* requested counter type, single/dual */
+       u8 counters_type;
+
+       /* counter bank allocation shall be 0-3 for 'byte or packet counter' */
+#define ICE_AQC_ACL_MAX_CNT_SINGLE     0x3
+/* counter bank allocation shall be 0-1 for 'byte and packet counter dual' */
+#define ICE_AQC_ACL_MAX_CNT_DUAL       0x1
+       /* requested counter bank allocation */
+       u8 bank_alloc;
+
+       u8 reserved;
+
+       union {
+               /* Applicable only in case of command */
+               struct {
+                       u8 reserved[12];
+               } cmd;
+               /* Applicable only in case of response */
+#define ICE_AQC_ACL_ALLOC_CNT_INVAL    0xFFFF
+               struct {
+                       /* Index of first allocated counter. 0xFFFF in case
+                        * of unsuccessful allocation
+                        */
+                       __le16 first_counter;
+                       /* Index of last allocated counter. 0xFFFF in case
+                        * of unsuccessful allocation
+                        */
+                       __le16 last_counter;
+                       u8 rsvd[8];
+               } resp;
+       } ops;
+};
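+
+/* Illustrative example (a sketch): allocating a single packet counter in
+ * bank 0 would set counter_amount = 1,
+ * counters_type = ICE_AQC_ACL_CNT_TYPE_SINGLE and bank_alloc = 0; on
+ * success the response then reports first_counter == last_counter.
+ */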
+
+/* ACL - de-allocate (direct 0x0C17) counters */
+struct ice_aqc_acl_dealloc_counters {
+       /* first counter being released */
+       __le16 first_counter;
+       /* last counter being released */
+       __le16 last_counter;
+       /* requested counter type, single/dual */
+       u8 counters_type;
+       /* requested counter bank allocation */
+       u8 bank_alloc;
+       u8 reserved[10];
+};
+
+/* ACL - de-allocate (direct 0x0C1A) resources. Used by SW to release all the
+ * resources allocated to it using a single command
+ */
+struct ice_aqc_acl_dealloc_res {
+       u8 reserved[16];
+};
+
+/* ACL - program actionpair (indirect 0x0C1C) */
+/* ACL - query actionpair (indirect 0x0C25) */
+struct ice_aqc_acl_actpair {
+       /* action mem index to program/update */
+       u8 act_mem_index;
+       u8 reserved;
+       /* The entry index in action memory to be programmed/updated */
+       __le16 act_entry_index;
+       __le32 reserved2;
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+/* Input buffer format for program/query action-pair admin command */
+struct ice_acl_act_entry {
+       /* Action priority, values must be between 0..7 */
+#define ICE_AQC_ACT_PRIO_VALID_MAX     7
+#define ICE_AQC_ACT_PRIO_MSK           MAKEMASK(0xff, 0)
+       u8 prio;
+       /* Action meta-data identifier. This field should be set to 0x0
+        * for a NOP action
+        */
+#define ICE_AQC_ACT_MDID_S             8
+#define ICE_AQC_ACT_MDID_MSK           MAKEMASK(0xff, ICE_AQC_ACT_MDID_S)
+       u8 mdid;
+       /* Action value */
+#define ICE_AQC_ACT_VALUE_S            16
+#define ICE_AQC_ACT_VALUE_MSK          MAKEMASK(0xffff, ICE_AQC_ACT_VALUE_S)
+       __le16 value;
+};
+
+#define ICE_ACL_NUM_ACT_PER_ACT_PAIR 2
+struct ice_aqc_actpair {
+       struct ice_acl_act_entry act[ICE_ACL_NUM_ACT_PER_ACT_PAIR];
+};
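+
+/* Illustrative example (a sketch, not driver code): a counter action
+ * referencing counter index 'cntr_idx' could be formatted as below; the
+ * concrete MDID value is hardware-defined and elided here:
+ *
+ *     struct ice_acl_act_entry e = { 0 };
+ *
+ *     e.prio = 3;                      (valid range is 0..7)
+ *     e.mdid = counter_mdid;           (hardware metadata identifier)
+ *     e.value = CPU_TO_LE16(cntr_idx);
+ */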
+
+/* Generic format used to describe either input or response buffer
+ * for admin commands related to ACL profile
+ */
+struct ice_aqc_acl_prof_generic_frmt {
+       /* The first byte of the byte selection base is reserved to keep the
+        * first byte of the field vector where the packet direction info is
+        * available. Thus we should start at index 1 of the field vector to
+        * map its entries to the byte selection base.
+        */
+#define ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX    1
+       /* In each byte:
+        * Bit 0..5 = Byte selection for the byte selection base from the
+        * extracted fields (expressed as byte offset in extracted fields).
+        * Applicable values are 0..63
+        * Bit 6..7 = Reserved
+        */
+#define ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS                30
+       u8 byte_selection[ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS];
+       /* In each byte:
+        * Bit 0..4 = Word selection for the word selection base from the
+        * extracted fields (expressed as word offset in extracted fields).
+        * Applicable values are 0..31
+        * Bit 5..7 = Reserved
+        */
+#define ICE_AQC_ACL_PROF_WORD_SEL_ELEMS                32
+       u8 word_selection[ICE_AQC_ACL_PROF_WORD_SEL_ELEMS];
+       /* In each byte:
+        * Bit 0..3 = Double word selection for the double-word selection base
+        * from the extracted fields (expressed as double-word offset in
+        * extracted fields).
+        * Applicable values are 0..15
+        * Bit 4..7 = Reserved
+        */
+#define ICE_AQC_ACL_PROF_DWORD_SEL_ELEMS       15
+       u8 dword_selection[ICE_AQC_ACL_PROF_DWORD_SEL_ELEMS];
+       /* Scenario numbers for individual Physical Functions (PFs) */
+#define ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS     8
+       u8 pf_scenario_num[ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS];
+};
+
+/* ACL - program ACL profile extraction (indirect 0x0C1D) */
+/* ACL - program ACL profile ranges (indirect 0x0C1E) */
+/* ACL - query ACL profile (indirect 0x0C21) */
+/* ACL - query ACL profile ranges (indirect 0x0C22) */
+struct ice_aqc_acl_profile {
+       u8 profile_id; /* Programmed/Updated profile ID */
+       u8 reserved[7];
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+/* The input buffer format for the program profile extraction admin command
+ * and the response buffer format for the query profile admin command are as
+ * defined in struct ice_aqc_acl_prof_generic_frmt
+ */
+
+/* Input buffer format for the program profile ranges and query profile
+ * ranges admin commands. The same format is used for the response buffer of
+ * the query profile ranges command
+ */
+struct ice_acl_rng_data {
+       /* The range checker output shall be sent when the value
+        * related to this range checker is lower than the low boundary
+        */
+       __be16 low_boundary;
+       /* The range checker output shall be sent when the value
+        * related to this range checker is higher than the high boundary
+        */
+       __be16 high_boundary;
+       /* A '0' in a bit position clears the corresponding bit of the
+        * input to the range checker
+        */
+       __be16 mask;
+};
+
+struct ice_aqc_acl_profile_ranges {
+#define ICE_AQC_ACL_PROF_RANGES_NUM_CFG 8
+       struct ice_acl_rng_data checker_cfg[ICE_AQC_ACL_PROF_RANGES_NUM_CFG];
+};
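+
+/* Illustrative example (a sketch; boundary semantics as described in
+ * struct ice_acl_rng_data): configuring range checker 0 for a 16-bit
+ * field with boundaries 1000 and 2000, comparing all input bits:
+ *
+ *     checker_cfg[0].low_boundary = CPU_TO_BE16(1000);
+ *     checker_cfg[0].high_boundary = CPU_TO_BE16(2000);
+ *     checker_cfg[0].mask = CPU_TO_BE16(0xFFFF);
+ */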
+
+/* ACL - program ACL entry (indirect 0x0C20) */
+/* ACL - query ACL entry (indirect 0x0C24) */
+struct ice_aqc_acl_entry {
+       u8 tcam_index; /* Updated TCAM block index */
+       u8 reserved;
+       __le16 entry_index; /* Updated entry index */
+       __le32 reserved2;
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+/* Input buffer format in case of program ACL entry and response buffer format
+ * in case of query ACL entry
+ */
+struct ice_aqc_acl_data {
+       /* Entry key and entry key invert are each 40 bits wide.
+        * Bytes 0..4: entry key; bytes 5..7: reserved
+        * Bytes 8..12: entry key invert; bytes 13..15: reserved
+        */
+       struct {
+               u8 val[5];
+               u8 reserved[3];
+       } entry_key, entry_key_invert;
+};
+
+/* ACL - query ACL counter (direct 0x0C27) */
+struct ice_aqc_acl_query_counter {
+       /* Queried counter index */
+       __le16 counter_index;
+       /* Queried counter bank */
+       u8 counter_bank;
+       union {
+               struct {
+                       u8 reserved[13];
+               } cmd;
+               struct {
+                       /* Holds counter value/packet counter value */
+                       u8 val[5];
+                       u8 reserved[8];
+               } resp;
+       } ops;
+};
+
 /* Add Tx LAN Queues (indirect 0x0C30) */
 struct ice_aqc_add_txqs {
        u8 num_qgrps;
@@ -2277,6 +2699,18 @@ struct ice_aq_desc {
                struct ice_aqc_get_set_rss_lut get_set_rss_lut;
                struct ice_aqc_get_set_rss_key get_set_rss_key;
                struct ice_aqc_clear_fd_table clear_fd_table;
+               struct ice_aqc_acl_alloc_table alloc_table;
+               struct ice_aqc_acl_tbl_actpair tbl_actpair;
+               struct ice_aqc_acl_alloc_scen alloc_scen;
+               struct ice_aqc_acl_dealloc_scen dealloc_scen;
+               struct ice_aqc_acl_update_query_scen update_query_scen;
+               struct ice_aqc_acl_alloc_counters alloc_counters;
+               struct ice_aqc_acl_dealloc_counters dealloc_counters;
+               struct ice_aqc_acl_dealloc_res dealloc_res;
+               struct ice_aqc_acl_entry program_query_entry;
+               struct ice_aqc_acl_actpair program_query_actpair;
+               struct ice_aqc_acl_profile profile;
+               struct ice_aqc_acl_query_counter query_counter;
                struct ice_aqc_add_txqs add_txqs;
                struct ice_aqc_dis_txqs dis_txqs;
                struct ice_aqc_move_txqs move_txqs;
@@ -2496,6 +2930,27 @@ enum ice_adminq_opc {
        ice_aqc_opc_get_rss_key                         = 0x0B04,
        ice_aqc_opc_get_rss_lut                         = 0x0B05,
        ice_aqc_opc_clear_fd_table                      = 0x0B06,
+       /* ACL commands */
+       ice_aqc_opc_alloc_acl_tbl                       = 0x0C10,
+       ice_aqc_opc_dealloc_acl_tbl                     = 0x0C11,
+       ice_aqc_opc_alloc_acl_actpair                   = 0x0C12,
+       ice_aqc_opc_dealloc_acl_actpair                 = 0x0C13,
+       ice_aqc_opc_alloc_acl_scen                      = 0x0C14,
+       ice_aqc_opc_dealloc_acl_scen                    = 0x0C15,
+       ice_aqc_opc_alloc_acl_counters                  = 0x0C16,
+       ice_aqc_opc_dealloc_acl_counters                = 0x0C17,
+       ice_aqc_opc_dealloc_acl_res                     = 0x0C1A,
+       ice_aqc_opc_update_acl_scen                     = 0x0C1B,
+       ice_aqc_opc_program_acl_actpair                 = 0x0C1C,
+       ice_aqc_opc_program_acl_prof_extraction         = 0x0C1D,
+       ice_aqc_opc_program_acl_prof_ranges             = 0x0C1E,
+       ice_aqc_opc_program_acl_entry                   = 0x0C20,
+       ice_aqc_opc_query_acl_prof                      = 0x0C21,
+       ice_aqc_opc_query_acl_prof_ranges               = 0x0C22,
+       ice_aqc_opc_query_acl_scen                      = 0x0C23,
+       ice_aqc_opc_query_acl_entry                     = 0x0C24,
+       ice_aqc_opc_query_acl_actpair                   = 0x0C25,
+       ice_aqc_opc_query_acl_counter                   = 0x0C27,
 
        /* Tx queue handling commands/events */
        ice_aqc_opc_add_txqs                            = 0x0C30,
index 90e7e08..c78782e 100644 (file)
@@ -956,19 +956,25 @@ void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *fltr)
  * ice_fdir_update_cntrs - increment / decrement filter counter
  * @hw: pointer to hardware structure
  * @flow: filter flow type
+ * @acl_fltr: true indicates an ACL filter
  * @add: true implies filters added
  */
 void
-ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add)
+ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow,
+                     bool acl_fltr, bool add)
 {
        int incr;
 
        incr = (add) ? 1 : -1;
        hw->fdir_active_fltr += incr;
-       if (flow == ICE_FLTR_PTYPE_NONF_NONE || flow >= ICE_FLTR_PTYPE_MAX)
+       if (flow == ICE_FLTR_PTYPE_NONF_NONE || flow >= ICE_FLTR_PTYPE_MAX) {
                ice_debug(hw, ICE_DBG_SW, "Unknown filter type %d\n", flow);
-       else
-               hw->fdir_fltr_cnt[flow] += incr;
+       } else {
+               if (acl_fltr)
+                       hw->acl_fltr_cnt[flow] += incr;
+               else
+                       hw->fdir_fltr_cnt[flow] += incr;
+       }
 }
 
 /**
index c811f76..ff42d2e 100644 (file)
@@ -204,6 +204,8 @@ struct ice_fdir_fltr {
        u16 cnt_index;
        u8 fdid_prio;
        u32 fltr_id;
+       /* Set to true for an ACL filter */
+       bool acl_fltr;
 };
 
 /* Dummy packet filter definition structure. */
@@ -234,6 +236,7 @@ bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
 struct ice_fdir_fltr *
 ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx);
 void
-ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add);
+ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow,
+                     bool acl_fltr, bool add);
 void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
 #endif /* _ICE_FDIR_H_ */
index 0c64bf6..077325a 100644 (file)
@@ -3528,7 +3528,8 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
 
                LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
                                         ice_flow_entry, l_entry)
-                       ice_flow_rem_entry(hw, ICE_FLOW_ENTRY_HNDL(e));
+                       ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
+                                          ICE_FLOW_ENTRY_HNDL(e));
 
                LIST_DEL(&p->l_entry);
                if (p->acts)
index 17fd242..f480153 100644 (file)
@@ -1023,6 +1023,126 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
        return status;
 }
 
+/**
+ * ice_flow_sel_acl_scen - select the best-fit scenario for a flow profile
+ * @hw: pointer to the hardware structure
+ * @params: information about the flow to be processed
+ *
+ * This function selects the scenario with the smallest effective width
+ * that can still accommodate the profile's entry length
+ */
+static enum ice_status
+ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
+{
+       /* Find the best-fit scenario for the provided match width */
+       struct ice_acl_scen *cand_scen = NULL, *scen;
+
+       if (!hw->acl_tbl)
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       /* Loop through each scenario and match against the scenario width
+        * to select the specific scenario
+        */
+       LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
+               if (scen->eff_width >= params->entry_length &&
+                   (!cand_scen || cand_scen->eff_width > scen->eff_width))
+                       cand_scen = scen;
+       if (!cand_scen)
+               return ICE_ERR_DOES_NOT_EXIST;
+
+       params->prof->cfg.scen = cand_scen;
+
+       return ICE_SUCCESS;
+}
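+
+/* Note: "best fit" means the smallest effective width that still fits.
+ * E.g. with scenarios of eff_width 12 and 27 present, an entry_length of
+ * 10 selects the 12-byte scenario.
+ */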
+
+/**
+ * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
+ * @params: information about the flow to be processed
+ */
+static enum ice_status
+ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
+{
+       u16 index, i, range_idx = 0;
+
+       index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+
+       for (i = 0; i < params->prof->segs_cnt; i++) {
+               struct ice_flow_seg_info *seg = &params->prof->segs[i];
+               u64 match = seg->match;
+               u8 j;
+
+               for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
+                       struct ice_flow_fld_info *fld;
+                       const u64 bit = BIT_ULL(j);
+
+                       if (!(match & bit))
+                               continue;
+
+                       fld = &seg->fields[j];
+                       fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
+
+                       if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
+                               fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
+
+                               /* Range checking is only supported for
+                                * single words
+                                */
+                               if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
+                                                       fld->xtrct.disp,
+                                                       BITS_PER_BYTE * 2) > 1)
+                                       return ICE_ERR_PARAM;
+
+                               /* Ranges must define low and high values */
+                               if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
+                                   fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
+                                       return ICE_ERR_PARAM;
+
+                               fld->entry.val = range_idx++;
+                       } else {
+                               /* Store adjusted byte-length of field for later
+                                * use, taking into account potential
+                                * non-byte-aligned displacement
+                                */
+                               fld->entry.last = DIVIDE_AND_ROUND_UP
+                                       (ice_flds_info[j].size +
+                                        (fld->xtrct.disp % BITS_PER_BYTE),
+                                        BITS_PER_BYTE);
+                               fld->entry.val = index;
+                               index += fld->entry.last;
+                       }
+
+                       match &= ~bit;
+               }
+
+               for (j = 0; j < seg->raws_cnt; j++) {
+                       struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
+
+                       raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
+                       raw->info.entry.val = index;
+                       raw->info.entry.last = raw->info.src.last;
+                       index += raw->info.entry.last;
+               }
+       }
+
+       /* Currently, only the byte selection base is supported, which allows
+        * for an effective entry size of at most 30 bytes. Reject anything
+        * larger.
+        */
+       if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
+               return ICE_ERR_PARAM;
+
+       /* Only 8 range checkers per profile, reject anything trying to use
+        * more
+        */
+       if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
+               return ICE_ERR_PARAM;
+
+       /* Store # bytes required for entry for later use */
+       params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+
+       return ICE_SUCCESS;
+}
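+
+/* Worked example (a sketch, assuming byte-aligned extraction): a profile
+ * matching a 4-byte IPv4 source address plus a source-port range places
+ * the address at byte-selection indices 1..4 (index starts at
+ * ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX == 1) and assigns range checker 0
+ * to the port field (fld->entry.val == 0), giving an entry_length of 4.
+ */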
+
 /**
  * ice_flow_proc_segs - process all packet segments associated with a profile
  * @hw: pointer to the HW struct
@@ -1048,6 +1168,14 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
                 */
                status = ICE_SUCCESS;
                break;
+       case ICE_BLK_ACL:
+               status = ice_flow_acl_def_entry_frmt(params);
+               if (status)
+                       return status;
+               status = ice_flow_sel_acl_scen(hw, params);
+               if (status)
+                       return status;
+               break;
        case ICE_BLK_FD:
                status = ICE_SUCCESS;
                break;
@@ -1166,6 +1294,11 @@ ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
        if (entry->entry)
                ice_free(hw, entry->entry);
 
+       if (entry->range_buf) {
+               ice_free(hw, entry->range_buf);
+               entry->range_buf = NULL;
+       }
+
        if (entry->acts) {
                ice_free(hw, entry->acts);
                entry->acts = NULL;
@@ -1175,17 +1308,155 @@ ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
        ice_free(hw, entry);
 }
 
+#define ICE_ACL_INVALID_SCEN   0x3f
+
+/**
+ * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ * @buf: destination buffer the function writes the partial xtrct sequence to
+ *
+ * Returns ICE_SUCCESS if no PF is associated with the given profile,
+ * ICE_ERR_IN_USE if at least one PF is associated with it, and another
+ * error code on a real error.
+ */
+static enum ice_status
+ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
+                           struct ice_aqc_acl_prof_generic_frmt *buf)
+{
+       enum ice_status status;
+       u8 prof_id = 0;
+
+       status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
+       if (status)
+               return status;
+
+       status = ice_query_acl_prof(hw, prof_id, buf, NULL);
+       if (status)
+               return status;
+
+       /* If every PF's associated scenario number is 0, or every one is
+        * ICE_ACL_INVALID_SCEN (63), then the given profile has not been
+        * configured yet.
+        */
+       if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
+           buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
+           buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
+           buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
+               return ICE_SUCCESS;
+
+       if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
+           buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
+           buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
+           buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
+           buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
+           buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
+           buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
+           buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
+               return ICE_SUCCESS;
+       else
+               return ICE_ERR_IN_USE;
+}
+
+/**
+ * ice_flow_acl_free_act_cntr - Free counters used by the ACL rule's actions
+ * @hw: pointer to the hardware structure
+ * @acts: array of actions to be performed on a match
+ * @acts_cnt: number of actions
+ */
+static enum ice_status
+ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
+                          u8 acts_cnt)
+{
+       int i;
+
+       for (i = 0; i < acts_cnt; i++) {
+               if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
+                   acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
+                   acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
+                       struct ice_acl_cntrs cntrs;
+                       enum ice_status status;
+
+                       cntrs.bank = 0; /* Only bank0 for the moment */
+                       cntrs.first_cntr =
+                                       LE16_TO_CPU(acts[i].data.acl_act.value);
+                       cntrs.last_cntr =
+                                       LE16_TO_CPU(acts[i].data.acl_act.value);
+
+                       if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
+                               cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
+                       else
+                               cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
+
+                       status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
+                       if (status)
+                               return status;
+               }
+       }
+       return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ *
+ * Disassociate the scenario from the profile for the VSI's PF.
+ */
+static enum ice_status
+ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
+{
+       struct ice_aqc_acl_prof_generic_frmt buf;
+       enum ice_status status = ICE_SUCCESS;
+       u8 prof_id = 0;
+
+       ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
+
+       status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
+       if (status)
+               return status;
+
+       status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
+       if (status)
+               return status;
+
+       /* Clear the scenario for this PF */
+       buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
+       status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
+
+       return status;
+}
+
 /**
  * ice_flow_rem_entry_sync - Remove a flow entry
  * @hw: pointer to the HW struct
+ * @blk: classification stage
  * @entry: flow entry to be removed
  */
 static enum ice_status
-ice_flow_rem_entry_sync(struct ice_hw *hw, struct ice_flow_entry *entry)
+ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
+                       struct ice_flow_entry *entry)
 {
        if (!entry)
                return ICE_ERR_BAD_PTR;
 
+       if (blk == ICE_BLK_ACL) {
+               enum ice_status status;
+
+               if (!entry->prof)
+                       return ICE_ERR_BAD_PTR;
+
+               status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
+                                          entry->scen_entry_idx);
+               if (status)
+                       return status;
+
+               /* Release any ACL counters used by this entry's actions */
+               if (entry->acts_cnt && entry->acts)
+                       ice_flow_acl_free_act_cntr(hw, entry->acts,
+                                                  entry->acts_cnt);
+       }
+
        LIST_DEL(&entry->l_entry);
 
        ice_dealloc_flow_entry(hw, entry);
@@ -1311,7 +1582,7 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
 
                LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
                                         l_entry) {
-                       status = ice_flow_rem_entry_sync(hw, e);
+                       status = ice_flow_rem_entry_sync(hw, blk, e);
                        if (status)
                                break;
                }
@@ -1319,6 +1590,40 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
                ice_release_lock(&prof->entries_lock);
        }
 
+       if (blk == ICE_BLK_ACL) {
+               struct ice_aqc_acl_profile_ranges query_rng_buf;
+               struct ice_aqc_acl_prof_generic_frmt buf;
+               u8 prof_id = 0;
+
+               /* Disassociate the scenario from the profile for the PF */
+               status = ice_flow_acl_disassoc_scen(hw, prof);
+               if (status)
+                       return status;
+
+               /* Clear the range-checker if the profile ID is no longer
+                * used by any PF
+                */
+               status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
+               if (status && status != ICE_ERR_IN_USE) {
+                       return status;
+               } else if (!status) {
+                       /* Clear the range-checker value for profile ID */
+                       ice_memset(&query_rng_buf, 0,
+                                  sizeof(struct ice_aqc_acl_profile_ranges),
+                                  ICE_NONDMA_MEM);
+
+                       status = ice_flow_get_hw_prof(hw, blk, prof->id,
+                                                     &prof_id);
+                       if (status)
+                               return status;
+
+                       status = ice_prog_acl_prof_ranges(hw, prof_id,
+                                                         &query_rng_buf, NULL);
+                       if (status)
+                               return status;
+               }
+       }
+
        /* Remove all hardware profiles associated with this flow profile */
        status = ice_rem_prof(hw, blk, prof->id);
        if (!status) {
@@ -1332,6 +1637,99 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
        return status;
 }
 
+/**
+ * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for a single field
+ * @buf: destination buffer the function writes the partial xtrct sequence to
+ * @info: info about the field
+ */
+static void
+ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
+                              struct ice_flow_fld_info *info)
+{
+       u16 dst, i;
+       u8 src;
+
+       src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
+               info->xtrct.disp / BITS_PER_BYTE;
+       dst = info->entry.val;
+       for (i = 0; i < info->entry.last; i++)
+               /* HW stores field vector words in LE, convert words back to BE
+                * so constructed entries will end up in network order
+                */
+               buf->byte_selection[dst++] = src++ ^ 1;
+}
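The src++ ^ 1 selection above works because XOR-ing a byte index with 1 swaps each even/odd pair (0<->1, 2<->3, and so on); since the field vector stores 16-bit words little-endian, selecting byte src ^ 1 yields each word's bytes in network order. A standalone sketch of the mapping, illustrative only and not part of this patch:

	/* Copy a buffer of LE 16-bit words, swapping each byte pair to
	 * produce network (big-endian) order; nbytes must be even.
	 */
	static void demo_words_le_to_be(const u8 *src, u8 *dst, u16 nbytes)
	{
		u16 i;

		for (i = 0; i < nbytes; i++)
			dst[i] = src[i ^ 1];	/* 0<->1, 2<->3, ... */
	}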
+
+/**
+ * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ */
+static enum ice_status
+ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
+{
+       struct ice_aqc_acl_prof_generic_frmt buf;
+       struct ice_flow_fld_info *info;
+       enum ice_status status;
+       u8 prof_id = 0;
+       u16 i;
+
+       ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
+
+       status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
+       if (status)
+               return status;
+
+       status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
+       if (status && status != ICE_ERR_IN_USE)
+               return status;
+
+       if (!status) {
+               /* Program the profile dependent configuration. This is done
+                * only once regardless of the number of PFs using that profile
+                */
+               ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
+
+               for (i = 0; i < prof->segs_cnt; i++) {
+                       struct ice_flow_seg_info *seg = &prof->segs[i];
+                       u64 match = seg->match;
+                       u16 j;
+
+                       for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
+                               const u64 bit = BIT_ULL(j);
+
+                               if (!(match & bit))
+                                       continue;
+
+                               info = &seg->fields[j];
+
+                               if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
+                                       buf.word_selection[info->entry.val] =
+                                                               info->xtrct.idx;
+                               else
+                                       ice_flow_acl_set_xtrct_seq_fld(&buf,
+                                                                      info);
+
+                               match &= ~bit;
+                       }
+
+                       for (j = 0; j < seg->raws_cnt; j++) {
+                               info = &seg->raws[j].info;
+                               ice_flow_acl_set_xtrct_seq_fld(&buf, info);
+                       }
+               }
+
+               ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
+                          ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
+                          ICE_NONDMA_MEM);
+       }
+
+       /* Update the current PF */
+       buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
+       status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
+
+       return status;
+}
+
 /**
  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
  * @hw: pointer to the hardware structure
@@ -1377,6 +1775,11 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
        enum ice_status status = ICE_SUCCESS;
 
        if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
+               if (blk == ICE_BLK_ACL) {
+                       status = ice_flow_acl_set_xtrct_seq(hw, prof);
+                       if (status)
+                               return status;
+               }
                status = ice_add_prof_id_flow(hw, blk,
                                              ice_get_hw_vsi_num(hw,
                                                                 vsi_handle),
@@ -1558,6 +1961,682 @@ u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
        return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
 }
 
+/**
+ * ice_flow_acl_check_actions - Checks the ACL rule's actions
+ * @hw: pointer to the hardware structure
+ * @acts: array of actions to be performed on a match
+ * @acts_cnt: number of actions
+ * @cnt_alloc: indicates if an ACL counter has been allocated
+ */
+static enum ice_status
+ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
+                          u8 acts_cnt, bool *cnt_alloc)
+{
+       ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
+       int i;
+
+       ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
+       *cnt_alloc = false;
+
+       if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
+               return ICE_ERR_OUT_OF_RANGE;
+
+       for (i = 0; i < acts_cnt; i++) {
+               if (acts[i].type != ICE_FLOW_ACT_NOP &&
+                   acts[i].type != ICE_FLOW_ACT_DROP &&
+                   acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
+                   acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
+                       return ICE_ERR_CFG;
+
+               /* If the caller wants to add two actions of the same type,
+                * it is considered an invalid configuration.
+                */
+               if (ice_test_and_set_bit(acts[i].type, dup_check))
+                       return ICE_ERR_PARAM;
+       }
+
+       /* Check whether ACL counters are needed */
+       for (i = 0; i < acts_cnt; i++) {
+               if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
+                   acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
+                   acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
+                       struct ice_acl_cntrs cntrs;
+                       enum ice_status status;
+
+                       cntrs.amount = 1;
+                       cntrs.bank = 0; /* Only bank0 for the moment */
+
+                       if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
+                               cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
+                       else
+                               cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
+
+                       status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
+                       if (status)
+                               return status;
+                       /* Counter index within the bank */
+                       acts[i].data.acl_act.value =
+                                               CPU_TO_LE16(cntrs.first_cntr);
+                       *cnt_alloc = true;
+               }
+       }
+
+       return ICE_SUCCESS;
+}
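As the checks above imply, a valid ACL action list has at most ICE_FLOW_ACL_MAX_NUM_ACT entries, each of an allowed type with no type repeated; any needed counter index is written back into the action by this function, not by the caller. A minimal hedged sketch of building such a list (demo_build_acl_actions is hypothetical and assumes it sits alongside the static helper):

	static enum ice_status demo_build_acl_actions(struct ice_hw *hw)
	{
		struct ice_flow_action acl_acts[ICE_FLOW_ACL_MAX_NUM_ACT];
		bool cnt_alloc;

		acl_acts[0].type = ICE_FLOW_ACT_DROP;	  /* drop on match */
		acl_acts[1].type = ICE_FLOW_ACT_CNTR_PKT; /* count matches */

		/* On success, acl_acts[1].data.acl_act.value holds the
		 * bank-0 counter index and cnt_alloc is set to true.
		 */
		return ice_flow_acl_check_actions(hw, acl_acts, 2, &cnt_alloc);
	}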
+
+/**
+ * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
+ * @fld: number of the given field
+ * @info: info about field
+ * @range_buf: range checker configuration buffer
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @range: Input/output param indicating which range checkers are being used
+ */
+static void
+ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
+                             struct ice_aqc_acl_profile_ranges *range_buf,
+                             u8 *data, u8 *range)
+{
+       u16 new_mask;
+
+       /* If not specified, default mask is all bits in field */
+       new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
+                   BIT(ice_flds_info[fld].size) - 1 :
+                   (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
+
+       /* If the mask is 0, then we don't need to worry about this input
+        * range checker value.
+        */
+       if (new_mask) {
+               u16 new_high =
+                       (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
+               u16 new_low =
+                       (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
+               u8 range_idx = info->entry.val;
+
+               range_buf->checker_cfg[range_idx].low_boundary =
+                       CPU_TO_BE16(new_low);
+               range_buf->checker_cfg[range_idx].high_boundary =
+                       CPU_TO_BE16(new_high);
+               range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
+
+               /* Indicate which range checker is being used */
+               *range |= BIT(range_idx);
+       }
+}
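As a worked example of the arithmetic above (values illustrative): for a 16-bit field with no caller-supplied mask and a zero extraction displacement, the checker compares every bit, with boundaries taken straight from the user's data in big-endian form:

	u16 size = 16;				/* ice_flds_info[fld].size */
	u16 mask = (u16)(BIT(size) - 1);	/* 0xFFFF: check every bit */
	u16 low = CPU_TO_BE16(1024);		/* *(u16 *)(data + src.val) */
	u16 high = CPU_TO_BE16(2048);		/* *(u16 *)(data + src.last) */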
+
+/**
+ * ice_flow_acl_frmt_entry_fld - Partially format an ACL entry for a given field
+ * @fld: number of the given field
+ * @info: info about the field
+ * @buf: buffer containing the entry
+ * @dontcare: buffer containing don't care mask for entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ */
+static void
+ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
+                           u8 *dontcare, u8 *data)
+{
+       u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
+       bool use_mask = false;
+       u8 disp;
+
+       src = info->src.val;
+       mask = info->src.mask;
+       dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+       disp = info->xtrct.disp % BITS_PER_BYTE;
+
+       if (mask != ICE_FLOW_FLD_OFF_INVAL)
+               use_mask = true;
+
+       for (k = 0; k < info->entry.last; k++, dst++) {
+               /* Add overflow bits from previous byte */
+               buf[dst] = (tmp_s & 0xff00) >> 8;
+
+               /* If the mask is not valid, tmp_m is always zero, so this
+                * just sets dontcare to 0 (no masked bits). If the mask is
+                * valid, this pulls in the mask's overflow bits from the
+                * previous byte
+                */
+               dontcare[dst] = (tmp_m & 0xff00) >> 8;
+
+               /* If there is displacement, the last byte will only contain
+                * displaced data, and there is no more data to read from the
+                * user buffer, so skip it to avoid reading beyond the end of
+                * the user buffer
+                */
+               if (!disp || k < info->entry.last - 1) {
+                       /* Store shifted data to use in next byte */
+                       tmp_s = data[src++] << disp;
+
+                       /* Add current (shifted) byte */
+                       buf[dst] |= tmp_s & 0xff;
+
+                       /* Handle mask if valid */
+                       if (use_mask) {
+                               tmp_m = (~data[mask++] & 0xff) << disp;
+                               dontcare[dst] |= tmp_m & 0xff;
+                       }
+               }
+       }
+
+       /* Fill in don't care bits at beginning of field */
+       if (disp) {
+               dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+               for (k = 0; k < disp; k++)
+                       dontcare[dst] |= BIT(k);
+       }
+
+       end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
+
+       /* Fill in don't care bits at end of field */
+       if (end_disp) {
+               dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
+                     info->entry.last - 1;
+               for (k = end_disp; k < BITS_PER_BYTE; k++)
+                       dontcare[dst] |= BIT(k);
+       }
+}
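To make the carry logic above concrete, here is a standalone sketch (illustrative only) that packs source bytes into a destination shifted left by a fixed 4-bit displacement, using the same overflow-carry pattern as the buf[] writes:

	/* Pack len source bytes into len + 1 destination bytes, shifted
	 * left by disp = 4 bits; overflow carries into the next byte.
	 */
	static void demo_pack_disp4(const u8 *src, u8 *dst, u16 len)
	{
		u16 k, tmp = 0;

		for (k = 0; k <= len; k++) {
			dst[k] = (tmp & 0xff00) >> 8;	/* carry bits */
			if (k < len) {
				tmp = src[k] << 4;	/* shift by disp */
				dst[k] |= tmp & 0xff;
			}
		}
	}

For src[0] = 0xAB this yields dst[0] = 0xB0 and dst[1] = 0x0A, showing how a displaced field straddles two entry bytes.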
+
+/**
+ * ice_flow_acl_frmt_entry - Format an ACL entry
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ * @e: pointer to the flow entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @acts: array of actions to be performed on a match
+ * @acts_cnt: number of actions
+ *
+ * Formats the key (and key_inverse) to be matched from the data passed in,
+ * along with data from the flow profile. This key/key_inverse pair makes up
+ * the 'entry' for an ACL flow entry.
+ */
+static enum ice_status
+ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
+                       struct ice_flow_entry *e, u8 *data,
+                       struct ice_flow_action *acts, u8 acts_cnt)
+{
+       u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
+       struct ice_aqc_acl_profile_ranges *range_buf = NULL;
+       enum ice_status status;
+       bool cnt_alloc;
+       u8 prof_id = 0;
+       u16 i, buf_sz;
+
+       status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
+       if (status)
+               return status;
+
+       /* Format the result action */
+
+       status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
+       if (status)
+               return status;
+
+       status = ICE_ERR_NO_MEMORY;
+
+       e->acts = (struct ice_flow_action *)
+               ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
+                          ICE_NONDMA_TO_NONDMA);
+
+       if (!e->acts)
+               goto out;
+
+       e->acts_cnt = acts_cnt;
+
+       /* Format the matching data */
+       buf_sz = prof->cfg.scen->width;
+       buf = (u8 *)ice_malloc(hw, buf_sz);
+       if (!buf)
+               goto out;
+
+       dontcare = (u8 *)ice_malloc(hw, buf_sz);
+       if (!dontcare)
+               goto out;
+
+       /* 'key' buffer will store both key and key_inverse, so must be twice
+        * size of buf
+        */
+       key = (u8 *)ice_malloc(hw, buf_sz * 2);
+       if (!key)
+               goto out;
+
+       range_buf = (struct ice_aqc_acl_profile_ranges *)
+               ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
+       if (!range_buf)
+               goto out;
+
+       /* Set don't care mask to all 1's to start, will zero out used bytes */
+       ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
+
+       for (i = 0; i < prof->segs_cnt; i++) {
+               struct ice_flow_seg_info *seg = &prof->segs[i];
+               u64 match = seg->match;
+               u16 j;
+
+               for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
+                       struct ice_flow_fld_info *info;
+                       const u64 bit = BIT_ULL(j);
+
+                       if (!(match & bit))
+                               continue;
+
+                       info = &seg->fields[j];
+
+                       if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
+                               ice_flow_acl_frmt_entry_range(j, info,
+                                                             range_buf, data,
+                                                             &range);
+                       else
+                               ice_flow_acl_frmt_entry_fld(j, info, buf,
+                                                           dontcare, data);
+
+                       match &= ~bit;
+               }
+
+               for (j = 0; j < seg->raws_cnt; j++) {
+                       struct ice_flow_fld_info *info = &seg->raws[j].info;
+                       u16 dst, src, mask, k;
+                       bool use_mask = false;
+
+                       src = info->src.val;
+                       dst = info->entry.val -
+                                       ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+                       mask = info->src.mask;
+
+                       if (mask != ICE_FLOW_FLD_OFF_INVAL)
+                               use_mask = true;
+
+                       for (k = 0; k < info->entry.last; k++, dst++) {
+                               buf[dst] = data[src++];
+                               if (use_mask)
+                                       dontcare[dst] = ~data[mask++];
+                               else
+                                       dontcare[dst] = 0;
+                       }
+               }
+       }
+
+       buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
+       dontcare[prof->cfg.scen->pid_idx] = 0;
+
+       /* Format the buffer for direction flags */
+       dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
+
+       if (prof->dir == ICE_FLOW_RX)
+               buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
+
+       if (range) {
+               buf[prof->cfg.scen->rng_chk_idx] = range;
+               /* Mark any unused range checkers as don't care */
+               dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
+               e->range_buf = range_buf;
+       } else {
+               ice_free(hw, range_buf);
+       }
+
+       status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
+                            buf_sz);
+       if (status)
+               goto out;
+
+       e->entry = key;
+       e->entry_sz = buf_sz * 2;
+
+out:
+       if (buf)
+               ice_free(hw, buf);
+
+       if (dontcare)
+               ice_free(hw, dontcare);
+
+       if (status && key)
+               ice_free(hw, key);
+
+       if (status && range_buf) {
+               ice_free(hw, range_buf);
+               e->range_buf = NULL;
+       }
+
+       if (status && e->acts) {
+               ice_free(hw, e->acts);
+               e->acts = NULL;
+               e->acts_cnt = 0;
+       }
+
+       if (status && cnt_alloc)
+               ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
+
+       return status;
+}
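For reference, the buffer built above and consumed later by ice_flow_acl_add_scen_entry_sync() is laid out as follows (sketch):

	e->entry:    [ key (buf_sz bytes) | key_inverse (buf_sz bytes) ]
	e->entry_sz: buf_sz * 2, where buf_sz = prof->cfg.scen->width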
+
+/**
+ * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
+ *                                    the compared data.
+ * @prof: pointer to flow profile
+ * @e: pointer to the comparing flow entry
+ * @do_chg_action: decide if we want to change the ACL action
+ * @do_add_entry: decide if we want to add the new ACL entry
+ * @do_rem_entry: decide if we want to remove the current ACL entry
+ *
+ * Find an ACL scenario entry that matches the compared data. At the same
+ * time, this function also figures out:
+ * a/ If we want to change the ACL action
+ * b/ If we want to add the new ACL entry
+ * c/ If we want to remove the current ACL entry
+ */
+static struct ice_flow_entry *
+ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
+                                 struct ice_flow_entry *e, bool *do_chg_action,
+                                 bool *do_add_entry, bool *do_rem_entry)
+{
+       struct ice_flow_entry *p, *return_entry = NULL;
+       u8 i, j;
+
+       /* Check if:
+        * a/ There exists an entry with the same matching data but a
+        *    different priority, then we remove this existing ACL entry.
+        *    Then, we will add the new entry to the ACL scenario.
+        * b/ There exists an entry with the same matching data, priority,
+        *    and result action, then we do nothing
+        * c/ There exists an entry with the same matching data and priority
+        *    but a different action, then we only change the entry's action.
+        * d/ Else, we add this new entry to the ACL scenario.
+        */
+       *do_chg_action = false;
+       *do_add_entry = true;
+       *do_rem_entry = false;
+       LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
+               if (memcmp(p->entry, e->entry, p->entry_sz))
+                       continue;
+
+               /* From this point, we have the same matching_data. */
+               *do_add_entry = false;
+               return_entry = p;
+
+               if (p->priority != e->priority) {
+                       /* matching data && !priority */
+                       *do_add_entry = true;
+                       *do_rem_entry = true;
+                       break;
+               }
+
+               /* From this point, we will have matching_data && priority */
+               if (p->acts_cnt != e->acts_cnt)
+                       *do_chg_action = true;
+               for (i = 0; i < p->acts_cnt; i++) {
+                       bool found_not_match = false;
+
+                       for (j = 0; j < e->acts_cnt; j++)
+                               if (memcmp(&p->acts[i], &e->acts[j],
+                                          sizeof(struct ice_flow_action))) {
+                                       found_not_match = true;
+                                       break;
+                               }
+
+                       if (found_not_match) {
+                               *do_chg_action = true;
+                               break;
+                       }
+               }
+
+               /* (do_chg_action = true) means:
+                *    matching_data && priority && !result_action
+                * (do_chg_action = false) means:
+                *    matching_data && priority && result_action
+                */
+               break;
+       }
+
+       return return_entry;
+}
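The flag combinations this produces can be summarized as follows (derived from the cases above; 'same'/'diff' compare the new entry e against an existing entry p):

	matching data  priority  actions  ->  do_add  do_rem  do_chg_action
	same           diff      any      ->  true    true    false
	same           same      same     ->  false   false   false
	same           same      diff     ->  false   false   true
	no match                          ->  true    false   false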
+
+/**
+ * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
+ * @p: flow priority
+ */
+static enum ice_acl_entry_prior
+ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
+{
+       enum ice_acl_entry_prior acl_prior;
+
+       switch (p) {
+       case ICE_FLOW_PRIO_LOW:
+               acl_prior = ICE_LOW;
+               break;
+       case ICE_FLOW_PRIO_NORMAL:
+               acl_prior = ICE_NORMAL;
+               break;
+       case ICE_FLOW_PRIO_HIGH:
+               acl_prior = ICE_HIGH;
+               break;
+       default:
+               acl_prior = ICE_NORMAL;
+               break;
+       }
+
+       return acl_prior;
+}
+
+/**
+ * ice_flow_acl_union_rng_chk - Perform union operation between two
+ *                              range checker buffers
+ * @dst_buf: pointer to destination range checker buffer
+ * @src_buf: pointer to source range checker buffer
+ *
+ * Compute the union of the dst_buf and src_buf range checker buffers,
+ * saving the result back to dst_buf
+ */
+static enum ice_status
+ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
+                          struct ice_aqc_acl_profile_ranges *src_buf)
+{
+       u8 i, j;
+
+       if (!dst_buf || !src_buf)
+               return ICE_ERR_BAD_PTR;
+
+       for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
+               struct ice_acl_rng_data *cfg_data = NULL, *in_data;
+               bool will_populate = false;
+
+               in_data = &src_buf->checker_cfg[i];
+
+               if (!in_data->mask)
+                       break;
+
+               for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
+                       cfg_data = &dst_buf->checker_cfg[j];
+
+                       if (!cfg_data->mask ||
+                           !memcmp(cfg_data, in_data,
+                                   sizeof(struct ice_acl_rng_data))) {
+                               will_populate = true;
+                               break;
+                       }
+               }
+
+               if (will_populate) {
+                       ice_memcpy(cfg_data, in_data,
+                                  sizeof(struct ice_acl_rng_data),
+                                  ICE_NONDMA_TO_NONDMA);
+               } else {
+                       /* No available slot left to program range checker */
+                       return ICE_ERR_MAX_LIMIT;
+               }
+       }
+
+       return ICE_SUCCESS;
+}
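For example, with the merge rule above (letters denote distinct checker configurations, '-' a free slot whose mask is 0), duplicates collapse into the matching slot and new checkers take the first free one:

	dst before:  [A] [B]  -   -        src:  [B] [C]
	dst after:   [A] [B] [C]  -

If src holds a configuration not already present in dst and no free slot remains, the function returns ICE_ERR_MAX_LIMIT.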
+
+/**
+ * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ * @entry: double pointer to the flow entry
+ *
+ * Look at the entries already added to the corresponding ACL scenario,
+ * then perform the matching logic to decide whether to add this new entry,
+ * modify an existing one, or do nothing.
+ */
+static enum ice_status
+ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
+                                struct ice_flow_entry **entry)
+{
+       bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
+       struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
+       struct ice_acl_act_entry *acts = NULL;
+       struct ice_flow_entry *exist;
+       enum ice_status status = ICE_SUCCESS;
+       struct ice_flow_entry *e;
+       u8 i;
+
+       if (!entry || !(*entry) || !prof)
+               return ICE_ERR_BAD_PTR;
+
+       e = *(entry);
+
+       do_chg_rng_chk = false;
+       if (e->range_buf) {
+               u8 prof_id = 0;
+
+               status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
+                                             &prof_id);
+               if (status)
+                       return status;
+
+               /* Query the current range-checker value in FW */
+               status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
+                                                  NULL);
+               if (status)
+                       return status;
+               ice_memcpy(&cfg_rng_buf, &query_rng_buf,
+                          sizeof(struct ice_aqc_acl_profile_ranges),
+                          ICE_NONDMA_TO_NONDMA);
+
+               /* Generate the new range-checker value */
+               status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
+               if (status)
+                       return status;
+
+               /* Reconfigure the range check if the buffer is changed. */
+               do_chg_rng_chk = false;
+               if (memcmp(&query_rng_buf, &cfg_rng_buf,
+                          sizeof(struct ice_aqc_acl_profile_ranges))) {
+                       status = ice_prog_acl_prof_ranges(hw, prof_id,
+                                                         &cfg_rng_buf, NULL);
+                       if (status)
+                               return status;
+
+                       do_chg_rng_chk = true;
+               }
+       }
+
+       /* Figure out if we want to (change the ACL action) and/or
+        * (Add the new ACL entry) and/or (Remove the current ACL entry)
+        */
+       exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
+                                                 &do_add_entry, &do_rem_entry);
+
+       if (do_rem_entry) {
+               status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
+               if (status)
+                       return status;
+       }
+
+       /* Prepare the result action buffer */
+       acts = (struct ice_acl_act_entry *)ice_calloc
+               (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
+       if (!acts)
+               return ICE_ERR_NO_MEMORY;
+
+       for (i = 0; i < e->acts_cnt; i++)
+               ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
+                          sizeof(struct ice_acl_act_entry),
+                          ICE_NONDMA_TO_NONDMA);
+
+       if (do_add_entry) {
+               enum ice_acl_entry_prior prior;
+               u8 *keys, *inverts;
+               u16 entry_idx;
+
+               keys = (u8 *)e->entry;
+               inverts = keys + (e->entry_sz / 2);
+               prior = ice_flow_acl_convert_to_acl_prior(e->priority);
+
+               status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
+                                          inverts, acts, e->acts_cnt,
+                                          &entry_idx);
+               if (status)
+                       goto out;
+
+               e->scen_entry_idx = entry_idx;
+               LIST_ADD(&e->l_entry, &prof->entries);
+       } else {
+               if (do_chg_action) {
+                       /* For the action memory info, update the SW copy of
+                        * the existing entry with e's action memory info
+                        */
+                       ice_free(hw, exist->acts);
+                       exist->acts_cnt = e->acts_cnt;
+                       exist->acts = (struct ice_flow_action *)
+                               ice_calloc(hw, exist->acts_cnt,
+                                          sizeof(struct ice_flow_action));
+
+                       if (!exist->acts) {
+                               status = ICE_ERR_NO_MEMORY;
+                               goto out;
+                       }
+
+                       ice_memcpy(exist->acts, e->acts,
+                                  sizeof(struct ice_flow_action) * e->acts_cnt,
+                                  ICE_NONDMA_TO_NONDMA);
+
+                       status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
+                                                 e->acts_cnt,
+                                                 exist->scen_entry_idx);
+                       if (status)
+                               goto out;
+               }
+
+               if (do_chg_rng_chk) {
+                       /* In this case, we want to update the range checker
+                        * information of the existing entry
+                        */
+                       status = ice_flow_acl_union_rng_chk(exist->range_buf,
+                                                           e->range_buf);
+                       if (status)
+                               goto out;
+               }
+
+               /* As we don't add the new entry to our SW DB, deallocate its
+                * memory, and return the existing entry to the caller
+                */
+               ice_dealloc_flow_entry(hw, e);
+               *(entry) = exist;
+       }
+out:
+       if (acts)
+               ice_free(hw, acts);
+
+       return status;
+}
+
+/**
+ * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ * @e: double pointer to the flow entry
+ */
+static enum ice_status
+ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
+                           struct ice_flow_entry **e)
+{
+       enum ice_status status;
+
+       ice_acquire_lock(&prof->entries_lock);
+       status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
+       ice_release_lock(&prof->entries_lock);
+
+       return status;
+}
+
 /**
  * ice_flow_add_entry - Add a flow entry
  * @hw: pointer to the HW struct
@@ -1581,7 +2660,8 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
        struct ice_flow_entry *e = NULL;
        enum ice_status status = ICE_SUCCESS;
 
-       if (acts_cnt && !acts)
+       /* ACL entries must indicate an action */
+       if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
                return ICE_ERR_PARAM;
 
        /* No flow entry data is expected for RSS */
@@ -1619,6 +2699,18 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
        switch (blk) {
        case ICE_BLK_RSS:
                /* RSS will add only one entry per VSI per profile */
+               break;
+       case ICE_BLK_ACL:
+               /* ACL will handle the entry management */
+               status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
+                                                acts_cnt);
+               if (status)
+                       goto out;
+
+               status = ice_flow_acl_add_scen_entry(hw, prof, &e);
+               if (status)
+                       goto out;
+
                break;
        case ICE_BLK_FD:
                break;
@@ -1651,13 +2743,15 @@ out:
 /**
  * ice_flow_rem_entry - Remove a flow entry
  * @hw: pointer to the HW struct
+ * @blk: classification stage
  * @entry_h: handle to the flow entry to be removed
  */
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h)
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
+                                  u64 entry_h)
 {
        struct ice_flow_entry *entry;
        struct ice_flow_prof *prof;
-       enum ice_status status;
+       enum ice_status status = ICE_SUCCESS;
 
        if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
                return ICE_ERR_PARAM;
@@ -1667,9 +2761,11 @@ enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h)
        /* Retain the pointer to the flow profile as the entry will be freed */
        prof = entry->prof;
 
-       ice_acquire_lock(&prof->entries_lock);
-       status = ice_flow_rem_entry_sync(hw, entry);
-       ice_release_lock(&prof->entries_lock);
+       if (prof) {
+               ice_acquire_lock(&prof->entries_lock);
+               status = ice_flow_rem_entry_sync(hw, blk, entry);
+               ice_release_lock(&prof->entries_lock);
+       }
 
        return status;
 }
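With the added blk parameter, callers now identify the classification stage explicitly when removing an entry. A minimal hedged sketch for an ACL entry (entry_h as returned earlier by ice_flow_add_entry() with ICE_BLK_ACL; logging illustrative):

	enum ice_status status;

	status = ice_flow_rem_entry(hw, ICE_BLK_ACL, entry_h);
	if (status)
		ice_debug(hw, ICE_DBG_ACL, "ACL entry remove failed\n");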
index 4c2067f..ec50b85 100644 (file)
@@ -6,6 +6,7 @@
 #define _ICE_FLOW_H_
 
 #include "ice_flex_type.h"
+#include "ice_acl.h"
 #define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((u32)(~0) << (32 - (prefix)))
 #define ICE_FLOW_PROF_ID_INVAL         0xfffffffffffffffful
 #define ICE_FLOW_PROF_ID_BYPASS                0
@@ -308,9 +309,14 @@ struct ice_flow_entry {
        struct ice_flow_action *acts;
        /* Flow entry's content */
        void *entry;
+       /* Range buffer (For ACL only) */
+       struct ice_aqc_acl_profile_ranges *range_buf;
        enum ice_flow_priority priority;
        u16 vsi_handle;
        u16 entry_sz;
+       /* Entry index in the ACL's scenario */
+       u16 scen_entry_idx;
+#define ICE_FLOW_ACL_MAX_NUM_ACT       2
        u8 acts_cnt;
 };
 
@@ -336,6 +342,7 @@ struct ice_flow_prof {
 
        union {
                /* struct sw_recipe */
+               struct ice_acl_scen *scen;
                /* struct fd */
                u32 data;
                /* Symmetric Hash for RSS */
@@ -381,6 +388,7 @@ enum ice_flow_action_type {
 struct ice_flow_action {
        enum ice_flow_action_type type;
        union {
+               struct ice_acl_act_entry acl_act;
                u32 dummy;
        } data;
 };
@@ -408,7 +416,8 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
                   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
                   void *data, struct ice_flow_action *acts, u8 acts_cnt,
                   u64 *entry_h);
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h);
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
+                                  u64 entry_h);
 void
 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
                 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
index 89d4764..a89c657 100644 (file)
@@ -99,6 +99,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
 #define ICE_HI_DWORD(x)                ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
 #define ICE_LO_DWORD(x)                ((u32)((x) & 0xFFFFFFFF))
 #define ICE_HI_WORD(x)         ((u16)(((x) >> 16) & 0xFFFF))
+#define ICE_LO_WORD(x)         ((u16)((x) & 0xFFFF))
 
 /* debug masks - set these bits in hw->debug_mask to control output */
 #define ICE_DBG_TRACE          BIT_ULL(0) /* for function-trace only */
@@ -119,6 +120,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
 
 #define ICE_DBG_PKG            BIT_ULL(16)
 #define ICE_DBG_RES            BIT_ULL(17)
+#define ICE_DBG_ACL            BIT_ULL(18)
 #define ICE_DBG_AQ_MSG         BIT_ULL(24)
 #define ICE_DBG_AQ_DESC                BIT_ULL(25)
 #define ICE_DBG_AQ_DESC_BUF    BIT_ULL(26)
@@ -389,6 +391,8 @@ struct ice_hw_common_caps {
        u8 apm_wol_support;
        u8 acpi_prog_mthd;
        u8 proxy_support;
+       bool nvm_unified_update;
+#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT       BIT(3)
 };
 
 /* Function specific capabilities */
@@ -879,6 +883,9 @@ struct ice_hw {
        /* tunneling info */
        struct ice_tunnel_table tnl;
 
+       struct ice_acl_tbl *acl_tbl;
+       struct ice_fd_hw_prof **acl_prof;
+       u16 acl_fltr_cnt[ICE_FLTR_PTYPE_MAX];
        /* HW block tables */
        struct ice_blk_info blk[ICE_BLK_COUNT];
        struct ice_lock fl_profs_locks[ICE_BLK_COUNT];  /* lock fltr profiles */
index eff1555..100630c 100644 (file)
@@ -11,6 +11,8 @@ sources = [
        'ice_flow.c',
        'ice_dcb.c',
        'ice_fdir.c',
+       'ice_acl.c',
+       'ice_acl_ctrl.c',
 ]
 
 error_cflags = ['-Wno-unused-value',
index 6342b56..a082a13 100644 (file)
@@ -584,7 +584,7 @@ ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, ptype);
-                       ice_flow_rem_entry(hw,
+                       ice_flow_rem_entry(hw, ICE_BLK_FD,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
@@ -876,7 +876,7 @@ ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
 err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
-       ice_flow_rem_entry(hw, entry_1);
+       ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
 err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);