+/**
+ * ice_flow_acl_check_actions - Checks the ACL rule's actions
+ * @hw: pointer to the hardware structure
+ * @acts: array of actions to be performed on a match
+ * @acts_cnt: number of actions
+ * @cnt_alloc: indicates if an ACL counter has been allocated.
+ *
+ * Validates the action list of an ACL rule: rejects unsupported and
+ * duplicated action types, then allocates an ACL counter for any counting
+ * action and stores the counter index in the action's data. *cnt_alloc is
+ * set when a counter was allocated so the caller can release it if a later
+ * step fails.
+ */
+static enum ice_status
+ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
+ u8 acts_cnt, bool *cnt_alloc)
+{
+ ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
+ int i;
+
+ ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
+ *cnt_alloc = false;
+
+ if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ for (i = 0; i < acts_cnt; i++) {
+ /* NOTE(review): only these four action types are accepted,
+ * yet the counter loop below also handles
+ * ICE_FLOW_ACT_CNTR_BYTES and ICE_FLOW_ACT_CNTR_PKT_BYTES,
+ * which can never get past this check -- confirm whether
+ * those types should be allowed here as well.
+ */
+ if (acts[i].type != ICE_FLOW_ACT_NOP &&
+ acts[i].type != ICE_FLOW_ACT_DROP &&
+ acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
+ acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
+ return ICE_ERR_CFG;
+
+ /* If the caller want to add two actions of the same type, then
+ * it is considered invalid configuration.
+ */
+ if (ice_test_and_set_bit(acts[i].type, dup_check))
+ return ICE_ERR_PARAM;
+ }
+
+ /* Checks if ACL counters are needed. */
+ for (i = 0; i < acts_cnt; i++) {
+ if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
+ acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
+ acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
+ struct ice_acl_cntrs cntrs;
+ enum ice_status status;
+
+ cntrs.amount = 1;
+ cntrs.bank = 0; /* Only bank0 for the moment */
+
+ /* A dual counter tracks packets and bytes together */
+ if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
+ cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
+ else
+ cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
+
+ status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
+ if (status)
+ return status;
+ /* Counter index within the bank */
+ acts[i].data.acl_act.value =
+ CPU_TO_LE16(cntrs.first_cntr);
+ *cnt_alloc = true;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
+ * @fld: number of the given field
+ * @info: info about field
+ * @range_buf: range checker configuration buffer
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @range: Input/output param indicating which range checkers are being used
+ *
+ * Programs one range checker (low/high boundary plus mask) from the entry's
+ * match data, shifted by the field's extraction displacement, and marks the
+ * checker as used in *range. An all-zero effective mask leaves the checker
+ * untouched.
+ */
+static void
+ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
+ struct ice_aqc_acl_profile_ranges *range_buf,
+ u8 *data, u8 *range)
+{
+ struct ice_acl_rng_data *cfg;
+ u16 mask, low, high;
+ u8 idx;
+
+ /* With no caller-supplied mask offset, match every bit of the field */
+ if (info->src.mask == ICE_FLOW_FLD_OFF_INVAL)
+ mask = (BIT(ice_flds_info[fld].size) - 1) << info->xtrct.disp;
+ else
+ mask = (*(u16 *)(data + info->src.mask)) << info->xtrct.disp;
+
+ /* Nothing to program when the effective mask is empty */
+ if (!mask)
+ return;
+
+ low = (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
+ high = (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
+ idx = info->entry.val;
+
+ cfg = &range_buf->checker_cfg[idx];
+ cfg->low_boundary = CPU_TO_BE16(low);
+ cfg->high_boundary = CPU_TO_BE16(high);
+ cfg->mask = CPU_TO_BE16(mask);
+
+ /* Record which range checker this field occupies */
+ *range |= BIT(idx);
+}
+
+/**
+ * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
+ * @fld: number of the given field
+ * @info: info about the field
+ * @buf: buffer containing the entry
+ * @dontcare: buffer containing don't care mask for entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ *
+ * Copies the field's match value (and the inverse of its mask) from the user
+ * data buffer into the entry key buffers, shifting left by the field's bit
+ * displacement so the data lines up with the hardware layout. Bits before
+ * the field in its first byte and after the field in its last byte are
+ * marked as don't care.
+ */
+static void
+ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
+ u8 *dontcare, u8 *data)
+{
+ u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
+ bool use_mask = false;
+ u8 disp;
+
+ src = info->src.val;
+ mask = info->src.mask;
+ dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+ disp = info->xtrct.disp % BITS_PER_BYTE;
+
+ /* ICE_FLOW_FLD_OFF_INVAL means the caller supplied no mask offset */
+ if (mask != ICE_FLOW_FLD_OFF_INVAL)
+ use_mask = true;
+
+ /* tmp_s/tmp_m carry the bits shifted out of each byte into the next
+ * destination byte (16-bit staging for an 8-bit shifted copy).
+ */
+ for (k = 0; k < info->entry.last; k++, dst++) {
+ /* Add overflow bits from previous byte */
+ buf[dst] = (tmp_s & 0xff00) >> 8;
+
+ /* If mask is not valid, tmp_m is always zero, so just setting
+ * dontcare to 0 (no masked bits). If mask is valid, pulls in
+ * overflow bits of mask from prev byte
+ */
+ dontcare[dst] = (tmp_m & 0xff00) >> 8;
+
+ /* If there is displacement, last byte will only contain
+ * displaced data, but there is no more data to read from user
+ * buffer, so skip so as not to potentially read beyond end of
+ * user buffer
+ */
+ if (!disp || k < info->entry.last - 1) {
+ /* Store shifted data to use in next byte */
+ tmp_s = data[src++] << disp;
+
+ /* Add current (shifted) byte */
+ buf[dst] |= tmp_s & 0xff;
+
+ /* Handle mask if valid */
+ if (use_mask) {
+ /* dontcare is the inverse of the match mask */
+ tmp_m = (~data[mask++] & 0xff) << disp;
+ dontcare[dst] |= tmp_m & 0xff;
+ }
+ }
+ }
+
+ /* Fill in don't care bits at beginning of field */
+ if (disp) {
+ dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+ for (k = 0; k < disp; k++)
+ dontcare[dst] |= BIT(k);
+ }
+
+ /* Bit position just past the field within its final byte */
+ end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
+
+ /* Fill in don't care bits at end of field */
+ if (end_disp) {
+ dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
+ info->entry.last - 1;
+ for (k = end_disp; k < BITS_PER_BYTE; k++)
+ dontcare[dst] |= BIT(k);
+ }
+}
+
+/**
+ * ice_flow_acl_frmt_entry - Format ACL entry
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ * @e: pointer to the flow entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @acts: array of actions to be performed on a match
+ * @acts_cnt: number of actions
+ *
+ * Formats the key (and key_inverse) to be matched from the data passed in,
+ * along with data from the flow profile. This key/key_inverse pair makes up
+ * the 'entry' for an ACL flow entry.
+ *
+ * On success the entry owns e->acts, e->entry and (if range checkers are
+ * used) e->range_buf; on failure all allocations made here are released.
+ */
+static enum ice_status
+ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
+ struct ice_flow_entry *e, u8 *data,
+ struct ice_flow_action *acts, u8 acts_cnt)
+{
+ u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
+ struct ice_aqc_acl_profile_ranges *range_buf = NULL;
+ enum ice_status status;
+ bool cnt_alloc;
+ u8 prof_id = 0;
+ u16 i, buf_sz;
+
+ status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
+ if (status)
+ return status;
+
+ /* Format the result action */
+
+ status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
+ if (status)
+ return status;
+
+ /* Default status for all allocation failures below */
+ status = ICE_ERR_NO_MEMORY;
+
+ e->acts = (struct ice_flow_action *)
+ ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
+ ICE_NONDMA_TO_NONDMA);
+
+ if (!e->acts)
+ goto out;
+
+ e->acts_cnt = acts_cnt;
+
+ /* Format the matching data */
+ buf_sz = prof->cfg.scen->width;
+ buf = (u8 *)ice_malloc(hw, buf_sz);
+ if (!buf)
+ goto out;
+
+ dontcare = (u8 *)ice_malloc(hw, buf_sz);
+ if (!dontcare)
+ goto out;
+
+ /* 'key' buffer will store both key and key_inverse, so must be twice
+ * size of buf
+ */
+ key = (u8 *)ice_malloc(hw, buf_sz * 2);
+ if (!key)
+ goto out;
+
+ range_buf = (struct ice_aqc_acl_profile_ranges *)
+ ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
+ if (!range_buf)
+ goto out;
+
+ /* Set don't care mask to all 1's to start, will zero out used bytes */
+ ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
+
+ for (i = 0; i < prof->segs_cnt; i++) {
+ struct ice_flow_seg_info *seg = &prof->segs[i];
+ u64 match = seg->match;
+ u16 j;
+
+ for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
+ struct ice_flow_fld_info *info;
+ const u64 bit = BIT_ULL(j);
+
+ if (!(match & bit))
+ continue;
+
+ info = &seg->fields[j];
+
+ if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
+ ice_flow_acl_frmt_entry_range(j, info,
+ range_buf, data,
+ &range);
+ else
+ ice_flow_acl_frmt_entry_fld(j, info, buf,
+ dontcare, data);
+
+ match &= ~bit;
+ }
+
+ for (j = 0; j < seg->raws_cnt; j++) {
+ struct ice_flow_fld_info *info = &seg->raws[j].info;
+ u16 dst, src, mask, k;
+ bool use_mask = false;
+
+ src = info->src.val;
+ dst = info->entry.val -
+ ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+ mask = info->src.mask;
+
+ if (mask != ICE_FLOW_FLD_OFF_INVAL)
+ use_mask = true;
+
+ for (k = 0; k < info->entry.last; k++, dst++) {
+ buf[dst] = data[src++];
+ if (use_mask)
+ dontcare[dst] = ~data[mask++];
+ else
+ dontcare[dst] = 0;
+ }
+ }
+ }
+
+ /* Profile ID byte must match exactly, so it is never don't care */
+ buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
+ dontcare[prof->cfg.scen->pid_idx] = 0;
+
+ /* Format the buffer for direction flags */
+ dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
+
+ if (prof->dir == ICE_FLOW_RX)
+ buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
+
+ if (range) {
+ buf[prof->cfg.scen->rng_chk_idx] = range;
+ /* Mark any unused range checkers as don't care */
+ dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
+ e->range_buf = range_buf;
+ } else {
+ /* No range checkers used; release the buffer and clear the
+ * pointer so the error path below cannot free it a second
+ * time if ice_set_key() fails.
+ */
+ ice_free(hw, range_buf);
+ range_buf = NULL;
+ }
+
+ status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
+ buf_sz);
+ if (status)
+ goto out;
+
+ e->entry = key;
+ e->entry_sz = buf_sz * 2;
+
+out:
+ if (buf)
+ ice_free(hw, buf);
+
+ if (dontcare)
+ ice_free(hw, dontcare);
+
+ /* 'key' is only kept by the entry on success */
+ if (status && key)
+ ice_free(hw, key);
+
+ if (status && range_buf) {
+ ice_free(hw, range_buf);
+ e->range_buf = NULL;
+ }
+
+ if (status && e->acts) {
+ ice_free(hw, e->acts);
+ e->acts = NULL;
+ e->acts_cnt = 0;
+ }
+
+ /* Release any ACL counter allocated by the action check */
+ if (status && cnt_alloc)
+ ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
+
+ return status;
+}
+
+/**
+ * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
+ * the compared data.
+ * @prof: pointer to flow profile
+ * @e: pointer to the comparing flow entry
+ * @do_chg_action: decide if we want to change the ACL action
+ * @do_add_entry: decide if we want to add the new ACL entry
+ * @do_rem_entry: decide if we want to remove the current ACL entry
+ *
+ * Find an ACL scenario entry that matches the compared data. In the same time,
+ * this function also figure out:
+ * a/ If we want to change the ACL action
+ * b/ If we want to add the new ACL entry
+ * c/ If we want to remove the current ACL entry
+ */
+static struct ice_flow_entry *
+ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
+ struct ice_flow_entry *e, bool *do_chg_action,
+ bool *do_add_entry, bool *do_rem_entry)
+{
+ struct ice_flow_entry *p, *return_entry = NULL;
+ u8 i, j;
+
+ /* Check if:
+ * a/ There exists an entry with same matching data, but different
+ * priority, then we remove this existing ACL entry. Then, we
+ * will add the new entry to the ACL scenario.
+ * b/ There exists an entry with same matching data, priority, and
+ * result action, then we do nothing
+ * c/ There exists an entry with same matching data, priority, but
+ * different, action, then do only change the action's entry.
+ * d/ Else, we add this new entry to the ACL scenario.
+ */
+ *do_chg_action = false;
+ *do_add_entry = true;
+ *do_rem_entry = false;
+ LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
+ if (memcmp(p->entry, e->entry, p->entry_sz))
+ continue;
+
+ /* From this point, we have the same matching_data. */
+ *do_add_entry = false;
+ return_entry = p;
+
+ if (p->priority != e->priority) {
+ /* matching data && !priority */
+ *do_add_entry = true;
+ *do_rem_entry = true;
+ break;
+ }
+
+ /* From this point, we will have matching_data && priority */
+ if (p->acts_cnt != e->acts_cnt)
+ *do_chg_action = true;
+ for (i = 0; i < p->acts_cnt; i++) {
+ bool found = false;
+
+ /* An existing action is only missing when it equals
+ * none of the new entry's actions; a single differing
+ * pair must not trigger an action change.
+ */
+ for (j = 0; j < e->acts_cnt; j++)
+ if (!memcmp(&p->acts[i], &e->acts[j],
+ sizeof(struct ice_flow_action))) {
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ *do_chg_action = true;
+ break;
+ }
+ }
+
+ /* (do_chg_action = true) means :
+ * matching_data && priority && !result_action
+ * (do_chg_action = false) means :
+ * matching_data && priority && result_action
+ */
+ break;
+ }
+
+ return return_entry;
+}
+
+/**
+ * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
+ * @p: flow priority
+ *
+ * Maps a generic flow priority onto the ACL entry priority used when
+ * programming scenario entries. Any unrecognized value maps to normal
+ * priority.
+ */
+static enum ice_acl_entry_prior
+ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
+{
+ switch (p) {
+ case ICE_FLOW_PRIO_LOW:
+ return ICE_LOW;
+ case ICE_FLOW_PRIO_HIGH:
+ return ICE_HIGH;
+ case ICE_FLOW_PRIO_NORMAL:
+ default:
+ return ICE_NORMAL;
+ }
+}
+
+/**
+ * ice_flow_acl_union_rng_chk - Perform union operation between two
+ * range-range checker buffers
+ * @dst_buf: pointer to destination range checker buffer
+ * @src_buf: pointer to source range checker buffer
+ *
+ * For this function, we do the union between dst_buf and src_buf
+ * range checker buffer, and we will save the result back to dst_buf
+ */
+static enum ice_status
+ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
+ struct ice_aqc_acl_profile_ranges *src_buf)
+{
+ u8 i, j;
+
+ if (!dst_buf || !src_buf)
+ return ICE_ERR_BAD_PTR;
+
+ for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
+ struct ice_acl_rng_data *cfg_data = NULL, *in_data;
+ bool will_populate = false;
+
+ in_data = &src_buf->checker_cfg[i];
+
+ /* A zero mask marks an unused checker; assumes used checkers
+ * are packed at the front of the source buffer -- TODO confirm
+ * producers always fill the buffer contiguously.
+ */
+ if (!in_data->mask)
+ break;
+
+ /* Find a free destination slot, or one that already holds this
+ * exact configuration (copying onto it is then a no-op).
+ */
+ for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
+ cfg_data = &dst_buf->checker_cfg[j];
+
+ if (!cfg_data->mask ||
+ !memcmp(cfg_data, in_data,
+ sizeof(struct ice_acl_rng_data))) {
+ will_populate = true;
+ break;
+ }
+ }
+
+ if (will_populate) {
+ /* cfg_data points at the slot found above */
+ ice_memcpy(cfg_data, in_data,
+ sizeof(struct ice_acl_rng_data),
+ ICE_NONDMA_TO_NONDMA);
+ } else {
+ /* No available slot left to program range checker */
+ return ICE_ERR_MAX_LIMIT;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ * @entry: double pointer to the flow entry
+ *
+ * For this function, we will look at the current added entries in the
+ * corresponding ACL scenario. Then, we will perform matching logic to
+ * see if we want to add/modify/do nothing with this new entry.
+ *
+ * If an equivalent entry already exists, *entry is redirected to it and the
+ * caller's entry is deallocated. Must be called with prof->entries_lock held.
+ */
+static enum ice_status
+ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
+ struct ice_flow_entry **entry)
+{
+ bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
+ struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
+ struct ice_acl_act_entry *acts = NULL;
+ struct ice_flow_entry *exist;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_flow_entry *e;
+ u8 i;
+
+ if (!entry || !(*entry) || !prof)
+ return ICE_ERR_BAD_PTR;
+
+ e = *(entry);
+
+ do_chg_rng_chk = false;
+ if (e->range_buf) {
+ u8 prof_id = 0;
+
+ status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
+ &prof_id);
+ if (status)
+ return status;
+
+ /* Query the current range-checker value in FW */
+ status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
+ NULL);
+ if (status)
+ return status;
+ ice_memcpy(&cfg_rng_buf, &query_rng_buf,
+ sizeof(struct ice_aqc_acl_profile_ranges),
+ ICE_NONDMA_TO_NONDMA);
+
+ /* Generate the new range-checker value */
+ status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
+ if (status)
+ return status;
+
+ /* Reconfigure the range check if the buffer is changed. */
+ do_chg_rng_chk = false;
+ if (memcmp(&query_rng_buf, &cfg_rng_buf,
+ sizeof(struct ice_aqc_acl_profile_ranges))) {
+ status = ice_prog_acl_prof_ranges(hw, prof_id,
+ &cfg_rng_buf, NULL);
+ if (status)
+ return status;
+
+ do_chg_rng_chk = true;
+ }
+ }
+
+ /* Figure out if we want to (change the ACL action) and/or
+ * (Add the new ACL entry) and/or (Remove the current ACL entry)
+ */
+ exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
+ &do_add_entry, &do_rem_entry);
+
+ if (do_rem_entry) {
+ status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
+ if (status)
+ return status;
+ }
+
+ /* Prepare the result action buffer; one ice_acl_act_entry per action,
+ * so size by the action count (not by the entry's key size).
+ */
+ acts = (struct ice_acl_act_entry *)
+ ice_calloc(hw, e->acts_cnt,
+ sizeof(struct ice_acl_act_entry));
+ if (!acts) {
+ status = ICE_ERR_NO_MEMORY;
+ goto out;
+ }
+
+ for (i = 0; i < e->acts_cnt; i++)
+ ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
+ sizeof(struct ice_acl_act_entry),
+ ICE_NONDMA_TO_NONDMA);
+
+ if (do_add_entry) {
+ enum ice_acl_entry_prior prior;
+ u8 *keys, *inverts;
+ u16 entry_idx;
+
+ /* e->entry holds the key followed by its inverse */
+ keys = (u8 *)e->entry;
+ inverts = keys + (e->entry_sz / 2);
+ prior = ice_flow_acl_convert_to_acl_prior(e->priority);
+
+ status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
+ inverts, acts, e->acts_cnt,
+ &entry_idx);
+ if (status)
+ goto out;
+
+ e->scen_entry_idx = entry_idx;
+ LIST_ADD(&e->l_entry, &prof->entries);
+ } else {
+ if (do_chg_action) {
+ /* For the action memory info, update the SW's copy of
+ * exist entry with e's action memory info
+ */
+ ice_free(hw, exist->acts);
+ exist->acts_cnt = e->acts_cnt;
+ exist->acts = (struct ice_flow_action *)
+ ice_calloc(hw, exist->acts_cnt,
+ sizeof(struct ice_flow_action));
+
+ if (!exist->acts) {
+ status = ICE_ERR_NO_MEMORY;
+ goto out;
+ }
+
+ ice_memcpy(exist->acts, e->acts,
+ sizeof(struct ice_flow_action) * e->acts_cnt,
+ ICE_NONDMA_TO_NONDMA);
+
+ status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
+ e->acts_cnt,
+ exist->scen_entry_idx);
+ if (status)
+ goto out;
+ }
+
+ if (do_chg_rng_chk) {
+ /* In this case, we want to update the range checker
+ * information of the exist entry
+ */
+ status = ice_flow_acl_union_rng_chk(exist->range_buf,
+ e->range_buf);
+ if (status)
+ goto out;
+ }
+
+ /* As we don't add the new entry to our SW DB, deallocate its
+ * memories, and return the exist entry to the caller
+ */
+ ice_dealloc_flow_entry(hw, e);
+ *(entry) = exist;
+ }
+out:
+ if (acts)
+ ice_free(hw, acts);
+
+ return status;
+}
+
+/**
+ * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ * @e: double pointer to the flow entry
+ *
+ * Thin locking wrapper: serializes access to the profile's entry list
+ * around the sync variant, which does the actual work.
+ */
+static enum ice_status
+ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
+ struct ice_flow_entry **e)
+{
+ enum ice_status ret;
+
+ ice_acquire_lock(&prof->entries_lock);
+ ret = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
+ ice_release_lock(&prof->entries_lock);
+
+ return ret;
+}
+