1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020
/* Determine the TCAM index of entry 'e' within the ACL table */
#define ICE_ACL_TBL_TCAM_IDX(e) ((e) / ICE_AQC_ACL_TCAM_DEPTH)

/* Determine the entry index within the TCAM */
#define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((e) % ICE_AQC_ACL_TCAM_DEPTH)

/* Sentinel returned when no scenario entry index could be assigned */
#define ICE_ACL_SCEN_ENTRY_INVAL 0xFFFF
17 * @scen: pointer to the scenario struct
19 * Initialize the scenario control structure.
21 static void ice_acl_init_entry(struct ice_acl_scen *scen)
24 * low priority: start from the highest index, 25% of total entries
25 * normal priority: start from the highest index, 50% of total entries
26 * high priority: start from the lowest index, 25% of total entries
28 scen->first_idx[ICE_LOW] = scen->num_entry - 1;
29 scen->first_idx[ICE_NORMAL] = scen->num_entry - scen->num_entry / 4 - 1;
30 scen->first_idx[ICE_HIGH] = 0;
32 scen->last_idx[ICE_LOW] = scen->num_entry - scen->num_entry / 4;
33 scen->last_idx[ICE_NORMAL] = scen->num_entry / 4;
34 scen->last_idx[ICE_HIGH] = scen->num_entry / 4 - 1;
38 * ice_acl_scen_assign_entry_idx
39 * @scen: pointer to the scenario struct
40 * @prior: the priority of the flow entry being allocated
42 * To find the index of an available entry in scenario
44 * Returns ICE_ACL_SCEN_ENTRY_INVAL if fails
45 * Returns index on success
47 static u16 ice_acl_scen_assign_entry_idx(struct ice_acl_scen *scen,
48 enum ice_acl_entry_prior prior)
50 u16 first_idx, last_idx, i;
53 if (prior >= ICE_MAX_PRIOR)
54 return ICE_ACL_SCEN_ENTRY_INVAL;
56 first_idx = scen->first_idx[prior];
57 last_idx = scen->last_idx[prior];
58 step = first_idx <= last_idx ? 1 : -1;
60 for (i = first_idx; i != last_idx + step; i += step)
61 if (!ice_test_and_set_bit(i, scen->entry_bitmap))
64 return ICE_ACL_SCEN_ENTRY_INVAL;
68 * ice_acl_scen_free_entry_idx
69 * @scen: pointer to the scenario struct
70 * @idx: the index of the flow entry being de-allocated
72 * To mark an entry available in scenario
74 static enum ice_status
75 ice_acl_scen_free_entry_idx(struct ice_acl_scen *scen, u16 idx)
77 if (idx >= scen->num_entry)
78 return ICE_ERR_MAX_LIMIT;
80 if (!ice_test_and_clear_bit(idx, scen->entry_bitmap))
81 return ICE_ERR_DOES_NOT_EXIST;
87 * ice_acl_tbl_calc_end_idx
88 * @start: start index of the TCAM entry of this partition
89 * @num_entries: number of entries in this partition
90 * @width: width of a partition in number of TCAMs
92 * Calculate the end entry index for a partition with starting entry index
93 * 'start', entries 'num_entries', and width 'width'.
95 static u16 ice_acl_tbl_calc_end_idx(u16 start, u16 num_entries, u16 width)
97 u16 end_idx, add_entries = 0;
99 end_idx = start + (num_entries - 1);
101 /* In case that our ACL partition requires cascading TCAMs */
105 /* Figure out the TCAM stacked level in this ACL scenario */
106 num_stack_level = (start % ICE_AQC_ACL_TCAM_DEPTH) +
108 num_stack_level = DIVIDE_AND_ROUND_UP(num_stack_level,
109 ICE_AQC_ACL_TCAM_DEPTH);
111 /* In this case, each entries in our ACL partition span
112 * multiple TCAMs. Thus, we will need to add
113 * ((width - 1) * num_stack_level) TCAM's entries to
116 * For example : In our case, our scenario is 2x2:
119 * Assuming that a TCAM will have 512 entries. If "start"
120 * is 500, "num_entries" is 3 and "width" = 2, then end_idx
121 * should be 1024 (belongs to TCAM 2).
122 * Before going to this if statement, end_idx will have the
123 * value of 512. If "width" is 1, then the final value of
124 * end_idx is 512. However, in our case, width is 2, then we
125 * will need add (2 - 1) * 1 * 512. As result, end_idx will
126 * have the value of 1024.
128 add_entries = (width - 1) * num_stack_level *
129 ICE_AQC_ACL_TCAM_DEPTH;
132 return end_idx + add_entries;
137 * @hw: pointer to the hardware structure
139 * Initialize the ACL table by invalidating TCAM entries and action pairs.
141 static enum ice_status ice_acl_init_tbl(struct ice_hw *hw)
143 struct ice_aqc_actpair act_buf;
144 struct ice_aqc_acl_data buf;
145 enum ice_status status = ICE_SUCCESS;
146 struct ice_acl_tbl *tbl;
152 status = ICE_ERR_CFG;
156 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
157 ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM);
159 tcam_idx = tbl->first_tcam;
160 idx = tbl->first_entry;
161 while (tcam_idx < tbl->last_tcam ||
162 (tcam_idx == tbl->last_tcam && idx <= tbl->last_entry)) {
163 /* Use the same value for entry_key and entry_key_inv since
164 * we are initializing the fields to 0
166 status = ice_aq_program_acl_entry(hw, tcam_idx, idx, &buf,
171 if (++idx > tbl->last_entry) {
173 idx = tbl->first_entry;
177 for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++) {
178 u16 act_entry_idx, start, end;
180 if (tbl->act_mems[i].act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL)
183 start = tbl->first_entry;
184 end = tbl->last_entry;
186 for (act_entry_idx = start; act_entry_idx <= end;
188 /* Invalidate all allocated action pairs */
189 status = ice_aq_program_actpair(hw, i, act_entry_idx,
200 * ice_acl_assign_act_mems_to_tcam
201 * @tbl: pointer to acl table structure
202 * @cur_tcam: Index of current TCAM. Value = 0 to (ICE_AQC_ACL_SLICES - 1)
203 * @cur_mem_idx: Index of current action memory bank. Value = 0 to
204 * (ICE_AQC_MAX_ACTION_MEMORIES - 1)
205 * @num_mem: Number of action memory banks for this TCAM
207 * Assign "num_mem" valid action memory banks from "curr_mem_idx" to
211 ice_acl_assign_act_mems_to_tcam(struct ice_acl_tbl *tbl, u8 cur_tcam,
212 u8 *cur_mem_idx, u8 num_mem)
217 *cur_mem_idx < ICE_AQC_MAX_ACTION_MEMORIES && mem_cnt < num_mem;
219 struct ice_acl_act_mem *p_mem = &tbl->act_mems[*cur_mem_idx];
221 if (p_mem->act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL)
224 p_mem->member_of_tcam = cur_tcam;
231 * ice_acl_divide_act_mems_to_tcams
232 * @tbl: pointer to acl table structure
234 * Figure out how to divide given action memory banks to given TCAMs. This
235 * division is for SW book keeping. In the time when scenario is created,
236 * an action memory bank can be used for different TCAM.
238 * For example, given that we have 2x2 ACL table with each table entry has
239 * 2 action memory pairs. As the result, we will have 4 TCAMs (T1,T2,T3,T4)
240 * and 4 action memory banks (A1,A2,A3,A4)
241 * [T1 - T2] { A1 - A2 }
242 * [T3 - T4] { A3 - A4 }
243 * In the time when we need to create a scenario, for example, 2x1 scenario,
244 * we will use [T3,T4] in a cascaded layout. As it is a requirement that all
245 * action memory banks in a cascaded TCAM's row will need to associate with
246 * the last TCAM. Thus, we will associate action memory banks [A3] and [A4]
248 * For SW book-keeping purpose, we will keep theoretical maps between TCAM
249 * [Tn] to action memory bank [An].
251 static void ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl *tbl)
253 u16 num_cscd, stack_level, stack_idx, min_act_mem;
254 u8 tcam_idx = tbl->first_tcam;
255 u16 max_idx_to_get_extra;
258 /* Determine number of stacked TCAMs */
259 stack_level = DIVIDE_AND_ROUND_UP(tbl->info.depth,
260 ICE_AQC_ACL_TCAM_DEPTH);
262 /* Determine number of cascaded TCAMs */
263 num_cscd = DIVIDE_AND_ROUND_UP(tbl->info.width,
264 ICE_AQC_ACL_KEY_WIDTH_BYTES);
266 /* In a line of cascaded TCAM, given the number of action memory
267 * banks per ACL table entry, we want to fairly divide these action
268 * memory banks between these TCAMs.
270 * For example, there are 3 TCAMs (TCAM 3,4,5) in a line of
271 * cascaded TCAM, and there are 7 act_mems for each ACL table entry.
273 * [TCAM_3 will have 3 act_mems]
274 * [TCAM_4 will have 2 act_mems]
275 * [TCAM_5 will have 2 act_mems]
277 min_act_mem = tbl->info.entry_act_pairs / num_cscd;
278 max_idx_to_get_extra = tbl->info.entry_act_pairs % num_cscd;
280 for (stack_idx = 0; stack_idx < stack_level; stack_idx++) {
283 for (i = 0; i < num_cscd; i++) {
284 u8 total_act_mem = min_act_mem;
286 if (i < max_idx_to_get_extra)
289 ice_acl_assign_act_mems_to_tcam(tbl, tcam_idx,
300 * @hw: pointer to the HW struct
301 * @params: parameters for the table to be created
303 * Create a LEM table for ACL usage. We are currently starting with some fixed
304 * values for the size of the table, but this will need to grow as more flow
305 * entries are added by the user level.
308 ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params)
310 u16 width, depth, first_e, last_e, i;
311 struct ice_aqc_acl_generic *resp_buf;
312 struct ice_acl_alloc_tbl tbl_alloc;
313 struct ice_acl_tbl *tbl;
314 enum ice_status status;
317 return ICE_ERR_ALREADY_EXISTS;
320 return ICE_ERR_PARAM;
322 /* round up the width to the next TCAM width boundary. */
323 width = ROUND_UP(params->width, (u16)ICE_AQC_ACL_KEY_WIDTH_BYTES);
324 /* depth should be provided in chunk (64 entry) increments */
325 depth = ICE_ALIGN(params->depth, ICE_ACL_ENTRY_ALLOC_UNIT);
327 if (params->entry_act_pairs < width / ICE_AQC_ACL_KEY_WIDTH_BYTES) {
328 params->entry_act_pairs = width / ICE_AQC_ACL_KEY_WIDTH_BYTES;
330 if (params->entry_act_pairs > ICE_AQC_TBL_MAX_ACTION_PAIRS)
331 params->entry_act_pairs = ICE_AQC_TBL_MAX_ACTION_PAIRS;
334 /* Validate that width*depth will not exceed the TCAM limit */
335 if ((DIVIDE_AND_ROUND_UP(depth, ICE_AQC_ACL_TCAM_DEPTH) *
336 (width / ICE_AQC_ACL_KEY_WIDTH_BYTES)) > ICE_AQC_ACL_SLICES)
337 return ICE_ERR_MAX_LIMIT;
339 ice_memset(&tbl_alloc, 0, sizeof(tbl_alloc), ICE_NONDMA_MEM);
340 tbl_alloc.width = width;
341 tbl_alloc.depth = depth;
342 tbl_alloc.act_pairs_per_entry = params->entry_act_pairs;
343 tbl_alloc.concurr = params->concurr;
344 /* Set dependent_alloc_id only for concurrent table type */
345 if (params->concurr) {
346 tbl_alloc.num_dependent_alloc_ids =
347 ICE_AQC_MAX_CONCURRENT_ACL_TBL;
349 for (i = 0; i < ICE_AQC_MAX_CONCURRENT_ACL_TBL; i++)
350 tbl_alloc.buf.data_buf.alloc_ids[i] =
351 CPU_TO_LE16(params->dep_tbls[i]);
354 /* call the aq command to create the ACL table with these values */
355 status = ice_aq_alloc_acl_tbl(hw, &tbl_alloc, NULL);
358 if (LE16_TO_CPU(tbl_alloc.buf.resp_buf.alloc_id) <
359 ICE_AQC_ALLOC_ID_LESS_THAN_4K)
360 ice_debug(hw, ICE_DBG_ACL,
361 "Alloc ACL table failed. Unavailable resource.\n");
363 ice_debug(hw, ICE_DBG_ACL,
364 "AQ allocation of ACL failed with error. status: %d\n",
369 tbl = (struct ice_acl_tbl *)ice_malloc(hw, sizeof(*tbl));
371 status = ICE_ERR_NO_MEMORY;
376 resp_buf = &tbl_alloc.buf.resp_buf;
378 /* Retrieve information of the allocated table */
379 tbl->id = LE16_TO_CPU(resp_buf->alloc_id);
380 tbl->first_tcam = resp_buf->ops.table.first_tcam;
381 tbl->last_tcam = resp_buf->ops.table.last_tcam;
382 tbl->first_entry = LE16_TO_CPU(resp_buf->first_entry);
383 tbl->last_entry = LE16_TO_CPU(resp_buf->last_entry);
386 tbl->info.width = width;
387 tbl->info.depth = depth;
390 for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++)
391 tbl->act_mems[i].act_mem = resp_buf->act_mem[i];
393 /* Figure out which TCAMs that these newly allocated action memories
396 ice_acl_divide_act_mems_to_tcams(tbl);
398 /* Initialize the resources allocated by invalidating all TCAM entries
399 * and all the action pairs
401 status = ice_acl_init_tbl(hw);
405 ice_debug(hw, ICE_DBG_ACL,
406 "Initialization of TCAM entries failed. status: %d\n",
411 first_e = (tbl->first_tcam * ICE_AQC_MAX_TCAM_ALLOC_UNITS) +
412 (tbl->first_entry / ICE_ACL_ENTRY_ALLOC_UNIT);
413 last_e = (tbl->last_tcam * ICE_AQC_MAX_TCAM_ALLOC_UNITS) +
414 (tbl->last_entry / ICE_ACL_ENTRY_ALLOC_UNIT);
416 /* Indicate available entries in the table */
417 for (i = first_e; i <= last_e; i++)
418 ice_set_bit(i, tbl->avail);
420 INIT_LIST_HEAD(&tbl->scens);
427 * ice_acl_alloc_partition - Allocate a partition from the ACL table
428 * @hw: pointer to the hardware structure
429 * @req: info of partition being allocated
431 static enum ice_status
432 ice_acl_alloc_partition(struct ice_hw *hw, struct ice_acl_scen *req)
434 u16 start = 0, cnt = 0, off = 0;
435 u16 width, r_entries, row;
439 /* Determine the number of TCAMs each entry overlaps */
440 width = DIVIDE_AND_ROUND_UP(req->width, ICE_AQC_ACL_KEY_WIDTH_BYTES);
442 /* Check if we have enough TCAMs to accommodate the width */
443 if (width > hw->acl_tbl->last_tcam - hw->acl_tbl->first_tcam + 1)
444 return ICE_ERR_MAX_LIMIT;
446 /* Number of entries must be multiple of ICE_ACL_ENTRY_ALLOC_UNIT's */
447 r_entries = ICE_ALIGN(req->num_entry, ICE_ACL_ENTRY_ALLOC_UNIT);
449 /* To look for an available partition that can accommodate the request,
450 * the process first logically arranges available TCAMs in rows such
451 * that each row produces entries with the requested width. It then
452 * scans the TCAMs' available bitmap, one bit at a time, and
453 * accumulates contiguous available 64-entry chunks until there are
454 * enough of them or when all TCAM configurations have been checked.
456 * For width of 1 TCAM, the scanning process starts from the top most
457 * TCAM, and goes downward. Available bitmaps are examined from LSB
460 * For width of multiple TCAMs, the process starts from the bottom-most
461 * row of TCAMs, and goes upward. Available bitmaps are examined from
462 * the MSB to the LSB.
464 * To make sure that adjacent TCAMs can be logically arranged in the
465 * same row, the scanning process may have multiple passes. In each
466 * pass, the first TCAM of the bottom-most row is displaced by one
467 * additional TCAM. The width of the row and the number of the TCAMs
468 * available determine the number of passes. When the displacement is
469 * more than the size of width, the TCAM row configurations will
470 * repeat. The process will terminate when the configurations repeat.
472 * Available partitions can span more than one row of TCAMs.
475 row = hw->acl_tbl->first_tcam;
478 /* Start with the bottom-most row, and scan for available
481 row = hw->acl_tbl->last_tcam + 1 - width;
488 /* Scan all 64-entry chunks, one chunk at a time, in the
492 i < ICE_AQC_MAX_TCAM_ALLOC_UNITS && cnt < r_entries;
497 /* Compute the cumulative available mask across the
498 * TCAM row to determine if the current 64-entry chunk
501 p = dir > 0 ? i : ICE_AQC_MAX_TCAM_ALLOC_UNITS - i - 1;
502 for (w = row; w < row + width && avail; w++) {
505 b = (w * ICE_AQC_MAX_TCAM_ALLOC_UNITS) + p;
506 avail &= ice_is_bit_set(hw->acl_tbl->avail, b);
512 /* Compute the starting index of the newly
513 * found partition. When 'dir' is negative, the
514 * scan processes is going upward. If so, the
515 * starting index needs to be updated for every
516 * available 64-entry chunk found.
519 start = (row * ICE_AQC_ACL_TCAM_DEPTH) +
520 (p * ICE_ACL_ENTRY_ALLOC_UNIT);
521 cnt += ICE_ACL_ENTRY_ALLOC_UNIT;
525 if (cnt >= r_entries) {
527 req->num_entry = r_entries;
528 req->end = ice_acl_tbl_calc_end_idx(start, r_entries,
533 row = (dir > 0) ? (row + width) : (row - width);
534 if (row > hw->acl_tbl->last_tcam ||
535 row < hw->acl_tbl->first_tcam) {
536 /* All rows have been checked. Increment 'off' that
537 * will help yield a different TCAM configuration in
538 * which adjacent TCAMs can be alternatively in the
543 /* However, if the new 'off' value yields previously
544 * checked configurations, then exit.
549 row = dir > 0 ? off :
550 hw->acl_tbl->last_tcam + 1 - off -
555 return cnt >= r_entries ? ICE_SUCCESS : ICE_ERR_MAX_LIMIT;
559 * ice_acl_fill_tcam_select
560 * @scen_buf: Pointer to the scenario buffer that needs to be populated
561 * @scen: Pointer to the available space for the scenario
562 * @tcam_idx: Index of the TCAM used for this scenario
563 * @tcam_idx_in_cascade : Local index of the TCAM in the cascade scenario
565 * For all TCAM that participate in this scenario, fill out the tcam_select
569 ice_acl_fill_tcam_select(struct ice_aqc_acl_scen *scen_buf,
570 struct ice_acl_scen *scen, u16 tcam_idx,
571 u16 tcam_idx_in_cascade)
573 u16 cascade_cnt, idx;
576 idx = tcam_idx_in_cascade * ICE_AQC_ACL_KEY_WIDTH_BYTES;
577 cascade_cnt = DIVIDE_AND_ROUND_UP(scen->width,
578 ICE_AQC_ACL_KEY_WIDTH_BYTES);
580 /* For each scenario, we reserved last three bytes of scenario width for
581 * profile ID, range checker, and packet direction. Thus, the last three
582 * bytes of the last cascaded TCAMs will have value of 1st, 31st and
583 * 32nd byte location of BYTE selection base.
585 * For other bytes in the TCAMs:
586 * For non-cascade mode (1 TCAM wide) scenario, TCAM[x]'s Select {0-1}
587 * select indices 0-1 of the Byte Selection Base
588 * For cascade mode, the leftmost TCAM of the first cascade row selects
589 * indices 0-4 of the Byte Selection Base; the second TCAM in the
590 * cascade row selects indices starting with 5-n
592 for (j = 0; j < ICE_AQC_ACL_KEY_WIDTH_BYTES; j++) {
593 /* PKT DIR uses the 1st location of Byte Selection Base: + 1 */
594 u8 val = ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx;
596 if (tcam_idx_in_cascade == cascade_cnt - 1) {
597 if (j == ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM)
598 val = ICE_AQC_ACL_BYTE_SEL_BASE_RNG_CHK;
599 else if (j == ICE_ACL_SCEN_PID_IDX_IN_TCAM)
600 val = ICE_AQC_ACL_BYTE_SEL_BASE_PID;
601 else if (j == ICE_ACL_SCEN_PKT_DIR_IDX_IN_TCAM)
602 val = ICE_AQC_ACL_BYTE_SEL_BASE_PKT_DIR;
605 /* In case that scenario's width is greater than the width of
606 * the Byte selection base, we will not assign a value to the
607 * tcam_select[j]. As a result, the tcam_select[j] will have
608 * default value which is zero.
610 if (val > ICE_AQC_ACL_BYTE_SEL_BASE_RNG_CHK)
613 scen_buf->tcam_cfg[tcam_idx].tcam_select[j] = val;
620 * ice_acl_set_scen_chnk_msk
621 * @scen_buf: Pointer to the scenario buffer that needs to be populated
622 * @scen: pointer to the available space for the scenario
624 * Set the chunk mask for the entries that will be used by this scenario
627 ice_acl_set_scen_chnk_msk(struct ice_aqc_acl_scen *scen_buf,
628 struct ice_acl_scen *scen)
630 u16 tcam_idx, num_cscd, units, cnt;
633 /* Determine the starting TCAM index and offset of the start entry */
634 tcam_idx = ICE_ACL_TBL_TCAM_IDX(scen->start);
635 chnk_offst = (u8)((scen->start % ICE_AQC_ACL_TCAM_DEPTH) /
636 ICE_ACL_ENTRY_ALLOC_UNIT);
638 /* Entries are allocated and tracked in multiple of 64's */
639 units = scen->num_entry / ICE_ACL_ENTRY_ALLOC_UNIT;
641 /* Determine number of cascaded TCAMs */
642 num_cscd = scen->width / ICE_AQC_ACL_KEY_WIDTH_BYTES;
644 for (cnt = 0; cnt < units; cnt++) {
647 /* Set the corresponding bitmap of individual 64-entry
648 * chunk spans across a cascade of 1 or more TCAMs
649 * For each TCAM, there will be (ICE_AQC_ACL_TCAM_DEPTH
650 * / ICE_ACL_ENTRY_ALLOC_UNIT) or 8 chunks.
652 for (i = tcam_idx; i < tcam_idx + num_cscd; i++)
653 scen_buf->tcam_cfg[i].chnk_msk |= BIT(chnk_offst);
655 chnk_offst = (chnk_offst + 1) % ICE_AQC_MAX_TCAM_ALLOC_UNITS;
657 tcam_idx += num_cscd;
662 * ice_acl_assign_act_mem_for_scen
663 * @tbl: pointer to acl table structure
664 * @scen: pointer to the scenario struct
665 * @scen_buf: pointer to the available space for the scenario
666 * @current_tcam_idx: theoretical index of the TCAM that we associated those
667 * action memory banks with, at the table creation time.
668 * @target_tcam_idx: index of the TCAM that we want to associate those action
672 ice_acl_assign_act_mem_for_scen(struct ice_acl_tbl *tbl,
673 struct ice_acl_scen *scen,
674 struct ice_aqc_acl_scen *scen_buf,
680 for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++) {
681 struct ice_acl_act_mem *p_mem = &tbl->act_mems[i];
683 if (p_mem->act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL ||
684 p_mem->member_of_tcam != current_tcam_idx)
687 scen_buf->act_mem_cfg[i] = target_tcam_idx;
688 scen_buf->act_mem_cfg[i] |= ICE_AQC_ACL_SCE_ACT_MEM_EN;
689 ice_set_bit(i, scen->act_mem_bitmap);
694 * ice_acl_commit_partition - Indicate if the specified partition is active
695 * @hw: pointer to the hardware structure
696 * @scen: pointer to the scenario struct
697 * @commit: true if the partition is being commit
700 ice_acl_commit_partition(struct ice_hw *hw, struct ice_acl_scen *scen,
703 u16 tcam_idx, off, num_cscd, units, cnt;
705 /* Determine the starting TCAM index and offset of the start entry */
706 tcam_idx = ICE_ACL_TBL_TCAM_IDX(scen->start);
707 off = (scen->start % ICE_AQC_ACL_TCAM_DEPTH) /
708 ICE_ACL_ENTRY_ALLOC_UNIT;
710 /* Entries are allocated and tracked in multiple of 64's */
711 units = scen->num_entry / ICE_ACL_ENTRY_ALLOC_UNIT;
713 /* Determine number of cascaded TCAM */
714 num_cscd = scen->width / ICE_AQC_ACL_KEY_WIDTH_BYTES;
716 for (cnt = 0; cnt < units; cnt++) {
719 /* Set/clear the corresponding bitmap of individual 64-entry
720 * chunk spans across a row of 1 or more TCAMs
722 for (w = 0; w < num_cscd; w++) {
725 b = ((tcam_idx + w) * ICE_AQC_MAX_TCAM_ALLOC_UNITS) +
728 ice_set_bit(b, hw->acl_tbl->avail);
730 ice_clear_bit(b, hw->acl_tbl->avail);
733 off = (off + 1) % ICE_AQC_MAX_TCAM_ALLOC_UNITS;
735 tcam_idx += num_cscd;
740 * ice_acl_create_scen
741 * @hw: pointer to the hardware structure
742 * @match_width: number of bytes to be matched in this scenario
743 * @num_entries: number of entries to be allocated for the scenario
744 * @scen_id: holds returned scenario ID if successful
747 ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries,
750 u8 cascade_cnt, first_tcam, last_tcam, i, k;
751 struct ice_aqc_acl_scen scen_buf;
752 struct ice_acl_scen *scen;
753 enum ice_status status;
756 return ICE_ERR_DOES_NOT_EXIST;
758 scen = (struct ice_acl_scen *)ice_malloc(hw, sizeof(*scen));
760 return ICE_ERR_NO_MEMORY;
762 scen->start = hw->acl_tbl->first_entry;
763 scen->width = ICE_AQC_ACL_KEY_WIDTH_BYTES *
764 DIVIDE_AND_ROUND_UP(match_width, ICE_AQC_ACL_KEY_WIDTH_BYTES);
765 scen->num_entry = num_entries;
767 status = ice_acl_alloc_partition(hw, scen);
773 ice_memset(&scen_buf, 0, sizeof(scen_buf), ICE_NONDMA_MEM);
775 /* Determine the number of cascade TCAMs, given the scenario's width */
776 cascade_cnt = DIVIDE_AND_ROUND_UP(scen->width,
777 ICE_AQC_ACL_KEY_WIDTH_BYTES);
778 first_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
779 last_tcam = ICE_ACL_TBL_TCAM_IDX(scen->end);
781 /* For each scenario, we reserved last three bytes of scenario width for
782 * packet direction flag, profile ID and range checker. Thus, we want to
783 * return back to the caller the eff_width, pkt_dir_idx, rng_chk_idx and
786 scen->eff_width = cascade_cnt * ICE_AQC_ACL_KEY_WIDTH_BYTES -
787 ICE_ACL_SCEN_MIN_WIDTH;
788 scen->rng_chk_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES +
789 ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM;
790 scen->pid_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES +
791 ICE_ACL_SCEN_PID_IDX_IN_TCAM;
792 scen->pkt_dir_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES +
793 ICE_ACL_SCEN_PKT_DIR_IDX_IN_TCAM;
795 /* set the chunk mask for the tcams */
796 ice_acl_set_scen_chnk_msk(&scen_buf, scen);
798 /* set the TCAM select and start_cmp and start_set bits */
800 /* set the START_SET bit at the beginning of the stack */
801 scen_buf.tcam_cfg[k].start_cmp_set |= ICE_AQC_ACL_ALLOC_SCE_START_SET;
802 while (k <= last_tcam) {
803 u8 last_tcam_idx_cascade = cascade_cnt + k - 1;
805 /* set start_cmp for the first cascaded TCAM */
806 scen_buf.tcam_cfg[k].start_cmp_set |=
807 ICE_AQC_ACL_ALLOC_SCE_START_CMP;
809 /* cascade TCAMs up to the width of the scenario */
810 for (i = k; i < cascade_cnt + k; i++) {
811 ice_acl_fill_tcam_select(&scen_buf, scen, i, i - k);
812 ice_acl_assign_act_mem_for_scen(hw->acl_tbl, scen,
815 last_tcam_idx_cascade);
821 /* We need to set the start_cmp bit for the unused TCAMs. */
823 while (i < first_tcam)
824 scen_buf.tcam_cfg[i++].start_cmp_set =
825 ICE_AQC_ACL_ALLOC_SCE_START_CMP;
828 while (i < ICE_AQC_ACL_SLICES)
829 scen_buf.tcam_cfg[i++].start_cmp_set =
830 ICE_AQC_ACL_ALLOC_SCE_START_CMP;
832 status = ice_aq_alloc_acl_scen(hw, scen_id, &scen_buf, NULL);
834 ice_debug(hw, ICE_DBG_ACL,
835 "AQ allocation of ACL scenario failed. status: %d\n",
842 ice_acl_commit_partition(hw, scen, false);
843 ice_acl_init_entry(scen);
844 LIST_ADD(&scen->list_entry, &hw->acl_tbl->scens);
850 * ice_acl_destroy_tbl - Destroy a previously created LEM table for ACL
851 * @hw: pointer to the HW struct
853 enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw)
855 struct ice_acl_scen *pos_scen, *tmp_scen;
856 struct ice_aqc_acl_generic resp_buf;
857 struct ice_aqc_acl_scen buf;
858 enum ice_status status;
862 return ICE_ERR_DOES_NOT_EXIST;
864 /* Mark all the created scenario's TCAM to stop the packet lookup and
865 * delete them afterward
867 LIST_FOR_EACH_ENTRY_SAFE(pos_scen, tmp_scen, &hw->acl_tbl->scens,
868 ice_acl_scen, list_entry) {
869 status = ice_aq_query_acl_scen(hw, pos_scen->id, &buf, NULL);
871 ice_debug(hw, ICE_DBG_ACL, "ice_aq_query_acl_scen() failed. status: %d\n",
876 for (i = 0; i < ICE_AQC_ACL_SLICES; i++) {
877 buf.tcam_cfg[i].chnk_msk = 0;
878 buf.tcam_cfg[i].start_cmp_set =
879 ICE_AQC_ACL_ALLOC_SCE_START_CMP;
882 for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++)
883 buf.act_mem_cfg[i] = 0;
885 status = ice_aq_update_acl_scen(hw, pos_scen->id, &buf, NULL);
887 ice_debug(hw, ICE_DBG_ACL, "ice_aq_update_acl_scen() failed. status: %d\n",
892 status = ice_acl_destroy_scen(hw, pos_scen->id);
894 ice_debug(hw, ICE_DBG_ACL, "deletion of scenario failed. status: %d\n",
900 /* call the aq command to destroy the ACL table */
901 status = ice_aq_dealloc_acl_tbl(hw, hw->acl_tbl->id, &resp_buf, NULL);
904 ice_debug(hw, ICE_DBG_ACL,
905 "AQ de-allocation of ACL failed. status: %d\n",
910 ice_free(hw, hw->acl_tbl);
917 * ice_acl_add_entry - Add a flow entry to an ACL scenario
918 * @hw: pointer to the HW struct
919 * @scen: scenario to add the entry to
920 * @prior: priority level of the entry being added
921 * @keys: buffer of the value of the key to be programmed to the ACL entry
922 * @inverts: buffer of the value of the key inverts to be programmed
923 * @acts: pointer to a buffer containing formatted actions
924 * @acts_cnt: indicates the number of actions stored in "acts"
925 * @entry_idx: returned scenario relative index of the added flow entry
927 * Given an ACL table and a scenario, to add the specified key and key invert
928 * to an available entry in the specified scenario.
929 * The "keys" and "inverts" buffers must be of the size which is the same as
930 * the scenario's width
933 ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen,
934 enum ice_acl_entry_prior prior, u8 *keys, u8 *inverts,
935 struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx)
937 u8 i, entry_tcam, num_cscd, idx, offset;
938 struct ice_aqc_acl_data buf;
939 enum ice_status status = ICE_SUCCESS;
942 return ICE_ERR_DOES_NOT_EXIST;
944 *entry_idx = ice_acl_scen_assign_entry_idx(scen, prior);
945 if (*entry_idx >= scen->num_entry) {
947 return ICE_ERR_MAX_LIMIT;
950 /* Determine number of cascaded TCAMs */
951 num_cscd = DIVIDE_AND_ROUND_UP(scen->width,
952 ICE_AQC_ACL_KEY_WIDTH_BYTES);
954 entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
955 idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + *entry_idx);
957 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
958 for (i = 0; i < num_cscd; i++) {
959 /* If the key spans more than one TCAM in the case of cascaded
960 * TCAMs, the key and key inverts need to be properly split
961 * among TCAMs.E.g.bytes 0 - 4 go to an index in the first TCAM
962 * and bytes 5 - 9 go to the same index in the next TCAM, etc.
963 * If the entry spans more than one TCAM in a cascaded TCAM
964 * mode, the programming of the entries in the TCAMs must be in
965 * reversed order - the TCAM entry of the rightmost TCAM should
966 * be programmed first; the TCAM entry of the leftmost TCAM
967 * should be programmed last.
969 offset = num_cscd - i - 1;
970 ice_memcpy(&buf.entry_key.val,
971 &keys[offset * sizeof(buf.entry_key.val)],
972 sizeof(buf.entry_key.val), ICE_NONDMA_TO_NONDMA);
973 ice_memcpy(&buf.entry_key_invert.val,
974 &inverts[offset * sizeof(buf.entry_key_invert.val)],
975 sizeof(buf.entry_key_invert.val),
976 ICE_NONDMA_TO_NONDMA);
977 status = ice_aq_program_acl_entry(hw, entry_tcam + offset, idx,
980 ice_debug(hw, ICE_DBG_ACL,
981 "aq program acl entry failed status: %d\n",
987 /* Program the action memory */
988 status = ice_acl_prog_act(hw, scen, acts, acts_cnt, *entry_idx);
992 ice_acl_rem_entry(hw, scen, *entry_idx);
1000 * ice_acl_prog_act - Program a scenario's action memory
1001 * @hw: pointer to the HW struct
1002 * @scen: scenario to add the entry to
1003 * @acts: pointer to a buffer containing formatted actions
1004 * @acts_cnt: indicates the number of actions stored in "acts"
1005 * @entry_idx: scenario relative index of the added flow entry
1007 * Program a scenario's action memory
1010 ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
1011 struct ice_acl_act_entry *acts, u8 acts_cnt,
1014 u8 entry_tcam, num_cscd, i, actx_idx = 0;
1015 struct ice_aqc_actpair act_buf;
1016 enum ice_status status = ICE_SUCCESS;
1019 if (entry_idx >= scen->num_entry)
1020 return ICE_ERR_MAX_LIMIT;
1022 ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM);
1024 /* Determine number of cascaded TCAMs */
1025 num_cscd = DIVIDE_AND_ROUND_UP(scen->width,
1026 ICE_AQC_ACL_KEY_WIDTH_BYTES);
1028 entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
1029 idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + entry_idx);
1031 i = ice_find_first_bit(scen->act_mem_bitmap,
1032 ICE_AQC_MAX_ACTION_MEMORIES);
1033 while (i < ICE_AQC_MAX_ACTION_MEMORIES) {
1034 struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i];
1036 if (actx_idx >= acts_cnt)
1038 if (mem->member_of_tcam >= entry_tcam &&
1039 mem->member_of_tcam < entry_tcam + num_cscd) {
1040 ice_memcpy(&act_buf.act[0], &acts[actx_idx],
1041 sizeof(struct ice_acl_act_entry),
1042 ICE_NONDMA_TO_NONDMA);
1044 if (++actx_idx < acts_cnt) {
1045 ice_memcpy(&act_buf.act[1], &acts[actx_idx],
1046 sizeof(struct ice_acl_act_entry),
1047 ICE_NONDMA_TO_NONDMA);
1050 status = ice_aq_program_actpair(hw, i, idx, &act_buf,
1053 ice_debug(hw, ICE_DBG_ACL,
1054 "program actpair failed status: %d\n",
1061 i = ice_find_next_bit(scen->act_mem_bitmap,
1062 ICE_AQC_MAX_ACTION_MEMORIES, i + 1);
1065 if (!status && actx_idx < acts_cnt)
1066 status = ICE_ERR_MAX_LIMIT;
1072 * ice_acl_rem_entry - Remove a flow entry from an ACL scenario
1073 * @hw: pointer to the HW struct
1074 * @scen: scenario to remove the entry from
1075 * @entry_idx: the scenario-relative index of the flow entry being removed
1078 ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx)
1080 struct ice_aqc_actpair act_buf;
1081 struct ice_aqc_acl_data buf;
1082 u8 entry_tcam, num_cscd, i;
1083 enum ice_status status = ICE_SUCCESS;
1087 return ICE_ERR_DOES_NOT_EXIST;
1089 if (entry_idx >= scen->num_entry)
1090 return ICE_ERR_MAX_LIMIT;
1092 if (!ice_is_bit_set(scen->entry_bitmap, entry_idx))
1093 return ICE_ERR_DOES_NOT_EXIST;
1095 /* Determine number of cascaded TCAMs */
1096 num_cscd = DIVIDE_AND_ROUND_UP(scen->width,
1097 ICE_AQC_ACL_KEY_WIDTH_BYTES);
1099 entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
1100 idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + entry_idx);
1102 /* invalidate the flow entry */
1103 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1104 for (i = 0; i < num_cscd; i++) {
1105 status = ice_aq_program_acl_entry(hw, entry_tcam + i, idx, &buf,
1108 ice_debug(hw, ICE_DBG_ACL,
1109 "aq program acl entry failed status: %d\n",
1113 ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM);
1114 i = ice_find_first_bit(scen->act_mem_bitmap,
1115 ICE_AQC_MAX_ACTION_MEMORIES);
1116 while (i < ICE_AQC_MAX_ACTION_MEMORIES) {
1117 struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i];
1119 if (mem->member_of_tcam >= entry_tcam &&
1120 mem->member_of_tcam < entry_tcam + num_cscd) {
1121 /* Invalidate allocated action pairs */
1122 status = ice_aq_program_actpair(hw, i, idx, &act_buf,
1125 ice_debug(hw, ICE_DBG_ACL,
1126 "program actpair failed.status: %d\n",
1130 i = ice_find_next_bit(scen->act_mem_bitmap,
1131 ICE_AQC_MAX_ACTION_MEMORIES, i + 1);
1134 ice_acl_scen_free_entry_idx(scen, entry_idx);
1140 * ice_acl_destroy_scen - Destroy an ACL scenario
1141 * @hw: pointer to the HW struct
1142 * @scen_id: ID of the remove scenario
1144 enum ice_status ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id)
1146 struct ice_acl_scen *scen, *tmp_scen;
1147 struct ice_flow_prof *p, *tmp;
1148 enum ice_status status;
1151 return ICE_ERR_DOES_NOT_EXIST;
1153 /* Remove profiles that use "scen_id" scenario */
1154 LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[ICE_BLK_ACL],
1155 ice_flow_prof, l_entry)
1156 if (p->cfg.scen && p->cfg.scen->id == scen_id) {
1157 status = ice_flow_rem_prof(hw, ICE_BLK_ACL, p->id);
1159 ice_debug(hw, ICE_DBG_ACL,
1160 "ice_flow_rem_prof failed. status: %d\n",
1166 /* Call the aq command to destroy the targeted scenario */
1167 status = ice_aq_dealloc_acl_scen(hw, scen_id, NULL);
1170 ice_debug(hw, ICE_DBG_ACL,
1171 "AQ de-allocation of scenario failed. status: %d\n",
1176 /* Remove scenario from hw->acl_tbl->scens */
1177 LIST_FOR_EACH_ENTRY_SAFE(scen, tmp_scen, &hw->acl_tbl->scens,
1178 ice_acl_scen, list_entry)
1179 if (scen->id == scen_id) {
1180 LIST_DEL(&scen->list_entry);