1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_common.h"
6 #include "ice_flex_pipe.h"
7 #include "ice_protocol_type.h"
10 /* For supporting double VLAN mode, it is necessary to enable or disable certain
11 * boost TCAM entries. The metadata label names that match the following
12 * prefixes will be saved to allow enabling double VLAN mode.
14 #define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
15 #define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
17 /* To support tunneling entries per PF, the package will append the PF number to
18 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
20 #define ICE_TNL_PRE "TNL_"
21 static const struct ice_tunnel_type_scan tnls[] = {
22 { TNL_VXLAN, "TNL_VXLAN_PF" },
23 { TNL_GENEVE, "TNL_GENEVE_PF" },
24 { TNL_ECPRI, "TNL_UDP_ECPRI_PF" },
28 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
32 ICE_SID_XLT_KEY_BUILDER_SW,
35 ICE_SID_PROFID_TCAM_SW,
36 ICE_SID_PROFID_REDIR_SW,
38 ICE_SID_CDID_KEY_BUILDER_SW,
45 ICE_SID_XLT_KEY_BUILDER_ACL,
48 ICE_SID_PROFID_TCAM_ACL,
49 ICE_SID_PROFID_REDIR_ACL,
51 ICE_SID_CDID_KEY_BUILDER_ACL,
52 ICE_SID_CDID_REDIR_ACL
58 ICE_SID_XLT_KEY_BUILDER_FD,
61 ICE_SID_PROFID_TCAM_FD,
62 ICE_SID_PROFID_REDIR_FD,
64 ICE_SID_CDID_KEY_BUILDER_FD,
71 ICE_SID_XLT_KEY_BUILDER_RSS,
74 ICE_SID_PROFID_TCAM_RSS,
75 ICE_SID_PROFID_REDIR_RSS,
77 ICE_SID_CDID_KEY_BUILDER_RSS,
78 ICE_SID_CDID_REDIR_RSS
84 ICE_SID_XLT_KEY_BUILDER_PE,
87 ICE_SID_PROFID_TCAM_PE,
88 ICE_SID_PROFID_REDIR_PE,
90 ICE_SID_CDID_KEY_BUILDER_PE,
96 * ice_sect_id - returns section ID
100 * This helper function returns the proper section ID given a block type and a section type.
103 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
105 return ice_sect_lkup[blk][sect];
110 * @buf: pointer to the ice buffer
112 * This helper function validates a buffer's header.
114 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
116 struct ice_buf_hdr *hdr;
120 hdr = (struct ice_buf_hdr *)buf->buf;
122 section_count = LE16_TO_CPU(hdr->section_count);
123 if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
126 data_end = LE16_TO_CPU(hdr->data_end);
127 if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
135 * @ice_seg: pointer to the ice segment
137 * Returns the address of the buffer table within the ice segment.
139 static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
141 struct ice_nvm_table *nvms;
143 nvms = (struct ice_nvm_table *)
144 (ice_seg->device_table +
145 LE32_TO_CPU(ice_seg->device_table_count));
147 return (_FORCE_ struct ice_buf_table *)
148 (nvms->vers + LE32_TO_CPU(nvms->table_count));
153 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
154 * @state: pointer to the enum state
156 * This function will enumerate all the buffers in the ice segment. The first
157 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
158 * ice_seg is set to NULL which continues the enumeration. When the function
159 * returns a NULL pointer, then the end of the buffers has been reached, or an
160 * unexpected value has been detected (for example an invalid section count or
161 * an invalid buffer end value).
163 static struct ice_buf_hdr *
164 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
167 state->buf_table = ice_find_buf_table(ice_seg);
168 if (!state->buf_table)
172 return ice_pkg_val_buf(state->buf_table->buf_array);
175 if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
176 return ice_pkg_val_buf(state->buf_table->buf_array +
183 * ice_pkg_advance_sect
184 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
185 * @state: pointer to the enum state
187 * This helper function will advance the section within the ice segment,
188 * also advancing the buffer if needed.
191 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
193 if (!ice_seg && !state->buf)
196 if (!ice_seg && state->buf)
197 if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
200 state->buf = ice_pkg_enum_buf(ice_seg, state);
204 /* start of new buffer, reset section index */
210 * ice_pkg_enum_section
211 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
212 * @state: pointer to the enum state
213 * @sect_type: section type to enumerate
215 * This function will enumerate all the sections of a particular type in the
216 * ice segment. The first call is made with the ice_seg parameter non-NULL;
217 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
218 * When the function returns a NULL pointer, then the end of the matching
219 * sections has been reached.
222 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
228 state->type = sect_type;
230 if (!ice_pkg_advance_sect(ice_seg, state))
233 /* scan for next matching section */
234 while (state->buf->section_entry[state->sect_idx].type !=
235 CPU_TO_LE32(state->type))
236 if (!ice_pkg_advance_sect(NULL, state))
239 /* validate section */
240 offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
241 if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
244 size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
245 if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
248 /* make sure the section fits in the buffer */
249 if (offset + size > ICE_PKG_BUF_SIZE)
253 LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
255 /* calc pointer to this section */
256 state->sect = ((u8 *)state->buf) +
257 LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
264 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
265 * @state: pointer to the enum state
266 * @sect_type: section type to enumerate
267 * @offset: pointer to variable that receives the offset in the table (optional)
268 * @handler: function that handles access to the entries into the section type
270 * This function will enumerate all the entries of a particular section type in
271 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
272 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
273 * When the function returns a NULL pointer, the end of the entries has been reached.
276 * Since each section may have a different header and entry size, the handler
277 * function is needed to determine the number and location of entries in each section type.
280 * The offset parameter is optional, but should be used for sections that
281 * contain an offset for each section table. For such cases, the section handler
282 * function must return the appropriate offset + index to give the absolute
283 * offset for each entry. For example, if the base for a section's header
284 * indicates a base offset of 10, and the index for the entry is 2, then
285 * the section handler function should set the offset to 10 + 2 = 12.
288 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
289 u32 sect_type, u32 *offset,
290 void *(*handler)(u32 sect_type, void *section,
291 u32 index, u32 *offset))
299 if (!ice_pkg_enum_section(ice_seg, state, sect_type))
302 state->entry_idx = 0;
303 state->handler = handler;
312 entry = state->handler(state->sect_type, state->sect, state->entry_idx,
315 /* end of a section, look for another section of this type */
316 if (!ice_pkg_enum_section(NULL, state, 0))
319 state->entry_idx = 0;
320 entry = state->handler(state->sect_type, state->sect,
321 state->entry_idx, offset);
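/* Illustrative usage sketch (not part of the driver): the enumeration helpers
 * above are driven by passing a non-NULL ice_seg on the first call and NULL on
 * every subsequent call. For example, walking all switch field vector entries
 * with the ice_sw_fv_handler callback defined later in this file:
 *
 *	struct ice_pkg_enum state;
 *	struct ice_fv *fv;
 *	u32 offset;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	fv = (struct ice_fv *)
 *		ice_pkg_enum_entry(hw->seg, &state, ICE_SID_FLD_VEC_SW,
 *				   &offset, ice_sw_fv_handler);
 *	while (fv) {
 *		// ... use fv and offset here ...
 *		fv = (struct ice_fv *)
 *			ice_pkg_enum_entry(NULL, &state, ICE_SID_FLD_VEC_SW,
 *					   &offset, ice_sw_fv_handler);
 *	}
 */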
328 * ice_hw_ptype_ena - check if the PTYPE is enabled or not
329 * @hw: pointer to the HW structure
330 * @ptype: the hardware PTYPE
332 bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
334 return ptype < ICE_FLOW_PTYPE_MAX &&
335 ice_is_bit_set(hw->hw_ptype, ptype);
339 * ice_marker_ptype_tcam_handler
340 * @sect_type: section type
341 * @section: pointer to section
342 * @index: index of the Marker PType TCAM entry to be returned
343 * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
345 * This is a callback function that can be passed to ice_pkg_enum_entry.
346 * Handles enumeration of individual Marker PType TCAM entries.
349 ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
352 struct ice_marker_ptype_tcam_section *marker_ptype;
357 if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
360 if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
366 marker_ptype = (struct ice_marker_ptype_tcam_section *)section;
367 if (index >= LE16_TO_CPU(marker_ptype->count))
370 return marker_ptype->tcam + index;
374 * ice_fill_hw_ptype - fill the enabled PTYPE bit information
375 * @hw: pointer to the HW structure
378 ice_fill_hw_ptype(struct ice_hw *hw)
380 struct ice_marker_ptype_tcam_entry *tcam;
381 struct ice_seg *seg = hw->seg;
382 struct ice_pkg_enum state;
384 ice_zero_bitmap(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
388 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
391 tcam = (struct ice_marker_ptype_tcam_entry *)
392 ice_pkg_enum_entry(seg, &state,
393 ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
394 ice_marker_ptype_tcam_handler);
396 LE16_TO_CPU(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
397 LE16_TO_CPU(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
398 ice_set_bit(LE16_TO_CPU(tcam->ptype), hw->hw_ptype);
405 * ice_boost_tcam_handler
406 * @sect_type: section type
407 * @section: pointer to section
408 * @index: index of the boost TCAM entry to be returned
409 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
411 * This is a callback function that can be passed to ice_pkg_enum_entry.
412 * Handles enumeration of individual boost TCAM entries.
415 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
417 struct ice_boost_tcam_section *boost;
422 if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
425 if (index > ICE_MAX_BST_TCAMS_IN_BUF)
431 boost = (struct ice_boost_tcam_section *)section;
432 if (index >= LE16_TO_CPU(boost->count))
435 return boost->tcam + index;
439 * ice_find_boost_entry
440 * @ice_seg: pointer to the ice segment (non-NULL)
441 * @addr: Boost TCAM address of entry to search for
442 * @entry: returns pointer to the entry
444 * Finds a particular Boost TCAM entry and returns a pointer to that entry
445 * if it is found. The ice_seg parameter must not be NULL since the first call
446 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
448 static enum ice_status
449 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
450 struct ice_boost_tcam_entry **entry)
452 struct ice_boost_tcam_entry *tcam;
453 struct ice_pkg_enum state;
455 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
458 return ICE_ERR_PARAM;
461 tcam = (struct ice_boost_tcam_entry *)
462 ice_pkg_enum_entry(ice_seg, &state,
463 ICE_SID_RXPARSER_BOOST_TCAM, NULL,
464 ice_boost_tcam_handler);
465 if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
478 * ice_label_enum_handler
479 * @sect_type: section type
480 * @section: pointer to section
481 * @index: index of the label entry to be returned
482 * @offset: pointer to receive absolute offset, always zero for label sections
484 * This is a callback function that can be passed to ice_pkg_enum_entry.
485 * Handles enumeration of individual label entries.
488 ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
491 struct ice_label_section *labels;
496 if (index > ICE_MAX_LABELS_IN_BUF)
502 labels = (struct ice_label_section *)section;
503 if (index >= LE16_TO_CPU(labels->count))
506 return labels->label + index;
511 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
512 * @type: the section type that will contain the label (0 on subsequent calls)
513 * @state: ice_pkg_enum structure that will hold the state of the enumeration
514 * @value: pointer to a value that will return the label's value if found
516 * Enumerates a list of labels in the package. The caller will call
517 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
518 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
519 * the end of the list has been reached.
522 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
525 struct ice_label *label;
527 /* Check for valid label section on first call */
528 if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
531 label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
533 ice_label_enum_handler);
537 *value = LE16_TO_CPU(label->value);
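/* Illustrative sketch (not part of the driver): label enumeration follows the
 * same first-call/continuation pattern; this mirrors how ice_init_pkg_hints()
 * below scans the Rx parser TMEM labels:
 *
 *	struct ice_pkg_enum state;
 *	char *label_name;
 *	u16 val;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM,
 *				     &state, &val);
 *	while (label_name) {
 *		// ... inspect label_name and val here ...
 *		label_name = ice_enum_labels(NULL, 0, &state, &val);
 *	}
 */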
542 * ice_add_tunnel_hint
543 * @hw: pointer to the HW structure
544 * @label_name: label text
545 * @val: value of the tunnel port boost entry
547 static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
549 if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
552 for (i = 0; tnls[i].type != TNL_LAST; i++) {
553 size_t len = strlen(tnls[i].label_prefix);
555 /* Look for matching label start, before continuing */
556 if (strncmp(label_name, tnls[i].label_prefix, len))
559 /* Make sure this label matches our PF. Note that the PF
560 * character ('0' - '7') will be located where our
561 * prefix string's null terminator is located.
563 if ((label_name[len] - '0') == hw->pf_id) {
564 hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
565 hw->tnl.tbl[hw->tnl.count].valid = false;
566 hw->tnl.tbl[hw->tnl.count].in_use = false;
567 hw->tnl.tbl[hw->tnl.count].marked = false;
568 hw->tnl.tbl[hw->tnl.count].boost_addr = val;
569 hw->tnl.tbl[hw->tnl.count].port = 0;
579 * @hw: pointer to the HW structure
580 * @val: value of the boost entry
581 * @enable: true if entry needs to be enabled, or false if it needs to be disabled
583 static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
585 if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
586 hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
587 hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
594 * @hw: pointer to the HW structure
595 * @ice_seg: pointer to the segment of the package scan (non-NULL)
597 * This function will scan the package and save off relevant information
598 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
599 * since the first call to ice_enum_labels requires a pointer to an actual ice_seg structure.
602 static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
604 struct ice_pkg_enum state;
609 ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
610 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
615 label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
619 if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
620 /* check for a tunnel entry */
621 ice_add_tunnel_hint(hw, label_name, val);
623 /* check for a dvm mode entry */
624 else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
625 ice_add_dvm_hint(hw, val, true);
627 /* check for a svm mode entry */
628 else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
629 ice_add_dvm_hint(hw, val, false);
631 label_name = ice_enum_labels(NULL, 0, &state, &val);
634 /* Cache the appropriate boost TCAM entry pointers for tunnels */
635 for (i = 0; i < hw->tnl.count; i++) {
636 ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
637 &hw->tnl.tbl[i].boost_entry);
638 if (hw->tnl.tbl[i].boost_entry)
639 hw->tnl.tbl[i].valid = true;
642 /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
643 for (i = 0; i < hw->dvm_upd.count; i++)
644 ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
645 &hw->dvm_upd.tbl[i].boost_entry);
650 #define ICE_DC_KEY 0x1 /* don't care */
651 #define ICE_DC_KEYINV 0x1
652 #define ICE_NM_KEY 0x0 /* never match */
653 #define ICE_NM_KEYINV 0x0
654 #define ICE_0_KEY 0x1 /* match 0 */
655 #define ICE_0_KEYINV 0x0
656 #define ICE_1_KEY 0x0 /* match 1 */
657 #define ICE_1_KEYINV 0x1
660 * ice_gen_key_word - generate 16 bits of a key/mask word
662 * @valid: valid bits mask (change only the valid bits)
663 * @dont_care: don't care mask
664 * @nvr_mtch: never match mask
665 * @key: pointer to an array where the resulting key portion will be stored
666 * @key_inv: pointer to an array where the resulting key invert portion will be stored
668 * This function generates 16 bits from an 8-bit value, an 8-bit don't care mask
669 * and an 8-bit never match mask. The 16 bits of output are divided into 8 bits
670 * of key and 8 bits of key invert.
672 * '0' = b01, always match a 0 bit
673 * '1' = b10, always match a 1 bit
674 * '?' = b11, don't care bit (always matches)
675 * '~' = b00, never match bit
679 * dont_care: b0 0 1 1 0 0
680 * never_mtch: b0 0 0 0 1 1
681 * ------------------------------
682 * Result: key: b01 10 11 11 00 00
684 static enum ice_status
685 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
688 u8 in_key = *key, in_key_inv = *key_inv;
691 /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
692 if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
698 /* encode the 8 bits into 8-bit key and 8-bit key invert */
699 for (i = 0; i < 8; i++) {
703 if (!(valid & 0x1)) { /* change only valid bits */
704 *key |= (in_key & 0x1) << 7;
705 *key_inv |= (in_key_inv & 0x1) << 7;
706 } else if (dont_care & 0x1) { /* don't care bit */
707 *key |= ICE_DC_KEY << 7;
708 *key_inv |= ICE_DC_KEYINV << 7;
709 } else if (nvr_mtch & 0x1) { /* never match bit */
710 *key |= ICE_NM_KEY << 7;
711 *key_inv |= ICE_NM_KEYINV << 7;
712 } else if (val & 0x01) { /* exact 1 match */
713 *key |= ICE_1_KEY << 7;
714 *key_inv |= ICE_1_KEYINV << 7;
715 } else { /* exact 0 match */
716 *key |= ICE_0_KEY << 7;
717 *key_inv |= ICE_0_KEYINV << 7;
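/* Summary of the encoding implemented above (illustrative): for each input bit
 * that is marked valid, the resulting key/key invert bits are taken from the
 * ICE_*_KEY/ICE_*_KEYINV definitions:
 *
 *	case		key bit			key invert bit
 *	don't care	1 (ICE_DC_KEY)		1 (ICE_DC_KEYINV)
 *	never match	0 (ICE_NM_KEY)		0 (ICE_NM_KEYINV)
 *	match 0		1 (ICE_0_KEY)		0 (ICE_0_KEYINV)
 *	match 1		0 (ICE_1_KEY)		1 (ICE_1_KEYINV)
 *
 * Bits that are not marked valid keep their previous key/key invert values.
 */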
732 * ice_bits_max_set - determine if the number of bits set is within a maximum
733 * @mask: pointer to the byte array which is the mask
734 * @size: the number of bytes in the mask
735 * @max: the max number of set bits
737 * This function determines if there are at most 'max' number of bits set in an
738 * array. Returns true if the number of bits set is <= max, or false otherwise.
741 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
746 /* check each byte */
747 for (i = 0; i < size; i++) {
748 /* if 0, go to next byte */
752 /* We know there is at least one set bit in this byte because of
753 * the above check; if we already have found 'max' number of
754 * bits set, then we can return failure now.
759 /* count the bits in this byte, checking threshold */
760 count += ice_hweight8(mask[i]);
769 * ice_set_key - generate a variable sized key with multiples of 16-bits
770 * @key: pointer to where the key will be stored
771 * @size: the size of the complete key in bytes (must be even)
772 * @val: array of 8-bit values that makes up the value portion of the key
773 * @upd: array of 8-bit masks that determine what key portion to update
774 * @dc: array of 8-bit masks that make up the don't care mask
775 * @nm: array of 8-bit masks that make up the never match mask
776 * @off: the offset of the first byte in the key to update
777 * @len: the number of bytes in the key update
779 * This function generates a key from a value, a don't care mask and a never match mask.
781 * upd, dc, and nm are optional parameters, and can be NULL:
782 * upd == NULL --> upd mask is all 1's (update all bits)
783 * dc == NULL --> dc mask is all 0's (no don't care bits)
784 * nm == NULL --> nm mask is all 0's (no never match bits)
787 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
793 /* size must be a multiple of 2 bytes. */
796 half_size = size / 2;
798 if (off + len > half_size)
801 /* Make sure at most one bit is set in the never match mask. Having more
802 * than one never match mask bit set will cause the HW to consume excessive
803 * power; this is a power management efficiency check.
805 #define ICE_NVR_MTCH_BITS_MAX 1
806 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
809 for (i = 0; i < len; i++)
810 if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
811 dc ? dc[i] : 0, nm ? nm[i] : 0,
812 key + off + i, key + half_size + off + i))
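/* Illustrative usage sketch (not part of the driver): building a key that
 * matches one byte exactly and treats a second byte as don't care. The key
 * buffer is twice the match width because its second half holds the key
 * invert; the values below are made-up placeholders:
 *
 *	u8 key[8] = { 0 };		// 4 bytes of key + 4 bytes of key invert
 *	u8 val[2] = { 0xAB, 0x00 };
 *	u8 dc[2] = { 0x00, 0xFF };	// byte 1 is entirely don't care
 *
 *	if (ice_set_key(key, sizeof(key), val, NULL, dc, NULL, 0, sizeof(val)))
 *		return ICE_ERR_PARAM;	// bad size/offset or too many never match bits
 */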
819 * ice_acquire_global_cfg_lock
820 * @hw: pointer to the HW structure
821 * @access: access type (read or write)
823 * This function will request ownership of the global config lock for reading
824 * or writing of the package. When attempting to obtain write access, the
825 * caller must check for the following two return values:
827 * ICE_SUCCESS - Means the caller has acquired the global config lock
828 * and can perform writing of the package.
829 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
830 * package or has found that no update was necessary; in
831 * this case, the caller can just skip performing any
832 * update of the package.
834 static enum ice_status
835 ice_acquire_global_cfg_lock(struct ice_hw *hw,
836 enum ice_aq_res_access_type access)
838 enum ice_status status;
840 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
842 status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
843 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
845 if (status == ICE_ERR_AQ_NO_WORK)
846 ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
852 * ice_release_global_cfg_lock
853 * @hw: pointer to the HW structure
855 * This function will release the global config lock.
857 static void ice_release_global_cfg_lock(struct ice_hw *hw)
859 ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
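/* Illustrative sketch (not part of the driver): a write-side caller of the
 * global config lock is expected to treat ICE_ERR_AQ_NO_WORK as "another PF
 * has already written the package", which is exactly what ice_dwnld_cfg_bufs()
 * does further below:
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status == ICE_ERR_AQ_NO_WORK) {
 *		// package already written; skip the download
 *	} else if (!status) {
 *		// ... download the package buffers ...
 *		ice_release_global_cfg_lock(hw);
 *	}
 */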
863 * ice_acquire_change_lock
864 * @hw: pointer to the HW structure
865 * @access: access type (read or write)
867 * This function will request ownership of the change lock.
870 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
872 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
874 return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
875 ICE_CHANGE_LOCK_TIMEOUT);
879 * ice_release_change_lock
880 * @hw: pointer to the HW structure
882 * This function will release the change lock using the proper Admin Command.
884 void ice_release_change_lock(struct ice_hw *hw)
886 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
888 ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
892 * ice_aq_download_pkg
893 * @hw: pointer to the hardware structure
894 * @pkg_buf: the package buffer to transfer
895 * @buf_size: the size of the package buffer
896 * @last_buf: last buffer indicator
897 * @error_offset: returns error offset
898 * @error_info: returns error information
899 * @cd: pointer to command details structure or NULL
901 * Download Package (0x0C40)
903 static enum ice_status
904 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
905 u16 buf_size, bool last_buf, u32 *error_offset,
906 u32 *error_info, struct ice_sq_cd *cd)
908 struct ice_aqc_download_pkg *cmd;
909 struct ice_aq_desc desc;
910 enum ice_status status;
912 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
919 cmd = &desc.params.download_pkg;
920 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
921 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
924 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
926 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
927 if (status == ICE_ERR_AQ_ERROR) {
928 /* Read error from buffer only when the FW returned an error */
929 struct ice_aqc_download_pkg_resp *resp;
931 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
933 *error_offset = LE32_TO_CPU(resp->error_offset);
935 *error_info = LE32_TO_CPU(resp->error_info);
942 * ice_aq_upload_section
943 * @hw: pointer to the hardware structure
944 * @pkg_buf: the package buffer which will receive the section
945 * @buf_size: the size of the package buffer
946 * @cd: pointer to command details structure or NULL
948 * Upload Section (0x0C41)
951 ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
952 u16 buf_size, struct ice_sq_cd *cd)
954 struct ice_aq_desc desc;
956 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
957 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
958 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
960 return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
965 * @hw: pointer to the hardware structure
966 * @pkg_buf: the package cmd buffer
967 * @buf_size: the size of the package cmd buffer
968 * @last_buf: last buffer indicator
969 * @error_offset: returns error offset
970 * @error_info: returns error information
971 * @cd: pointer to command details structure or NULL
973 * Update Package (0x0C42)
975 static enum ice_status
976 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
977 bool last_buf, u32 *error_offset, u32 *error_info,
978 struct ice_sq_cd *cd)
980 struct ice_aqc_download_pkg *cmd;
981 struct ice_aq_desc desc;
982 enum ice_status status;
984 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
991 cmd = &desc.params.download_pkg;
992 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
993 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
996 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
998 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
999 if (status == ICE_ERR_AQ_ERROR) {
1000 /* Read error from buffer only when the FW returned an error */
1001 struct ice_aqc_download_pkg_resp *resp;
1003 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
1005 *error_offset = LE32_TO_CPU(resp->error_offset);
1007 *error_info = LE32_TO_CPU(resp->error_info);
1014 * ice_find_seg_in_pkg
1015 * @hw: pointer to the hardware structure
1016 * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
1017 * @pkg_hdr: pointer to the package header to be searched
1019 * This function searches a package file for a particular segment type. On
1020 * success it returns a pointer to the segment header, otherwise it will return NULL.
1023 static struct ice_generic_seg_hdr *
1024 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
1025 struct ice_pkg_hdr *pkg_hdr)
1029 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1030 ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
1031 pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
1032 pkg_hdr->pkg_format_ver.update,
1033 pkg_hdr->pkg_format_ver.draft);
1035 /* Search all package segments for the requested segment type */
1036 for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
1037 struct ice_generic_seg_hdr *seg;
1039 seg = (struct ice_generic_seg_hdr *)
1040 ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
1042 if (LE32_TO_CPU(seg->seg_type) == seg_type)
1050 * ice_update_pkg_no_lock
1051 * @hw: pointer to the hardware structure
1052 * @bufs: pointer to an array of buffers
1053 * @count: the number of buffers in the array
1055 static enum ice_status
1056 ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1058 enum ice_status status = ICE_SUCCESS;
1061 for (i = 0; i < count; i++) {
1062 struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
1063 bool last = ((i + 1) == count);
1066 status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
1067 last, &offset, &info, NULL);
1070 ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
1071 status, offset, info);
1081 * @hw: pointer to the hardware structure
1082 * @bufs: pointer to an array of buffers
1083 * @count: the number of buffers in the array
1085 * Obtains change lock and updates package.
1088 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1090 enum ice_status status;
1092 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
1096 status = ice_update_pkg_no_lock(hw, bufs, count);
1098 ice_release_change_lock(hw);
1104 * ice_dwnld_cfg_bufs
1105 * @hw: pointer to the hardware structure
1106 * @bufs: pointer to an array of buffers
1107 * @count: the number of buffers in the array
1109 * Obtains global config lock and downloads the package configuration buffers
1110 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
1111 * found indicates that the rest of the buffers are all metadata buffers.
1113 static enum ice_status
1114 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1116 enum ice_status status;
1117 struct ice_buf_hdr *bh;
1118 u32 offset, info, i;
1120 if (!bufs || !count)
1121 return ICE_ERR_PARAM;
1123 /* If the first buffer's first section has its metadata bit set
1124 * then there are no buffers to be downloaded, and the operation is
1125 * considered a success.
1127 bh = (struct ice_buf_hdr *)bufs;
1128 if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
1131 /* reset pkg_dwnld_status in case this function is called in the
1132 * reset/rebuild flow
1134 hw->pkg_dwnld_status = ICE_AQ_RC_OK;
1136 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
1138 if (status == ICE_ERR_AQ_NO_WORK)
1139 hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
1141 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
1145 for (i = 0; i < count; i++) {
1146 bool last = ((i + 1) == count);
1149 /* check next buffer for metadata flag */
1150 bh = (struct ice_buf_hdr *)(bufs + i + 1);
1152 /* A set metadata flag in the next buffer will signal
1153 * that the current buffer will be the last buffer
1156 if (LE16_TO_CPU(bh->section_count))
1157 if (LE32_TO_CPU(bh->section_entry[0].type) &
1162 bh = (struct ice_buf_hdr *)(bufs + i);
1164 status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
1165 &offset, &info, NULL);
1167 /* Save AQ status from download package */
1168 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
1170 ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
1171 status, offset, info);
1180 status = ice_set_vlan_mode(hw);
1182 ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
1186 ice_release_global_cfg_lock(hw);
1192 * ice_aq_get_pkg_info_list
1193 * @hw: pointer to the hardware structure
1194 * @pkg_info: the buffer which will receive the information list
1195 * @buf_size: the size of the pkg_info information buffer
1196 * @cd: pointer to command details structure or NULL
1198 * Get Package Info List (0x0C43)
1200 static enum ice_status
1201 ice_aq_get_pkg_info_list(struct ice_hw *hw,
1202 struct ice_aqc_get_pkg_info_resp *pkg_info,
1203 u16 buf_size, struct ice_sq_cd *cd)
1205 struct ice_aq_desc desc;
1207 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1208 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
1210 return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
1215 * @hw: pointer to the hardware structure
1216 * @ice_seg: pointer to the segment of the package to be downloaded
1218 * Handles the download of a complete package.
1220 static enum ice_status
1221 ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
1223 struct ice_buf_table *ice_buf_tbl;
1224 enum ice_status status;
1226 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1227 ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
1228 ice_seg->hdr.seg_format_ver.major,
1229 ice_seg->hdr.seg_format_ver.minor,
1230 ice_seg->hdr.seg_format_ver.update,
1231 ice_seg->hdr.seg_format_ver.draft);
1233 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
1234 LE32_TO_CPU(ice_seg->hdr.seg_type),
1235 LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
1237 ice_buf_tbl = ice_find_buf_table(ice_seg);
1239 ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
1240 LE32_TO_CPU(ice_buf_tbl->buf_count));
1242 status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
1243 LE32_TO_CPU(ice_buf_tbl->buf_count));
1245 ice_post_pkg_dwnld_vlan_mode_cfg(hw);
1252 * @hw: pointer to the hardware structure
1253 * @pkg_hdr: pointer to the driver's package hdr
1255 * Saves off the package details into the HW structure.
1257 static enum ice_status
1258 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
1260 struct ice_generic_seg_hdr *seg_hdr;
1262 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1264 return ICE_ERR_PARAM;
1266 seg_hdr = (struct ice_generic_seg_hdr *)
1267 ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
1269 struct ice_meta_sect *meta;
1270 struct ice_pkg_enum state;
1272 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1274 /* Get package information from the Metadata Section */
1275 meta = (struct ice_meta_sect *)
1276 ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
1279 ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
1283 hw->pkg_ver = meta->ver;
1284 ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
1285 ICE_NONDMA_TO_NONDMA);
1287 ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
1288 meta->ver.major, meta->ver.minor, meta->ver.update,
1289 meta->ver.draft, meta->name);
1291 hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
1292 ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
1293 sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
1295 ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
1296 seg_hdr->seg_format_ver.major,
1297 seg_hdr->seg_format_ver.minor,
1298 seg_hdr->seg_format_ver.update,
1299 seg_hdr->seg_format_ver.draft,
1302 ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
1311 * @hw: pointer to the hardware structure
1313 * Store details of the package currently loaded in HW into the HW structure.
1315 static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
1317 struct ice_aqc_get_pkg_info_resp *pkg_info;
1318 enum ice_status status;
1322 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1324 size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
1325 pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1327 return ICE_ERR_NO_MEMORY;
1329 status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
1331 goto init_pkg_free_alloc;
1333 for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
1334 #define ICE_PKG_FLAG_COUNT 4
1335 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
1338 if (pkg_info->pkg_info[i].is_active) {
1339 flags[place++] = 'A';
1340 hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
1341 hw->active_track_id =
1342 LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
1343 ice_memcpy(hw->active_pkg_name,
1344 pkg_info->pkg_info[i].name,
1345 sizeof(pkg_info->pkg_info[i].name),
1346 ICE_NONDMA_TO_NONDMA);
1347 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
1349 if (pkg_info->pkg_info[i].is_active_at_boot)
1350 flags[place++] = 'B';
1351 if (pkg_info->pkg_info[i].is_modified)
1352 flags[place++] = 'M';
1353 if (pkg_info->pkg_info[i].is_in_nvm)
1354 flags[place++] = 'N';
1356 ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
1357 i, pkg_info->pkg_info[i].ver.major,
1358 pkg_info->pkg_info[i].ver.minor,
1359 pkg_info->pkg_info[i].ver.update,
1360 pkg_info->pkg_info[i].ver.draft,
1361 pkg_info->pkg_info[i].name, flags);
1364 init_pkg_free_alloc:
1365 ice_free(hw, pkg_info);
1371 * ice_verify_pkg - verify package
1372 * @pkg: pointer to the package buffer
1373 * @len: size of the package buffer
1375 * Verifies various attributes of the package file, including length, format
1376 * version, and the requirement of at least one segment.
1378 static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1383 if (len < ice_struct_size(pkg, seg_offset, 1))
1384 return ICE_ERR_BUF_TOO_SHORT;
1386 if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1387 pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1388 pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1389 pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1392 /* pkg must have at least one segment */
1393 seg_count = LE32_TO_CPU(pkg->seg_count);
1397 /* make sure segment array fits in package length */
1398 if (len < ice_struct_size(pkg, seg_offset, seg_count))
1399 return ICE_ERR_BUF_TOO_SHORT;
1401 /* all segments must fit within length */
1402 for (i = 0; i < seg_count; i++) {
1403 u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
1404 struct ice_generic_seg_hdr *seg;
1406 /* segment header must fit */
1407 if (len < off + sizeof(*seg))
1408 return ICE_ERR_BUF_TOO_SHORT;
1410 seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1412 /* segment body must fit */
1413 if (len < off + LE32_TO_CPU(seg->seg_size))
1414 return ICE_ERR_BUF_TOO_SHORT;
1421 * ice_free_seg - free package segment pointer
1422 * @hw: pointer to the hardware structure
1424 * Frees the package segment pointer in the proper manner, depending on whether the
1425 * segment was allocated or just the passed in pointer was stored.
1427 void ice_free_seg(struct ice_hw *hw)
1430 ice_free(hw, hw->pkg_copy);
1431 hw->pkg_copy = NULL;
1438 * ice_init_pkg_regs - initialize additional package registers
1439 * @hw: pointer to the hardware structure
1441 static void ice_init_pkg_regs(struct ice_hw *hw)
1443 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1444 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1445 #define ICE_SW_BLK_IDX 0
1446 if (hw->dcf_enabled)
1449 /* setup Switch block input mask, which is 48-bits in two parts */
1450 wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1451 wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1455 * ice_chk_pkg_version - check package version for compatibility with driver
1456 * @pkg_ver: pointer to a version structure to check
1458 * Check to make sure that the package about to be downloaded is compatible with
1459 * the driver. To be compatible, the major and minor components of the package
1460 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR definitions.
1463 static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1465 if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
1466 pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
1467 return ICE_ERR_NOT_SUPPORTED;
1473 * ice_chk_pkg_compat
1474 * @hw: pointer to the hardware structure
1475 * @ospkg: pointer to the package hdr
1476 * @seg: pointer to the package segment hdr
1478 * This function checks the package version compatibility with driver and NVM
1480 static enum ice_status
1481 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1482 struct ice_seg **seg)
1484 struct ice_aqc_get_pkg_info_resp *pkg;
1485 enum ice_status status;
1489 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1491 /* Check package version compatibility */
1492 status = ice_chk_pkg_version(&hw->pkg_ver);
1494 ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1498 /* find ICE segment in given package */
1499 *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
1502 ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1506 /* Check if FW is compatible with the OS package */
1507 size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
1508 pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1510 return ICE_ERR_NO_MEMORY;
1512 status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
1514 goto fw_ddp_compat_free_alloc;
1516 for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
1517 /* loop till we find the NVM package */
1518 if (!pkg->pkg_info[i].is_in_nvm)
1520 if ((*seg)->hdr.seg_format_ver.major !=
1521 pkg->pkg_info[i].ver.major ||
1522 (*seg)->hdr.seg_format_ver.minor >
1523 pkg->pkg_info[i].ver.minor) {
1524 status = ICE_ERR_FW_DDP_MISMATCH;
1525 ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1527 /* done processing NVM package so break */
1530 fw_ddp_compat_free_alloc:
1537 * @sect_type: section type
1538 * @section: pointer to section
1539 * @index: index of the field vector entry to be returned
1540 * @offset: ptr to variable that receives the offset in the field vector table
1542 * This is a callback function that can be passed to ice_pkg_enum_entry.
1543 * This function treats the given section as of type ice_sw_fv_section and
1544 * enumerates the offset field. "offset" is an index into the field vector table.
1547 ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1549 struct ice_sw_fv_section *fv_section =
1550 (struct ice_sw_fv_section *)section;
1552 if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1554 if (index >= LE16_TO_CPU(fv_section->count))
1557 /* "index" passed in to this function is relative to a given
1558 * 4k block. To get to the true index into the field vector
1559 * table, we need to add the relative index to the base_offset
1560 * field of this section
1562 *offset = LE16_TO_CPU(fv_section->base_offset) + index;
1563 return fv_section->fv + index;
1567 * ice_get_prof_index_max - get the max profile index for used profiles
1568 * @hw: pointer to the HW struct
1570 * Calling this function will get the max profile index for used profiles
1571 * and store the index number in struct ice_switch_info *switch_info
1572 * in hw for later use.
1574 static int ice_get_prof_index_max(struct ice_hw *hw)
1576 u16 prof_index = 0, j, max_prof_index = 0;
1577 struct ice_pkg_enum state;
1578 struct ice_seg *ice_seg;
1583 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1586 return ICE_ERR_PARAM;
1591 fv = (struct ice_fv *)
1592 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1593 &offset, ice_sw_fv_handler);
1598 /* in profiles that are not used, the prot_id is set to 0xff
1599 * and the off is set to 0x1ff for all the field vectors.
1601 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1602 if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1603 fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1605 if (flag && prof_index > max_prof_index)
1606 max_prof_index = prof_index;
1612 hw->switch_info->max_used_prof_index = max_prof_index;
1618 * ice_init_pkg - initialize/download package
1619 * @hw: pointer to the hardware structure
1620 * @buf: pointer to the package buffer
1621 * @len: size of the package buffer
1623 * This function initializes a package. The package contains HW tables
1624 * required to do packet processing. First, the function extracts package
1625 * information such as version. Then it finds the ice configuration segment
1626 * within the package; this function then saves a copy of the segment pointer
1627 * within the supplied package buffer. Next, the function will cache any hints
1628 * from the package, followed by downloading the package itself. Note that if
1629 * a previous PF driver has already downloaded the package successfully, then
1630 * the current driver will not have to download the package again.
1632 * The local package contents will be used to query default behavior and to
1633 * update specific sections of the HW's version of the package (e.g. to update
1634 * the parse graph to understand new protocols).
1636 * This function stores a pointer to the package buffer memory, and it is
1637 * expected that the supplied buffer will not be freed immediately. If the
1638 * package buffer needs to be freed, such as when read from a file, use
1639 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1642 enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1644 struct ice_pkg_hdr *pkg;
1645 enum ice_status status;
1646 struct ice_seg *seg;
1649 return ICE_ERR_PARAM;
1651 pkg = (struct ice_pkg_hdr *)buf;
1652 status = ice_verify_pkg(pkg, len);
1654 ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1659 /* initialize package info */
1660 status = ice_init_pkg_info(hw, pkg);
1664 /* before downloading the package, check package version for
1665 * compatibility with driver
1667 status = ice_chk_pkg_compat(hw, pkg, &seg);
1671 /* initialize package hints and then download package */
1672 ice_init_pkg_hints(hw, seg);
1673 status = ice_download_pkg(hw, seg);
1674 if (status == ICE_ERR_AQ_NO_WORK) {
1675 ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1676 status = ICE_SUCCESS;
1679 /* Get information on the package currently loaded in HW, then make sure
1680 * the driver is compatible with this version.
1683 status = ice_get_pkg_info(hw);
1685 status = ice_chk_pkg_version(&hw->active_pkg_ver);
1690 /* on successful package download, update other required
1691 * registers to support the package and fill HW tables
1692 * with package content.
1694 ice_init_pkg_regs(hw);
1695 ice_fill_blk_tbls(hw);
1696 ice_fill_hw_ptype(hw);
1697 ice_get_prof_index_max(hw);
1699 ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1707 * ice_copy_and_init_pkg - initialize/download a copy of the package
1708 * @hw: pointer to the hardware structure
1709 * @buf: pointer to the package buffer
1710 * @len: size of the package buffer
1712 * This function copies the package buffer, and then calls ice_init_pkg() to
1713 * initialize the copied package contents.
1715 * The copying is necessary if the package buffer supplied is constant, or if
1716 * the memory may disappear shortly after calling this function.
1718 * If the package buffer resides in the data segment and can be modified, the
1719 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1721 * However, if the package buffer needs to be copied first, such as when being
1722 * read from a file, the caller should use ice_copy_and_init_pkg().
1724 * This function will first copy the package buffer, before calling
1725 * ice_init_pkg(). The caller is free to immediately destroy the original
1726 * package buffer, as the new copy will be managed by this function and
1729 enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1731 enum ice_status status;
1735 return ICE_ERR_PARAM;
1737 buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
1739 status = ice_init_pkg(hw, buf_copy, len);
1741 /* Free the copy, since we failed to initialize the package */
1742 ice_free(hw, buf_copy);
1744 /* Track the copied pkg so we can free it later */
1745 hw->pkg_copy = buf_copy;
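/* Illustrative usage sketch (not part of the driver): a caller that reads the
 * DDP package from a file into a temporary buffer would normally use the
 * copying variant so the temporary buffer can be released immediately. The
 * read_ddp_file() helper below is hypothetical:
 *
 *	u8 *ddp_buf;
 *	u32 ddp_len;
 *	enum ice_status status;
 *
 *	ddp_buf = read_ddp_file(&ddp_len);	// hypothetical file loader
 *	status = ice_copy_and_init_pkg(hw, ddp_buf, ddp_len);
 *	// ddp_buf may be freed here; the driver keeps its own copy in hw->pkg_copy
 */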
1754 * @hw: pointer to the HW structure
1756 * Allocates a package buffer and returns a pointer to the buffer header.
1757 * Note: all package contents must be in Little Endian form.
1759 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1761 struct ice_buf_build *bld;
1762 struct ice_buf_hdr *buf;
1764 bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
1768 buf = (struct ice_buf_hdr *)bld;
1769 buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
1775 * ice_get_sw_prof_type - determine switch profile type
1776 * @hw: pointer to the HW structure
1777 * @fv: pointer to the switch field vector
1779 static enum ice_prof_type
1780 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
1784 for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1785 /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
1786 if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1787 fv->ew[i].off == ICE_VNI_OFFSET)
1788 return ICE_PROF_TUN_UDP;
1790 /* GRE tunnel will have GRE protocol */
1791 if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1792 return ICE_PROF_TUN_GRE;
1794 /* PPPOE tunnel will have PPPOE protocol */
1795 if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
1796 return ICE_PROF_TUN_PPPOE;
1799 return ICE_PROF_NON_TUN;
1803 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1804 * @hw: pointer to hardware structure
1805 * @req_profs: type of profiles requested
1806 * @bm: pointer to memory for returning the bitmap of field vectors
1809 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1812 struct ice_pkg_enum state;
1813 struct ice_seg *ice_seg;
1816 if (req_profs == ICE_PROF_ALL) {
1817 ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
1821 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1822 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
1825 enum ice_prof_type prof_type;
1828 fv = (struct ice_fv *)
1829 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1830 &offset, ice_sw_fv_handler);
1834 /* Determine field vector type */
1835 prof_type = ice_get_sw_prof_type(hw, fv);
1837 if (req_profs & prof_type)
1838 ice_set_bit((u16)offset, bm);
1844 * ice_get_sw_fv_list
1845 * @hw: pointer to the HW structure
1846 * @prot_ids: field vector to search for with a given protocol ID
1847 * @ids_cnt: lookup/protocol count
1848 * @bm: bitmap of field vectors to consider
1849 * @fv_list: Head of a list
1851 * Finds all the field vector entries from switch block that contain
1852 * a given protocol ID and returns a list of structures of type
1853 * "ice_sw_fv_list_entry". Every structure in the list has a field vector
1854 * definition and profile ID information.
1855 * NOTE: The caller of the function is responsible for freeing the memory
1856 * allocated for every list entry.
1859 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
1860 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
1862 struct ice_sw_fv_list_entry *fvl;
1863 struct ice_sw_fv_list_entry *tmp;
1864 struct ice_pkg_enum state;
1865 struct ice_seg *ice_seg;
1869 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1871 if (!ids_cnt || !hw->seg)
1872 return ICE_ERR_PARAM;
1878 fv = (struct ice_fv *)
1879 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1880 &offset, ice_sw_fv_handler);
1885 /* If field vector is not in the bitmap list, then skip this
1888 if (!ice_is_bit_set(bm, (u16)offset))
1891 for (i = 0; i < ids_cnt; i++) {
1894 /* This code assumes that if a switch field vector line
1895 * has a matching protocol, then this line will contain
1896 * the entries necessary to represent every field in
1897 * that protocol header.
1899 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1900 if (fv->ew[j].prot_id == prot_ids[i])
1902 if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1904 if (i + 1 == ids_cnt) {
1905 fvl = (struct ice_sw_fv_list_entry *)
1906 ice_malloc(hw, sizeof(*fvl));
1910 fvl->profile_id = offset;
1911 LIST_ADD(&fvl->list_entry, fv_list);
1916 if (LIST_EMPTY(fv_list))
1921 LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
1923 LIST_DEL(&fvl->list_entry);
1927 return ICE_ERR_NO_MEMORY;
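/* Illustrative sketch (not part of the driver): callers of ice_get_sw_fv_list()
 * own the returned list entries and must free them when done. prot_ids,
 * ids_cnt and bm are assumed to have been prepared by the caller, and fv_list
 * is assumed to be initialized as an empty list (INIT_LIST_HEAD here):
 *
 *	struct ice_sw_fv_list_entry *fvl, *tmp;
 *	struct LIST_HEAD_TYPE fv_list;
 *
 *	INIT_LIST_HEAD(&fv_list);
 *	if (!ice_get_sw_fv_list(hw, prot_ids, ids_cnt, bm, &fv_list)) {
 *		// ... use the field vectors on fv_list ...
 *		LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, &fv_list,
 *					 ice_sw_fv_list_entry, list_entry) {
 *			LIST_DEL(&fvl->list_entry);
 *			ice_free(hw, fvl);
 *		}
 *	}
 */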
1931 * ice_init_prof_result_bm - Initialize the profile result index bitmap
1932 * @hw: pointer to hardware structure
1934 void ice_init_prof_result_bm(struct ice_hw *hw)
1936 struct ice_pkg_enum state;
1937 struct ice_seg *ice_seg;
1940 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1950 fv = (struct ice_fv *)
1951 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1952 &off, ice_sw_fv_handler);
1957 ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
1960 /* Determine empty field vector indices, these can be
1961 * used for recipe results. Skip index 0, since it is
1962 * always used for Switch ID.
1964 for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1965 if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1966 fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1968 hw->switch_info->prof_res_bm[off]);
1974 * @hw: pointer to the HW structure
1975 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1977 * Frees a package buffer
1979 void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1985 * ice_pkg_buf_reserve_section
1986 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1987 * @count: the number of sections to reserve
1989 * Reserves one or more section table entries in a package buffer. This routine
1990 * can be called multiple times as long as all calls are made before calling
1991 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
1992 * called, the number of sections that can be allocated can no longer be
1993 * increased; not using all reserved sections is fine, but this will
1994 * result in some wasted space in the buffer.
1995 * Note: all package contents must be in Little Endian form.
1997 static enum ice_status
1998 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
2000 struct ice_buf_hdr *buf;
2005 return ICE_ERR_PARAM;
2007 buf = (struct ice_buf_hdr *)&bld->buf;
2009 /* already an active section, can't increase table size */
2010 section_count = LE16_TO_CPU(buf->section_count);
2011 if (section_count > 0)
2014 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
2016 bld->reserved_section_table_entries += count;
2018 data_end = LE16_TO_CPU(buf->data_end) +
2019 FLEX_ARRAY_SIZE(buf, section_entry, count);
2020 buf->data_end = CPU_TO_LE16(data_end);
2026 * ice_pkg_buf_alloc_section
2027 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2028 * @type: the section type value
2029 * @size: the size of the section to reserve (in bytes)
2031 * Reserves memory in the buffer for a section's content and updates the
2032 * buffer's status accordingly. This routine returns a pointer to the first
2033 * byte of the section start within the buffer, which is used to fill in the section contents.
2035 * Note: all package contents must be in Little Endian form.
2038 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
2040 struct ice_buf_hdr *buf;
2044 if (!bld || !type || !size)
2047 buf = (struct ice_buf_hdr *)&bld->buf;
2049 /* check for enough space left in buffer */
2050 data_end = LE16_TO_CPU(buf->data_end);
2052 /* section start must align on 4 byte boundary */
2053 data_end = ICE_ALIGN(data_end, 4);
2055 if ((data_end + size) > ICE_MAX_S_DATA_END)
2058 /* check for more available section table entries */
2059 sect_count = LE16_TO_CPU(buf->section_count);
2060 if (sect_count < bld->reserved_section_table_entries) {
2061 void *section_ptr = ((u8 *)buf) + data_end;
2063 buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
2064 buf->section_entry[sect_count].size = CPU_TO_LE16(size);
2065 buf->section_entry[sect_count].type = CPU_TO_LE32(type);
2068 buf->data_end = CPU_TO_LE16(data_end);
2070 buf->section_count = CPU_TO_LE16(sect_count + 1);
2074 /* no free section table entries */
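/* Illustrative sketch (not part of the driver): the typical build sequence for
 * an update-package buffer, as used by ice_upd_dvm_boost_entry() and
 * ice_create_tunnel() further below, is allocate, reserve, fill, send, free.
 * sect_size stands in for the caller's section size:
 *
 *	struct ice_buf_build *bld;
 *	enum ice_status status;
 *	void *sect;
 *
 *	bld = ice_pkg_buf_alloc(hw);
 *	if (!bld)
 *		return ICE_ERR_NO_MEMORY;
 *	if (ice_pkg_buf_reserve_section(bld, 1))
 *		goto err;
 *	sect = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
 *					 sect_size);
 *	if (!sect)
 *		goto err;
 *	// ... fill in the section contents ...
 *	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 * err:
 *	ice_pkg_buf_free(hw, bld);
 */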
2079 * ice_pkg_buf_alloc_single_section
2080 * @hw: pointer to the HW structure
2081 * @type: the section type value
2082 * @size: the size of the section to reserve (in bytes)
2083 * @section: returns pointer to the section
2085 * Allocates a package buffer with a single section.
2086 * Note: all package contents must be in Little Endian form.
2088 struct ice_buf_build *
2089 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
2092 struct ice_buf_build *buf;
2097 buf = ice_pkg_buf_alloc(hw);
2101 if (ice_pkg_buf_reserve_section(buf, 1))
2102 goto ice_pkg_buf_alloc_single_section_err;
2104 *section = ice_pkg_buf_alloc_section(buf, type, size);
2106 goto ice_pkg_buf_alloc_single_section_err;
2110 ice_pkg_buf_alloc_single_section_err:
2111 ice_pkg_buf_free(hw, buf);
2116 * ice_pkg_buf_get_active_sections
2117 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2119 * Returns the number of active sections. Before using the package buffer
2120 * in an update package command, the caller should make sure that there is at
2121 * least one active section - otherwise, the buffer is not legal and should not be used.
2123 * Note: all package contents must be in Little Endian form.
2125 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
2127 struct ice_buf_hdr *buf;
2132 buf = (struct ice_buf_hdr *)&bld->buf;
2133 return LE16_TO_CPU(buf->section_count);
2138 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2140 * Return a pointer to the buffer's header
2142 struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
2151 * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
2152 * @hw: pointer to the HW structure
2153 * @port: port to search for
2154 * @index: optionally returns index
2156 * Returns whether a port is already in use as a tunnel, and optionally its
2159 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
2163 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2164 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2174 * ice_tunnel_port_in_use
2175 * @hw: pointer to the HW structure
2176 * @port: port to search for
2177 * @index: optionally returns index
2179 * Returns whether a port is already in use as a tunnel, and optionally its
2182 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
2186 ice_acquire_lock(&hw->tnl_lock);
2187 res = ice_tunnel_port_in_use_hlpr(hw, port, index);
2188 ice_release_lock(&hw->tnl_lock);
2194 * ice_tunnel_get_type
2195 * @hw: pointer to the HW structure
2196 * @port: port to search for
2197 * @type: returns tunnel type
2199 * For a given port number, will return the type of tunnel.
2202 ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
2207 ice_acquire_lock(&hw->tnl_lock);
2209 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2210 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2211 *type = hw->tnl.tbl[i].type;
2216 ice_release_lock(&hw->tnl_lock);
2222 * ice_find_free_tunnel_entry
2223 * @hw: pointer to the HW structure
2224 * @type: tunnel type
2225 * @index: optionally returns index
2227 * Returns whether there is a free tunnel entry, and optionally its index
2230 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
2235 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2236 if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
2237 hw->tnl.tbl[i].type == type) {
2247 * ice_get_open_tunnel_port - retrieve an open tunnel port
2248 * @hw: pointer to the HW structure
2249 * @type: tunnel type (TNL_ALL will return any open port)
2250 * @port: returns open port
2253 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
2259 ice_acquire_lock(&hw->tnl_lock);
2261 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2262 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2263 (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
2264 *port = hw->tnl.tbl[i].port;
2269 ice_release_lock(&hw->tnl_lock);
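/* Illustrative sketch (not part of the driver): querying a currently open
 * VXLAN tunnel port; TNL_ALL could be passed instead to accept any tunnel
 * type. Assumes the helper returns true when a matching open port is found.
 *
 *	u16 port;
 *
 *	if (ice_get_open_tunnel_port(hw, TNL_VXLAN, &port))
 *		ice_debug(hw, ICE_DBG_PKG, "open VXLAN port: %d\n", port);
 */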
2275 * ice_upd_dvm_boost_entry
2276 * @hw: pointer to the HW structure
2277 * @entry: pointer to double VLAN boost entry info
2279 static enum ice_status
2280 ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
2282 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2283 enum ice_status status = ICE_ERR_MAX_LIMIT;
2284 struct ice_buf_build *bld;
2287 bld = ice_pkg_buf_alloc(hw);
2289 return ICE_ERR_NO_MEMORY;
2291 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2292 if (ice_pkg_buf_reserve_section(bld, 2))
2293 goto ice_upd_dvm_boost_entry_err;
2295 sect_rx = (struct ice_boost_tcam_section *)
2296 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2297 ice_struct_size(sect_rx, tcam, 1));
2299 goto ice_upd_dvm_boost_entry_err;
2300 sect_rx->count = CPU_TO_LE16(1);
2302 sect_tx = (struct ice_boost_tcam_section *)
2303 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2304 ice_struct_size(sect_tx, tcam, 1));
2306 goto ice_upd_dvm_boost_entry_err;
2307 sect_tx->count = CPU_TO_LE16(1);
2309 /* copy original boost entry to update package buffer */
2310 ice_memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam),
2311 ICE_NONDMA_TO_NONDMA);
2313 /* re-write the don't care and never match bits accordingly */
2314 if (entry->enable) {
2315 /* all bits are don't care */
2320 /* disable, one never match bit, the rest are don't care */
2326 ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2327 &val, NULL, &dc, &nm, 0, sizeof(u8));
2329 /* exact copy of entry to Tx section entry */
2330 ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
2331 ICE_NONDMA_TO_NONDMA);
2333 status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);
2335 ice_upd_dvm_boost_entry_err:
2336 ice_pkg_buf_free(hw, bld);
2342 * ice_set_dvm_boost_entries
2343 * @hw: pointer to the HW structure
2345 * Enable double VLAN by updating the appropriate boost TCAM entries.
2347 enum ice_status ice_set_dvm_boost_entries(struct ice_hw *hw)
2349 enum ice_status status;
2352 for (i = 0; i < hw->dvm_upd.count; i++) {
2353 status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
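/* Illustrative sketch (not part of the driver): after the package has been
 * downloaded and the boost TCAM labels scanned, switching to double VLAN
 * mode would involve a call such as the following.
 *
 *	if (ice_set_dvm_boost_entries(hw))
 *		ice_debug(hw, ICE_DBG_PKG, "failed to set DVM boost entries\n");
 */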
2363 * @hw: pointer to the HW structure
2364 * @type: type of tunnel
2365 * @port: port of tunnel to create
2367 * Create a tunnel by updating the parse graph in the parser. We do that by
2368 * creating a package buffer with the tunnel info and issuing an update package
2372 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
2374 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2375 enum ice_status status = ICE_ERR_MAX_LIMIT;
2376 struct ice_buf_build *bld;
2379 ice_acquire_lock(&hw->tnl_lock);
2381 if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
2382 hw->tnl.tbl[index].ref++;
2383 status = ICE_SUCCESS;
2384 goto ice_create_tunnel_end;
2387 if (!ice_find_free_tunnel_entry(hw, type, &index)) {
2388 status = ICE_ERR_OUT_OF_RANGE;
2389 goto ice_create_tunnel_end;
2392 bld = ice_pkg_buf_alloc(hw);
2394 status = ICE_ERR_NO_MEMORY;
2395 goto ice_create_tunnel_end;
2398 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2399 if (ice_pkg_buf_reserve_section(bld, 2))
2400 goto ice_create_tunnel_err;
2402 sect_rx = (struct ice_boost_tcam_section *)
2403 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2404 ice_struct_size(sect_rx, tcam, 1));
2406 goto ice_create_tunnel_err;
2407 sect_rx->count = CPU_TO_LE16(1);
2409 sect_tx = (struct ice_boost_tcam_section *)
2410 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2411 ice_struct_size(sect_tx, tcam, 1));
2413 goto ice_create_tunnel_err;
2414 sect_tx->count = CPU_TO_LE16(1);
2416 /* copy original boost entry to update package buffer */
2417 ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2418 sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
2420 /* over-write the never-match dest port key bits with the encoded port
2423 ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2424 (u8 *)&port, NULL, NULL, NULL,
2425 (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
2426 sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
2428 /* exact copy of entry to Tx section entry */
2429 ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
2430 ICE_NONDMA_TO_NONDMA);
2432 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2434 hw->tnl.tbl[index].port = port;
2435 hw->tnl.tbl[index].in_use = true;
2436 hw->tnl.tbl[index].ref = 1;
2439 ice_create_tunnel_err:
2440 ice_pkg_buf_free(hw, bld);
2442 ice_create_tunnel_end:
2443 ice_release_lock(&hw->tnl_lock);
2449 * ice_destroy_tunnel
2450 * @hw: pointer to the HW structure
2451 * @port: port of tunnel to destroy (ignored if the all parameter is true)
2452 * @all: flag that states to destroy all tunnels
2454 * Destroys a tunnel or all tunnels by creating an update package buffer
2455 * targeting the specific updates requested and then performing an update
2458 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
2460 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2461 enum ice_status status = ICE_ERR_MAX_LIMIT;
2462 struct ice_buf_build *bld;
2468 ice_acquire_lock(&hw->tnl_lock);
2470 if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
2471 if (hw->tnl.tbl[index].ref > 1) {
2472 hw->tnl.tbl[index].ref--;
2473 status = ICE_SUCCESS;
2474 goto ice_destroy_tunnel_end;
2477 /* determine count */
2478 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2479 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2480 (all || hw->tnl.tbl[i].port == port))
2484 status = ICE_ERR_PARAM;
2485 goto ice_destroy_tunnel_end;
2488 /* size of section - there is at least one entry */
2489 size = ice_struct_size(sect_rx, tcam, count);
2491 bld = ice_pkg_buf_alloc(hw);
2493 status = ICE_ERR_NO_MEMORY;
2494 goto ice_destroy_tunnel_end;
2497 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2498 if (ice_pkg_buf_reserve_section(bld, 2))
2499 goto ice_destroy_tunnel_err;
2501 sect_rx = (struct ice_boost_tcam_section *)
2502 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2505 goto ice_destroy_tunnel_err;
2506 sect_rx->count = CPU_TO_LE16(count);
2508 sect_tx = (struct ice_boost_tcam_section *)
2509 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2512 goto ice_destroy_tunnel_err;
2513 sect_tx->count = CPU_TO_LE16(count);
2515 /* copy original boost entry to update package buffer, one copy to Rx
2516 * section, another copy to the Tx section
2518 for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2519 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2520 (all || hw->tnl.tbl[i].port == port)) {
2521 ice_memcpy(sect_rx->tcam + j,
2522 hw->tnl.tbl[i].boost_entry,
2523 sizeof(*sect_rx->tcam),
2524 ICE_NONDMA_TO_NONDMA);
2525 ice_memcpy(sect_tx->tcam + j,
2526 hw->tnl.tbl[i].boost_entry,
2527 sizeof(*sect_tx->tcam),
2528 ICE_NONDMA_TO_NONDMA);
2529 hw->tnl.tbl[i].marked = true;
2533 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2535 for (i = 0; i < hw->tnl.count &&
2536 i < ICE_TUNNEL_MAX_ENTRIES; i++)
2537 if (hw->tnl.tbl[i].marked) {
2538 hw->tnl.tbl[i].ref = 0;
2539 hw->tnl.tbl[i].port = 0;
2540 hw->tnl.tbl[i].in_use = false;
2541 hw->tnl.tbl[i].marked = false;
2544 ice_destroy_tunnel_err:
2545 ice_pkg_buf_free(hw, bld);
2547 ice_destroy_tunnel_end:
2548 ice_release_lock(&hw->tnl_lock);
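/* Illustrative sketch (not part of the driver): creating and later tearing
 * down a VXLAN tunnel on UDP port 4789 (placeholder value). Because the
 * tunnel table is reference counted, a second create on the same port only
 * bumps the ref, and destroy only removes the entry when the ref drops.
 *
 *	enum ice_status status;
 *
 *	status = ice_create_tunnel(hw, TNL_VXLAN, 4789);
 *	if (!status)
 *		status = ice_destroy_tunnel(hw, 4789, false);
 */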
2554 * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
2555 * @hw: pointer to the hardware structure
2556 * @blk: hardware block
2558 * @fv_idx: field vector word index
2559 * @prot: variable to receive the protocol ID
2560 * @off: variable to receive the protocol offset
2563 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
2566 struct ice_fv_word *fv_ext;
2568 if (prof >= hw->blk[blk].es.count)
2569 return ICE_ERR_PARAM;
2571 if (fv_idx >= hw->blk[blk].es.fvw)
2572 return ICE_ERR_PARAM;
2574 fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
2576 *prot = fv_ext[fv_idx].prot_id;
2577 *off = fv_ext[fv_idx].off;
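/* Illustrative sketch (not part of the driver): translating a field vector
 * word of a profile into its protocol ID / offset pair. The profile index 0
 * and word index 2 are placeholders.
 *
 *	u8 prot;
 *	u16 off;
 *
 *	if (!ice_find_prot_off(hw, ICE_BLK_RSS, 0, 2, &prot, &off))
 *		ice_debug(hw, ICE_DBG_PKG, "prot %d off %d\n", prot, off);
 */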
2582 /* PTG Management */
2585 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
2586 * @hw: pointer to the hardware structure
2588 * @ptype: the ptype to search for
2589 * @ptg: pointer to variable that receives the PTG
2591 * This function will search the PTGs for a particular ptype, returning the
2592 * PTG ID that contains it through the PTG parameter, with the value of
2593 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
2595 static enum ice_status
2596 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2598 if (ptype >= ICE_XLT1_CNT || !ptg)
2599 return ICE_ERR_PARAM;
2601 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2606 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
2607 * @hw: pointer to the hardware structure
2609 * @ptg: the PTG to allocate
2611 * This function allocates a given packet type group ID specified by the PTG
2614 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2616 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2620 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
2621 * @hw: pointer to the hardware structure
2623 * @ptype: the ptype to remove
2624 * @ptg: the PTG to remove the ptype from
2626 * This function will remove the ptype from the specific PTG, and move it to
2627 * the default PTG (ICE_DEFAULT_PTG).
2629 static enum ice_status
2630 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2632 struct ice_ptg_ptype **ch;
2633 struct ice_ptg_ptype *p;
2635 if (ptype > ICE_XLT1_CNT - 1)
2636 return ICE_ERR_PARAM;
2638 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2639 return ICE_ERR_DOES_NOT_EXIST;
2641 /* Should not happen if .in_use is set, bad config */
2642 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2645 /* find the ptype within this PTG, and bypass the link over it */
2646 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2647 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2649 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2650 *ch = p->next_ptype;
2654 ch = &p->next_ptype;
2658 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2659 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2665 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
2666 * @hw: pointer to the hardware structure
2668 * @ptype: the ptype to add or move
2669 * @ptg: the PTG to add or move the ptype to
2671 * This function will either add or move a ptype to a particular PTG depending
2672 * on if the ptype is already part of another group. Note that using a
2673 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
2676 static enum ice_status
2677 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2679 enum ice_status status;
2682 if (ptype > ICE_XLT1_CNT - 1)
2683 return ICE_ERR_PARAM;
2685 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2686 return ICE_ERR_DOES_NOT_EXIST;
2688 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2692 /* Is ptype already in the correct PTG? */
2693 if (original_ptg == ptg)
2696 /* Remove from original PTG and move back to the default PTG */
2697 if (original_ptg != ICE_DEFAULT_PTG)
2698 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2700 /* Moving to default PTG? Then we're done with this request */
2701 if (ptg == ICE_DEFAULT_PTG)
2704 /* Add ptype to PTG at beginning of list */
2705 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2706 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2707 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2708 &hw->blk[blk].xlt1.ptypes[ptype];
2710 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2711 hw->blk[blk].xlt1.t[ptype] = ptg;
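/* Illustrative sketch (not part of the driver): moving a ptype into a newly
 * allocated PTG. The PTG value 5 and ptype 17 are placeholders; passing
 * ICE_DEFAULT_PTG as the destination instead would simply return the ptype
 * to the default group.
 *
 *	u8 ptg = 5;
 *
 *	ice_ptg_alloc_val(hw, ICE_BLK_RSS, ptg);
 *	if (ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, 17, ptg))
 *		ice_debug(hw, ICE_DBG_PKG, "failed to move ptype\n");
 */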
2716 /* Block / table size info */
2717 struct ice_blk_size_details {
2718 u16 xlt1; /* # XLT1 entries */
2719 u16 xlt2; /* # XLT2 entries */
2720 u16 prof_tcam; /* # profile ID TCAM entries */
2721 u16 prof_id; /* # profile IDs */
2722 u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
2723 u16 prof_redir; /* # profile redirection entries */
2724 u16 es; /* # extraction sequence entries */
2725 u16 fvw; /* # field vector words */
2726 u8 overwrite; /* overwrite existing entries allowed */
2727 u8 reverse; /* reverse FV order */
2730 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2733 * XLT1 - Number of entries in XLT1 table
2734 * XLT2 - Number of entries in XLT2 table
2735 * TCAM - Number of entries in the Profile ID TCAM table
2736 * CDID - Control Domain ID of the hardware block
2737 * PRED - Number of entries in the Profile Redirection Table
2738 * FV - Number of entries in the Field Vector
2739 * FVW - Width (in WORDs) of the Field Vector
2740 * OVR - Overwrite existing table entries
2743 /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
2744 /* Overwrite , Reverse FV */
2745 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
2747 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
2749 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2751 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2753 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
2758 ICE_SID_XLT1_OFF = 0,
2761 ICE_SID_PR_REDIR_OFF,
2766 /* Characteristic handling */
2769 * ice_match_prop_lst - determine if properties of two lists match
2770 * @list1: first properties list
2771 * @list2: second properties list
2773 * Count, cookies and the order must match in order to be considered equivalent.
2776 ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
2778 struct ice_vsig_prof *tmp1;
2779 struct ice_vsig_prof *tmp2;
2783 /* compare counts */
2784 LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list)
2786 LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
2788 if (!count || count != chk_count)
2791 tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
2792 tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
2794 * profile cookies must match, and in the exact same order, to take
2795 * priority into account
2798 if (tmp2->profile_cookie != tmp1->profile_cookie)
2801 tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
2802 tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
2808 /* VSIG Management */
2811 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2812 * @hw: pointer to the hardware structure
2814 * @vsi: VSI of interest
2815 * @vsig: pointer to receive the VSI group
2817 * This function will look up the VSI entry in the XLT2 list and return
2818 * the VSI group it is associated with.
2821 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2823 if (!vsig || vsi >= ICE_MAX_VSI)
2824 return ICE_ERR_PARAM;
2826 /* As long as there's a default or valid VSIG associated with the input
2827 * VSI, the function returns success. Any handling of the VSIG will be
2828 * done by the following add, update or remove functions.
2830 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2836 * ice_vsig_alloc_val - allocate a new VSIG by value
2837 * @hw: pointer to the hardware structure
2839 * @vsig: the VSIG to allocate
2841 * This function will allocate a given VSIG specified by the VSIG parameter.
2843 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2845 u16 idx = vsig & ICE_VSIG_IDX_M;
2847 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2848 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2849 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2852 return ICE_VSIG_VALUE(idx, hw->pf_id);
2856 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2857 * @hw: pointer to the hardware structure
2860 * This function will iterate through the VSIG list and mark the first
2861 * unused entry for the new VSIG entry as used and return that value.
2863 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2867 for (i = 1; i < ICE_MAX_VSIGS; i++)
2868 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2869 return ice_vsig_alloc_val(hw, blk, i);
2871 return ICE_DEFAULT_VSIG;
2875 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2876 * @hw: pointer to the hardware structure
2878 * @chs: characteristic list
2879 * @vsig: returns the VSIG with the matching profiles, if found
2881 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2882 * a group have the same characteristic set. To check whether a VSIG exists
2883 * that has the same characteristics as the input characteristics, this
2884 * function will iterate through the XLT2 list and return the VSIG that has a
2885 * matching configuration. In order to make sure that priorities are accounted
2886 * for, the list must match exactly, including the order in which the
2887 * characteristics are listed.
2889 static enum ice_status
2890 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2891 struct LIST_HEAD_TYPE *chs, u16 *vsig)
2893 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2896 for (i = 0; i < xlt2->count; i++)
2897 if (xlt2->vsig_tbl[i].in_use &&
2898 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2899 *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2903 return ICE_ERR_DOES_NOT_EXIST;
2907 * ice_vsig_free - free VSI group
2908 * @hw: pointer to the hardware structure
2910 * @vsig: VSIG to remove
2912 * The function will remove all VSIs associated with the input VSIG and move
2913 * them to the DEFAULT_VSIG and mark the VSIG available.
2915 static enum ice_status
2916 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2918 struct ice_vsig_prof *dtmp, *del;
2919 struct ice_vsig_vsi *vsi_cur;
2922 idx = vsig & ICE_VSIG_IDX_M;
2923 if (idx >= ICE_MAX_VSIGS)
2924 return ICE_ERR_PARAM;
2926 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2927 return ICE_ERR_DOES_NOT_EXIST;
2929 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2931 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2932 /* If the VSIG has at least 1 VSI then iterate through the
2933 * list and remove the VSIs before deleting the group.
2936 /* remove all vsis associated with this VSIG XLT2 entry */
2938 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2940 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2941 vsi_cur->changed = 1;
2942 vsi_cur->next_vsi = NULL;
2946 /* NULL terminate head of VSI list */
2947 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2950 /* free characteristic list */
2951 LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
2952 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2953 ice_vsig_prof, list) {
2954 LIST_DEL(&del->list);
2958 /* if the VSIG characteristic list was cleared for reset,
2959 * re-initialize the list head
2961 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2967 * ice_vsig_remove_vsi - remove VSI from VSIG
2968 * @hw: pointer to the hardware structure
2970 * @vsi: VSI to remove
2971 * @vsig: VSI group to remove from
2973 * The function will remove the input VSI from its VSI group and move it
2974 * to the DEFAULT_VSIG.
2976 static enum ice_status
2977 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2979 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2982 idx = vsig & ICE_VSIG_IDX_M;
2984 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2985 return ICE_ERR_PARAM;
2987 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2988 return ICE_ERR_DOES_NOT_EXIST;
2990 /* entry already in default VSIG, don't have to remove */
2991 if (idx == ICE_DEFAULT_VSIG)
2994 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2998 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2999 vsi_cur = (*vsi_head);
3001 /* iterate the VSI list, skip over the entry to be removed */
3003 if (vsi_tgt == vsi_cur) {
3004 (*vsi_head) = vsi_cur->next_vsi;
3007 vsi_head = &vsi_cur->next_vsi;
3008 vsi_cur = vsi_cur->next_vsi;
3011 /* verify if VSI was removed from group list */
3013 return ICE_ERR_DOES_NOT_EXIST;
3015 vsi_cur->vsig = ICE_DEFAULT_VSIG;
3016 vsi_cur->changed = 1;
3017 vsi_cur->next_vsi = NULL;
3023 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
3024 * @hw: pointer to the hardware structure
3027 * @vsig: destination VSI group
3029 * This function will move or add the input VSI to the target VSIG.
3030 * The function will find the original VSIG the VSI belongs to and
3031 * move the entry to the DEFAULT_VSIG, update the original VSIG and
3032 * then move the entry to the new VSIG.
3034 static enum ice_status
3035 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
3037 struct ice_vsig_vsi *tmp;
3038 enum ice_status status;
3041 idx = vsig & ICE_VSIG_IDX_M;
3043 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
3044 return ICE_ERR_PARAM;
3046 /* if VSIG not in use and VSIG is not default type this VSIG
3049 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
3050 vsig != ICE_DEFAULT_VSIG)
3051 return ICE_ERR_DOES_NOT_EXIST;
3053 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
3057 /* no update required if vsigs match */
3058 if (orig_vsig == vsig)
3061 if (orig_vsig != ICE_DEFAULT_VSIG) {
3062 /* remove entry from orig_vsig and add to default VSIG */
3063 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
3068 if (idx == ICE_DEFAULT_VSIG)
3071 /* Create VSI entry and add VSIG and prop_mask values */
3072 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
3073 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
3075 /* Add new entry to the head of the VSIG list */
3076 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3077 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
3078 &hw->blk[blk].xlt2.vsis[vsi];
3079 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
3080 hw->blk[blk].xlt2.t[vsi] = vsig;
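/* Illustrative sketch (not part of the driver): allocating a fresh VSIG and
 * moving a VSI into it. The VSI index 3 is a placeholder; ice_vsig_alloc()
 * returns ICE_DEFAULT_VSIG when no free group is available.
 *
 *	u16 vsi = 3;
 *	u16 vsig = ice_vsig_alloc(hw, ICE_BLK_RSS);
 *
 *	if (vsig != ICE_DEFAULT_VSIG &&
 *	    !ice_vsig_add_mv_vsi(hw, ICE_BLK_RSS, vsi, vsig))
 *		ice_debug(hw, ICE_DBG_PKG, "VSI %d now in VSIG %d\n", vsi, vsig);
 */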
3086 * ice_prof_has_mask_idx - determine if profile index masking is identical
3087 * @hw: pointer to the hardware structure
3089 * @prof: profile to check
3090 * @idx: profile index to check
3091 * @mask: mask to match
3094 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
3097 bool expect_no_mask = false;
3102 /* If mask is 0x0000 or 0xffff, then there is no masking */
3103 if (mask == 0 || mask == 0xffff)
3104 expect_no_mask = true;
3106 /* Scan the enabled masks on this profile, for the specified idx */
3107 for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
3108 hw->blk[blk].masks.count; i++)
3109 if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
3110 if (hw->blk[blk].masks.masks[i].in_use &&
3111 hw->blk[blk].masks.masks[i].idx == idx) {
3113 if (hw->blk[blk].masks.masks[i].mask == mask)
3118 if (expect_no_mask) {
3130 * ice_prof_has_mask - determine if profile masking is identical
3131 * @hw: pointer to the hardware structure
3133 * @prof: profile to check
3134 * @masks: masks to match
3137 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
3141 /* es->mask_ena[prof] will have the mask */
3142 for (i = 0; i < hw->blk[blk].es.fvw; i++)
3143 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
3150 * ice_find_prof_id_with_mask - find profile ID for a given field vector
3151 * @hw: pointer to the hardware structure
3153 * @fv: field vector to search for
3154 * @masks: masks for fv
3155 * @prof_id: receives the profile ID
3157 static enum ice_status
3158 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
3159 struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
3161 struct ice_es *es = &hw->blk[blk].es;
3164 /* For FD and RSS, we don't want to reuse an existing profile with the
3165 * same field vector and mask, as doing so would cause rule interference.
3167 if (blk == ICE_BLK_FD || blk == ICE_BLK_RSS)
3168 return ICE_ERR_DOES_NOT_EXIST;
3170 for (i = 0; i < (u8)es->count; i++) {
3171 u16 off = i * es->fvw;
3173 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
3176 /* check if masks settings are the same for this profile */
3177 if (masks && !ice_prof_has_mask(hw, blk, i, masks))
3184 return ICE_ERR_DOES_NOT_EXIST;
3188 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
3189 * @blk: the block type
3190 * @rsrc_type: pointer to variable to receive the resource type
3192 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3196 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID;
3199 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID;
3202 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
3205 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
3208 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
3217 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
3218 * @blk: the block type
3219 * @rsrc_type: pointer to variable to receive the resource type
3221 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3225 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM;
3228 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM;
3231 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
3234 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
3237 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
3246 * ice_alloc_tcam_ent - allocate hardware TCAM entry
3247 * @hw: pointer to the HW struct
3248 * @blk: the block to allocate the TCAM for
3249 * @btm: true to allocate from bottom of table, false to allocate from top
3250 * @tcam_idx: pointer to variable to receive the TCAM entry
3252 * This function allocates a new entry in a Profile ID TCAM for a specific
3255 static enum ice_status
3256 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
3261 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3262 return ICE_ERR_PARAM;
3264 return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
3268 * ice_free_tcam_ent - free hardware TCAM entry
3269 * @hw: pointer to the HW struct
3270 * @blk: the block from which to free the TCAM entry
3271 * @tcam_idx: the TCAM entry to free
3273 * This function frees an entry in a Profile ID TCAM for a specific block.
3275 static enum ice_status
3276 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
3280 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3281 return ICE_ERR_PARAM;
3283 return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
3287 * ice_alloc_prof_id - allocate profile ID
3288 * @hw: pointer to the HW struct
3289 * @blk: the block to allocate the profile ID for
3290 * @prof_id: pointer to variable to receive the profile ID
3292 * This function allocates a new profile ID, which also corresponds to a Field
3293 * Vector (Extraction Sequence) entry.
3295 static enum ice_status
3296 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
3298 enum ice_status status;
3302 if (!ice_prof_id_rsrc_type(blk, &res_type))
3303 return ICE_ERR_PARAM;
3305 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
3307 *prof_id = (u8)get_prof;
3313 * ice_free_prof_id - free profile ID
3314 * @hw: pointer to the HW struct
3315 * @blk: the block from which to free the profile ID
3316 * @prof_id: the profile ID to free
3318 * This function frees a profile ID, which also corresponds to a Field Vector.
3320 static enum ice_status
3321 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3323 u16 tmp_prof_id = (u16)prof_id;
3326 if (!ice_prof_id_rsrc_type(blk, &res_type))
3327 return ICE_ERR_PARAM;
3329 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
3333 * ice_prof_inc_ref - increment reference count for profile
3334 * @hw: pointer to the HW struct
3335 * @blk: the block from which to free the profile ID
3336 * @prof_id: the profile ID for which to increment the reference count
3338 static enum ice_status
3339 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3341 if (prof_id > hw->blk[blk].es.count)
3342 return ICE_ERR_PARAM;
3344 hw->blk[blk].es.ref_count[prof_id]++;
3350 * ice_write_prof_mask_reg - write profile mask register
3351 * @hw: pointer to the HW struct
3352 * @blk: hardware block
3353 * @mask_idx: mask index
3354 * @idx: index of the FV which will use the mask
3355 * @mask: the 16-bit mask
3358 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
3366 offset = GLQF_HMASK(mask_idx);
3367 val = (idx << GLQF_HMASK_MSK_INDEX_S) &
3368 GLQF_HMASK_MSK_INDEX_M;
3369 val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
3372 offset = GLQF_FDMASK(mask_idx);
3373 val = (idx << GLQF_FDMASK_MSK_INDEX_S) &
3374 GLQF_FDMASK_MSK_INDEX_M;
3375 val |= (mask << GLQF_FDMASK_MASK_S) &
3379 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
3384 wr32(hw, offset, val);
3385 ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
3386 blk, idx, offset, val);
3390 * ice_write_prof_mask_enable_res - write profile mask enable register
3391 * @hw: pointer to the HW struct
3392 * @blk: hardware block
3393 * @prof_id: profile ID
3394 * @enable_mask: enable mask
3397 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
3398 u16 prof_id, u32 enable_mask)
3404 offset = GLQF_HMASK_SEL(prof_id);
3407 offset = GLQF_FDMASK_SEL(prof_id);
3410 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
3415 wr32(hw, offset, enable_mask);
3416 ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
3417 blk, prof_id, offset, enable_mask);
3421 * ice_init_prof_masks - initialize profile masks
3422 * @hw: pointer to the HW struct
3423 * @blk: hardware block
3425 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
3430 ice_init_lock(&hw->blk[blk].masks.lock);
3432 per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
3434 hw->blk[blk].masks.count = per_pf;
3435 hw->blk[blk].masks.first = hw->pf_id * per_pf;
3437 ice_memset(hw->blk[blk].masks.masks, 0,
3438 sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM);
3440 for (i = hw->blk[blk].masks.first;
3441 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3442 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3446 * ice_init_all_prof_masks - initialize all profile masks
3447 * @hw: pointer to the HW struct
3449 void ice_init_all_prof_masks(struct ice_hw *hw)
3451 ice_init_prof_masks(hw, ICE_BLK_RSS);
3452 ice_init_prof_masks(hw, ICE_BLK_FD);
3456 * ice_alloc_prof_mask - allocate profile mask
3457 * @hw: pointer to the HW struct
3458 * @blk: hardware block
3459 * @idx: index of FV which will use the mask
3460 * @mask: the 16-bit mask
3461 * @mask_idx: variable to receive the mask index
3463 static enum ice_status
3464 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
3467 bool found_unused = false, found_copy = false;
3468 enum ice_status status = ICE_ERR_MAX_LIMIT;
3469 u16 unused_idx = 0, copy_idx = 0;
3472 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3473 return ICE_ERR_PARAM;
3475 ice_acquire_lock(&hw->blk[blk].masks.lock);
3477 for (i = hw->blk[blk].masks.first;
3478 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3479 if (hw->blk[blk].masks.masks[i].in_use) {
3480 /* if mask is in use and it exactly duplicates the
3481 * desired mask and index, then it can be reused
3483 if (hw->blk[blk].masks.masks[i].mask == mask &&
3484 hw->blk[blk].masks.masks[i].idx == idx) {
3490 /* save off unused index, but keep searching in case
3491 * there is an exact match later on
3493 if (!found_unused) {
3494 found_unused = true;
3501 else if (found_unused)
3504 goto err_ice_alloc_prof_mask;
3506 /* update mask for a new entry */
3508 hw->blk[blk].masks.masks[i].in_use = true;
3509 hw->blk[blk].masks.masks[i].mask = mask;
3510 hw->blk[blk].masks.masks[i].idx = idx;
3511 hw->blk[blk].masks.masks[i].ref = 0;
3512 ice_write_prof_mask_reg(hw, blk, i, idx, mask);
3515 hw->blk[blk].masks.masks[i].ref++;
3517 status = ICE_SUCCESS;
3519 err_ice_alloc_prof_mask:
3520 ice_release_lock(&hw->blk[blk].masks.lock);
3526 * ice_free_prof_mask - free profile mask
3527 * @hw: pointer to the HW struct
3528 * @blk: hardware block
3529 * @mask_idx: index of mask
3531 static enum ice_status
3532 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
3534 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3535 return ICE_ERR_PARAM;
3537 if (!(mask_idx >= hw->blk[blk].masks.first &&
3538 mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
3539 return ICE_ERR_DOES_NOT_EXIST;
3541 ice_acquire_lock(&hw->blk[blk].masks.lock);
3543 if (!hw->blk[blk].masks.masks[mask_idx].in_use)
3544 goto exit_ice_free_prof_mask;
3546 if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
3547 hw->blk[blk].masks.masks[mask_idx].ref--;
3548 goto exit_ice_free_prof_mask;
3552 hw->blk[blk].masks.masks[mask_idx].in_use = false;
3553 hw->blk[blk].masks.masks[mask_idx].mask = 0;
3554 hw->blk[blk].masks.masks[mask_idx].idx = 0;
3556 /* update mask as unused entry */
3557 ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
3559 ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
3561 exit_ice_free_prof_mask:
3562 ice_release_lock(&hw->blk[blk].masks.lock);
3568 * ice_free_prof_masks - free all profile masks for a profile
3569 * @hw: pointer to the HW struct
3570 * @blk: hardware block
3571 * @prof_id: profile ID
3573 static enum ice_status
3574 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
3579 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3580 return ICE_ERR_PARAM;
3582 mask_bm = hw->blk[blk].es.mask_ena[prof_id];
3583 for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
3584 if (mask_bm & BIT(i))
3585 ice_free_prof_mask(hw, blk, i);
3591 * ice_shutdown_prof_masks - clear masks and release the masking lock
3592 * @hw: pointer to the HW struct
3593 * @blk: hardware block
3595 * This should be called before unloading the driver
3597 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
3601 ice_acquire_lock(&hw->blk[blk].masks.lock);
3603 for (i = hw->blk[blk].masks.first;
3604 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
3605 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3607 hw->blk[blk].masks.masks[i].in_use = false;
3608 hw->blk[blk].masks.masks[i].idx = 0;
3609 hw->blk[blk].masks.masks[i].mask = 0;
3612 ice_release_lock(&hw->blk[blk].masks.lock);
3613 ice_destroy_lock(&hw->blk[blk].masks.lock);
3617 * ice_shutdown_all_prof_masks - clear all masks and release the masking locks
3618 * @hw: pointer to the HW struct
3620 * This should be called before unloading the driver
3622 void ice_shutdown_all_prof_masks(struct ice_hw *hw)
3624 ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
3625 ice_shutdown_prof_masks(hw, ICE_BLK_FD);
3629 * ice_update_prof_masking - set registers according to masking
3630 * @hw: pointer to the HW struct
3631 * @blk: hardware block
3632 * @prof_id: profile ID
3635 static enum ice_status
3636 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
3644 /* Only support FD and RSS masking, otherwise nothing to be done */
3645 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3648 for (i = 0; i < hw->blk[blk].es.fvw; i++)
3649 if (masks[i] && masks[i] != 0xFFFF) {
3650 if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
3651 ena_mask |= BIT(idx);
3653 /* not enough bitmaps */
3660 /* free any bitmaps we have allocated */
3661 for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
3662 if (ena_mask & BIT(i))
3663 ice_free_prof_mask(hw, blk, i);
3665 return ICE_ERR_OUT_OF_RANGE;
3668 /* enable the masks for this profile */
3669 ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
3671 /* store enabled masks with profile so that they can be freed later */
3672 hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
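/* Illustrative sketch (not part of the driver): per-word masks passed to
 * ice_update_prof_masking(). A value of 0x0000 or 0xFFFF means "no masking"
 * for that field vector word; anything else allocates a mask register. The
 * word index, mask value, prof_id and status variables are placeholders.
 *
 *	u16 masks[ICE_MAX_FV_WORDS] = { 0 };
 *
 *	masks[2] = 0x00FF;	// mask the low byte of FV word 2
 *	status = ice_update_prof_masking(hw, ICE_BLK_FD, prof_id, masks);
 */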
3678 * ice_write_es - write an extraction sequence to hardware
3679 * @hw: pointer to the HW struct
3680 * @blk: the block in which to write the extraction sequence
3681 * @prof_id: the profile ID to write
3682 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
3685 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3686 struct ice_fv_word *fv)
3690 off = prof_id * hw->blk[blk].es.fvw;
3692 ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
3693 sizeof(*fv), ICE_NONDMA_MEM);
3694 hw->blk[blk].es.written[prof_id] = false;
3696 ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
3697 sizeof(*fv), ICE_NONDMA_TO_NONDMA);
3702 * ice_prof_dec_ref - decrement reference count for profile
3703 * @hw: pointer to the HW struct
3704 * @blk: the block from which to free the profile ID
3705 * @prof_id: the profile ID for which to decrement the reference count
3707 static enum ice_status
3708 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3710 if (prof_id > hw->blk[blk].es.count)
3711 return ICE_ERR_PARAM;
3713 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3714 if (!--hw->blk[blk].es.ref_count[prof_id]) {
3715 ice_write_es(hw, blk, prof_id, NULL);
3716 ice_free_prof_masks(hw, blk, prof_id);
3717 return ice_free_prof_id(hw, blk, prof_id);
3724 /* Block / table section IDs */
3725 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3729 ICE_SID_PROFID_TCAM_SW,
3730 ICE_SID_PROFID_REDIR_SW,
3737 ICE_SID_PROFID_TCAM_ACL,
3738 ICE_SID_PROFID_REDIR_ACL,
3745 ICE_SID_PROFID_TCAM_FD,
3746 ICE_SID_PROFID_REDIR_FD,
3753 ICE_SID_PROFID_TCAM_RSS,
3754 ICE_SID_PROFID_REDIR_RSS,
3761 ICE_SID_PROFID_TCAM_PE,
3762 ICE_SID_PROFID_REDIR_PE,
3768 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3769 * @hw: pointer to the hardware structure
3770 * @blk: the HW block to initialize
3772 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3776 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3779 ptg = hw->blk[blk].xlt1.t[pt];
3780 if (ptg != ICE_DEFAULT_PTG) {
3781 ice_ptg_alloc_val(hw, blk, ptg);
3782 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3788 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3789 * @hw: pointer to the hardware structure
3790 * @blk: the HW block to initialize
3792 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3796 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3799 vsig = hw->blk[blk].xlt2.t[vsi];
3801 ice_vsig_alloc_val(hw, blk, vsig);
3802 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3803 /* no changes at this time, since this has been
3804 * initialized from the original package
3806 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3812 * ice_init_sw_db - init software database from HW tables
3813 * @hw: pointer to the hardware structure
3815 static void ice_init_sw_db(struct ice_hw *hw)
3819 for (i = 0; i < ICE_BLK_COUNT; i++) {
3820 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3821 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3826 * ice_fill_tbl - Reads content of a single table type into database
3827 * @hw: pointer to the hardware structure
3828 * @block_id: Block ID of the table to copy
3829 * @sid: Section ID of the table to copy
3831 * Will attempt to read the entire content of a given table of a single block
3832 * into the driver database. We assume that the buffer will always
3833 * be at least as large as the data contained in the package. If
3834 * this condition is not met, there is most likely an error in the package
3837 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3839 u32 dst_len, sect_len, offset = 0;
3840 struct ice_prof_redir_section *pr;
3841 struct ice_prof_id_section *pid;
3842 struct ice_xlt1_section *xlt1;
3843 struct ice_xlt2_section *xlt2;
3844 struct ice_sw_fv_section *es;
3845 struct ice_pkg_enum state;
3849 /* if the HW segment pointer is null then the first iteration of
3850 * ice_pkg_enum_section() will fail. In this case the HW tables will
3851 * not be filled and the function will return success.
3854 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3858 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
3860 sect = ice_pkg_enum_section(hw->seg, &state, sid);
3864 case ICE_SID_XLT1_SW:
3865 case ICE_SID_XLT1_FD:
3866 case ICE_SID_XLT1_RSS:
3867 case ICE_SID_XLT1_ACL:
3868 case ICE_SID_XLT1_PE:
3869 xlt1 = (struct ice_xlt1_section *)sect;
3871 sect_len = LE16_TO_CPU(xlt1->count) *
3872 sizeof(*hw->blk[block_id].xlt1.t);
3873 dst = hw->blk[block_id].xlt1.t;
3874 dst_len = hw->blk[block_id].xlt1.count *
3875 sizeof(*hw->blk[block_id].xlt1.t);
3877 case ICE_SID_XLT2_SW:
3878 case ICE_SID_XLT2_FD:
3879 case ICE_SID_XLT2_RSS:
3880 case ICE_SID_XLT2_ACL:
3881 case ICE_SID_XLT2_PE:
3882 xlt2 = (struct ice_xlt2_section *)sect;
3883 src = (_FORCE_ u8 *)xlt2->value;
3884 sect_len = LE16_TO_CPU(xlt2->count) *
3885 sizeof(*hw->blk[block_id].xlt2.t);
3886 dst = (u8 *)hw->blk[block_id].xlt2.t;
3887 dst_len = hw->blk[block_id].xlt2.count *
3888 sizeof(*hw->blk[block_id].xlt2.t);
3890 case ICE_SID_PROFID_TCAM_SW:
3891 case ICE_SID_PROFID_TCAM_FD:
3892 case ICE_SID_PROFID_TCAM_RSS:
3893 case ICE_SID_PROFID_TCAM_ACL:
3894 case ICE_SID_PROFID_TCAM_PE:
3895 pid = (struct ice_prof_id_section *)sect;
3896 src = (u8 *)pid->entry;
3897 sect_len = LE16_TO_CPU(pid->count) *
3898 sizeof(*hw->blk[block_id].prof.t);
3899 dst = (u8 *)hw->blk[block_id].prof.t;
3900 dst_len = hw->blk[block_id].prof.count *
3901 sizeof(*hw->blk[block_id].prof.t);
3903 case ICE_SID_PROFID_REDIR_SW:
3904 case ICE_SID_PROFID_REDIR_FD:
3905 case ICE_SID_PROFID_REDIR_RSS:
3906 case ICE_SID_PROFID_REDIR_ACL:
3907 case ICE_SID_PROFID_REDIR_PE:
3908 pr = (struct ice_prof_redir_section *)sect;
3909 src = pr->redir_value;
3910 sect_len = LE16_TO_CPU(pr->count) *
3911 sizeof(*hw->blk[block_id].prof_redir.t);
3912 dst = hw->blk[block_id].prof_redir.t;
3913 dst_len = hw->blk[block_id].prof_redir.count *
3914 sizeof(*hw->blk[block_id].prof_redir.t);
3916 case ICE_SID_FLD_VEC_SW:
3917 case ICE_SID_FLD_VEC_FD:
3918 case ICE_SID_FLD_VEC_RSS:
3919 case ICE_SID_FLD_VEC_ACL:
3920 case ICE_SID_FLD_VEC_PE:
3921 es = (struct ice_sw_fv_section *)sect;
3923 sect_len = (u32)(LE16_TO_CPU(es->count) *
3924 hw->blk[block_id].es.fvw) *
3925 sizeof(*hw->blk[block_id].es.t);
3926 dst = (u8 *)hw->blk[block_id].es.t;
3927 dst_len = (u32)(hw->blk[block_id].es.count *
3928 hw->blk[block_id].es.fvw) *
3929 sizeof(*hw->blk[block_id].es.t);
3935 /* if the section offset exceeds destination length, terminate
3938 if (offset > dst_len)
3941 /* if the sum of section size and offset exceeds the destination size
3942 * then we are out of bounds of the HW table size for that PF.
3943 * Changing section length to fill the remaining table space
3946 if ((offset + sect_len) > dst_len)
3947 sect_len = dst_len - offset;
3949 ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
3951 sect = ice_pkg_enum_section(NULL, &state, sid);
3956 * ice_fill_blk_tbls - Read package context for tables
3957 * @hw: pointer to the hardware structure
3959 * Reads the current package contents and populates the driver
3960 * database with the data iteratively for all advanced feature
3961 * blocks. Assume that the HW tables have been allocated.
3963 void ice_fill_blk_tbls(struct ice_hw *hw)
3967 for (i = 0; i < ICE_BLK_COUNT; i++) {
3968 enum ice_block blk_id = (enum ice_block)i;
3970 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3971 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3972 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3973 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3974 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
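/* Illustrative sketch (not part of the driver): roughly, the initialization
 * ordering assumed here is to allocate the software shadow tables first,
 * load the package, and only then populate the shadows from its contents.
 *
 *	if (ice_init_hw_tbls(hw))
 *		return ICE_ERR_NO_MEMORY;
 *	// ... package download / package init happens here ...
 *	ice_fill_blk_tbls(hw);
 */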
3981 * ice_free_prof_map - free profile map
3982 * @hw: pointer to the hardware structure
3983 * @blk_idx: HW block index
3985 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3987 struct ice_es *es = &hw->blk[blk_idx].es;
3988 struct ice_prof_map *del, *tmp;
3990 ice_acquire_lock(&es->prof_map_lock);
3991 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
3992 ice_prof_map, list) {
3993 LIST_DEL(&del->list);
3996 INIT_LIST_HEAD(&es->prof_map);
3997 ice_release_lock(&es->prof_map_lock);
4001 * ice_free_flow_profs - free flow profile entries
4002 * @hw: pointer to the hardware structure
4003 * @blk_idx: HW block index
4005 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
4007 struct ice_flow_prof *p, *tmp;
4009 ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
4010 LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
4011 ice_flow_prof, l_entry) {
4012 struct ice_flow_entry *e, *t;
4014 LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
4015 ice_flow_entry, l_entry)
4016 ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
4017 ICE_FLOW_ENTRY_HNDL(e));
4019 LIST_DEL(&p->l_entry);
4021 ice_free(hw, p->acts);
4023 ice_destroy_lock(&p->entries_lock);
4026 ice_release_lock(&hw->fl_profs_locks[blk_idx]);
4028 /* if the driver is in reset and tables are being cleared,
4029 * re-initialize the flow profile list heads
4031 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
4035 * ice_free_vsig_tbl - free complete VSIG table entries
4036 * @hw: pointer to the hardware structure
4037 * @blk: the HW block on which to free the VSIG table entries
4039 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
4043 if (!hw->blk[blk].xlt2.vsig_tbl)
4046 for (i = 1; i < ICE_MAX_VSIGS; i++)
4047 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
4048 ice_vsig_free(hw, blk, i);
4052 * ice_free_hw_tbls - free hardware table memory
4053 * @hw: pointer to the hardware structure
4055 void ice_free_hw_tbls(struct ice_hw *hw)
4057 struct ice_rss_cfg *r, *rt;
4060 for (i = 0; i < ICE_BLK_COUNT; i++) {
4061 if (hw->blk[i].is_list_init) {
4062 struct ice_es *es = &hw->blk[i].es;
4064 ice_free_prof_map(hw, i);
4065 ice_destroy_lock(&es->prof_map_lock);
4066 ice_free_flow_profs(hw, i);
4067 ice_destroy_lock(&hw->fl_profs_locks[i]);
4069 hw->blk[i].is_list_init = false;
4071 ice_free_vsig_tbl(hw, (enum ice_block)i);
4072 ice_free(hw, hw->blk[i].xlt1.ptypes);
4073 ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
4074 ice_free(hw, hw->blk[i].xlt1.t);
4075 ice_free(hw, hw->blk[i].xlt2.t);
4076 ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
4077 ice_free(hw, hw->blk[i].xlt2.vsis);
4078 ice_free(hw, hw->blk[i].prof.t);
4079 ice_free(hw, hw->blk[i].prof_redir.t);
4080 ice_free(hw, hw->blk[i].es.t);
4081 ice_free(hw, hw->blk[i].es.ref_count);
4082 ice_free(hw, hw->blk[i].es.written);
4083 ice_free(hw, hw->blk[i].es.mask_ena);
4086 LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
4087 ice_rss_cfg, l_entry) {
4088 LIST_DEL(&r->l_entry);
4091 ice_destroy_lock(&hw->rss_locks);
4092 if (!hw->dcf_enabled)
4093 ice_shutdown_all_prof_masks(hw);
4094 ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
4098 * ice_init_flow_profs - init flow profile locks and list heads
4099 * @hw: pointer to the hardware structure
4100 * @blk_idx: HW block index
4102 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
4104 ice_init_lock(&hw->fl_profs_locks[blk_idx]);
4105 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
4109 * ice_clear_hw_tbls - clear HW tables and flow profiles
4110 * @hw: pointer to the hardware structure
4112 void ice_clear_hw_tbls(struct ice_hw *hw)
4116 for (i = 0; i < ICE_BLK_COUNT; i++) {
4117 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
4118 struct ice_prof_tcam *prof = &hw->blk[i].prof;
4119 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
4120 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
4121 struct ice_es *es = &hw->blk[i].es;
4123 if (hw->blk[i].is_list_init) {
4124 ice_free_prof_map(hw, i);
4125 ice_free_flow_profs(hw, i);
4128 ice_free_vsig_tbl(hw, (enum ice_block)i);
4130 ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
4132 ice_memset(xlt1->ptg_tbl, 0,
4133 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
4135 ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
4138 ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
4140 ice_memset(xlt2->vsig_tbl, 0,
4141 xlt2->count * sizeof(*xlt2->vsig_tbl),
4143 ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
4146 ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
4148 ice_memset(prof_redir->t, 0,
4149 prof_redir->count * sizeof(*prof_redir->t),
4152 ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
4154 ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
4156 ice_memset(es->written, 0, es->count * sizeof(*es->written),
4158 ice_memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena),
4164 * ice_init_hw_tbls - init hardware table memory
4165 * @hw: pointer to the hardware structure
4167 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
4171 ice_init_lock(&hw->rss_locks);
4172 INIT_LIST_HEAD(&hw->rss_list_head);
4173 if (!hw->dcf_enabled)
4174 ice_init_all_prof_masks(hw);
4175 for (i = 0; i < ICE_BLK_COUNT; i++) {
4176 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
4177 struct ice_prof_tcam *prof = &hw->blk[i].prof;
4178 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
4179 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
4180 struct ice_es *es = &hw->blk[i].es;
4183 if (hw->blk[i].is_list_init)
4186 ice_init_flow_profs(hw, i);
4187 ice_init_lock(&es->prof_map_lock);
4188 INIT_LIST_HEAD(&es->prof_map);
4189 hw->blk[i].is_list_init = true;
4191 hw->blk[i].overwrite = blk_sizes[i].overwrite;
4192 es->reverse = blk_sizes[i].reverse;
4194 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
4195 xlt1->count = blk_sizes[i].xlt1;
4197 xlt1->ptypes = (struct ice_ptg_ptype *)
4198 ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
4203 xlt1->ptg_tbl = (struct ice_ptg_entry *)
4204 ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
4209 xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
4213 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
4214 xlt2->count = blk_sizes[i].xlt2;
4216 xlt2->vsis = (struct ice_vsig_vsi *)
4217 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
4222 xlt2->vsig_tbl = (struct ice_vsig_entry *)
4223 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
4224 if (!xlt2->vsig_tbl)
4227 for (j = 0; j < xlt2->count; j++)
4228 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
4230 xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
4234 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
4235 prof->count = blk_sizes[i].prof_tcam;
4236 prof->max_prof_id = blk_sizes[i].prof_id;
4237 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
4238 prof->t = (struct ice_prof_tcam_entry *)
4239 ice_calloc(hw, prof->count, sizeof(*prof->t));
4244 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
4245 prof_redir->count = blk_sizes[i].prof_redir;
4246 prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
4247 sizeof(*prof_redir->t));
4252 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
4253 es->count = blk_sizes[i].es;
4254 es->fvw = blk_sizes[i].fvw;
4255 es->t = (struct ice_fv_word *)
4256 ice_calloc(hw, (u32)(es->count * es->fvw),
4261 es->ref_count = (u16 *)
4262 ice_calloc(hw, es->count, sizeof(*es->ref_count));
4267 es->written = (u8 *)
4268 ice_calloc(hw, es->count, sizeof(*es->written));
4273 es->mask_ena = (u32 *)
4274 ice_calloc(hw, es->count, sizeof(*es->mask_ena));
4282 ice_free_hw_tbls(hw);
4283 return ICE_ERR_NO_MEMORY;
4287 * ice_prof_gen_key - generate profile ID key
4288 * @hw: pointer to the HW struct
4289 * @blk: the block in which to write the profile ID
4290 * @ptg: packet type group (PTG) portion of key
4291 * @vsig: VSIG portion of key
4292 * @cdid: CDID portion of key
4293 * @flags: flag portion of key
4294 * @vl_msk: valid mask
4295 * @dc_msk: don't care mask
4296 * @nm_msk: never match mask
4297 * @key: output of profile ID key
4299 static enum ice_status
4300 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
4301 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4302 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
4303 u8 key[ICE_TCAM_KEY_SZ])
4305 struct ice_prof_id_key inkey;
4308 inkey.xlt2_cdid = CPU_TO_LE16(vsig);
4309 inkey.flags = CPU_TO_LE16(flags);
4311 switch (hw->blk[blk].prof.cdid_bits) {
4315 #define ICE_CD_2_M 0xC000U
4316 #define ICE_CD_2_S 14
4317 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
4318 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
4321 #define ICE_CD_4_M 0xF000U
4322 #define ICE_CD_4_S 12
4323 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
4324 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
4327 #define ICE_CD_8_M 0xFF00U
4328 #define ICE_CD_8_S 16
4329 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
4330 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
4333 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
4337 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
4338 nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
4342 * ice_tcam_write_entry - write TCAM entry
4343 * @hw: pointer to the HW struct
4344 * @blk: the block in which to write the profile ID
4345 * @idx: the entry index to write to
4346 * @prof_id: profile ID
4347 * @ptg: packet type group (PTG) portion of key
4348 * @vsig: VSIG portion of key
4349 * @cdid: CDID portion of key
4350 * @flags: flag portion of key
4351 * @vl_msk: valid mask
4352 * @dc_msk: don't care mask
4353 * @nm_msk: never match mask
4355 static enum ice_status
4356 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
4357 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
4358 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4359 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
4360 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
4362 struct ice_prof_tcam_entry;
4363 enum ice_status status;
4365 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
4366 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
4368 hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
4369 hw->blk[blk].prof.t[idx].prof_id = prof_id;
4376 * ice_vsig_get_ref - returns the number of VSIs belonging to a VSIG
4377 * @hw: pointer to the hardware structure
4379 * @vsig: VSIG to query
4380 * @refs: pointer to variable to receive the reference count
4382 static enum ice_status
4383 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
4385 u16 idx = vsig & ICE_VSIG_IDX_M;
4386 struct ice_vsig_vsi *ptr;
4390 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
4391 return ICE_ERR_DOES_NOT_EXIST;
4393 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4396 ptr = ptr->next_vsi;
4403 * ice_has_prof_vsig - check to see if VSIG has a specific profile
4404 * @hw: pointer to the hardware structure
4406 * @vsig: VSIG to check against
4407 * @hdl: profile handle
4410 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
4412 u16 idx = vsig & ICE_VSIG_IDX_M;
4413 struct ice_vsig_prof *ent;
4415 LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4416 ice_vsig_prof, list)
4417 if (ent->profile_cookie == hdl)
4420 ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
4426 * ice_prof_bld_es - build profile ID extraction sequence changes
4427 * @hw: pointer to the HW struct
4428 * @blk: hardware block
4429 * @bld: the update package buffer build to add to
4430 * @chgs: the list of changes to make in hardware
4432 static enum ice_status
4433 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
4434 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4436 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
4437 struct ice_chs_chg *tmp;
4439 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4440 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
4441 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
4442 struct ice_pkg_es *p;
4445 id = ice_sect_id(blk, ICE_VEC_TBL);
4446 p = (struct ice_pkg_es *)
4447 ice_pkg_buf_alloc_section(bld, id,
4448 ice_struct_size(p, es,
4454 return ICE_ERR_MAX_LIMIT;
4456 p->count = CPU_TO_LE16(1);
4457 p->offset = CPU_TO_LE16(tmp->prof_id);
4459 ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
4460 ICE_NONDMA_TO_NONDMA);
4467 * ice_prof_bld_tcam - build profile ID TCAM changes
4468 * @hw: pointer to the HW struct
4469 * @blk: hardware block
4470 * @bld: the update package buffer build to add to
4471 * @chgs: the list of changes to make in hardware
4473 static enum ice_status
4474 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
4475 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4477 struct ice_chs_chg *tmp;
4479 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4480 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
4481 struct ice_prof_id_section *p;
4484 id = ice_sect_id(blk, ICE_PROF_TCAM);
4485 p = (struct ice_prof_id_section *)
4486 ice_pkg_buf_alloc_section(bld, id,
4492 return ICE_ERR_MAX_LIMIT;
4494 p->count = CPU_TO_LE16(1);
4495 p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
4496 p->entry[0].prof_id = tmp->prof_id;
4498 ice_memcpy(p->entry[0].key,
4499 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
4500 sizeof(hw->blk[blk].prof.t->key),
4501 ICE_NONDMA_TO_NONDMA);
4508 * ice_prof_bld_xlt1 - build XLT1 changes
4509 * @blk: hardware block
4510 * @bld: the update package buffer build to add to
4511 * @chgs: the list of changes to make in hardware
4513 static enum ice_status
4514 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
4515 struct LIST_HEAD_TYPE *chgs)
4517 struct ice_chs_chg *tmp;
4519 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4520 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
4521 struct ice_xlt1_section *p;
4524 id = ice_sect_id(blk, ICE_XLT1);
4525 p = (struct ice_xlt1_section *)
4526 ice_pkg_buf_alloc_section(bld, id,
4532 return ICE_ERR_MAX_LIMIT;
4534 p->count = CPU_TO_LE16(1);
4535 p->offset = CPU_TO_LE16(tmp->ptype);
4536 p->value[0] = tmp->ptg;
4543 * ice_prof_bld_xlt2 - build XLT2 changes
4544 * @blk: hardware block
4545 * @bld: the update package buffer build to add to
4546 * @chgs: the list of changes to make in hardware
4548 static enum ice_status
4549 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
4550 struct LIST_HEAD_TYPE *chgs)
4552 struct ice_chs_chg *tmp;
4554 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4555 struct ice_xlt2_section *p;
4558 switch (tmp->type) {
4562 id = ice_sect_id(blk, ICE_XLT2);
4563 p = (struct ice_xlt2_section *)
4564 ice_pkg_buf_alloc_section(bld, id,
4570 return ICE_ERR_MAX_LIMIT;
4572 p->count = CPU_TO_LE16(1);
4573 p->offset = CPU_TO_LE16(tmp->vsi);
4574 p->value[0] = CPU_TO_LE16(tmp->vsig);
4585 * ice_upd_prof_hw - update hardware using the change list
4586 * @hw: pointer to the HW struct
4587 * @blk: hardware block
4588 * @chgs: the list of changes to make in hardware
4590 static enum ice_status
4591 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
4592 struct LIST_HEAD_TYPE *chgs)
4594 struct ice_buf_build *b;
4595 struct ice_chs_chg *tmp;
4596 enum ice_status status;
4604 /* count number of sections we need */
4605 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4606 switch (tmp->type) {
4607 case ICE_PTG_ES_ADD:
4625 sects = xlt1 + xlt2 + tcam + es;
4630 /* Build update package buffer */
4631 b = ice_pkg_buf_alloc(hw);
4633 return ICE_ERR_NO_MEMORY;
4635 status = ice_pkg_buf_reserve_section(b, sects);
4639 /* Preserve order of table update: ES, TCAM, PTG, VSIG */
4641 status = ice_prof_bld_es(hw, blk, b, chgs);
4647 status = ice_prof_bld_tcam(hw, blk, b, chgs);
4653 status = ice_prof_bld_xlt1(blk, b, chgs);
4659 status = ice_prof_bld_xlt2(blk, b, chgs);
4664 /* After package buffer build check if the section count in buffer is
4665 * non-zero and matches the number of sections detected for package
4668 pkg_sects = ice_pkg_buf_get_active_sections(b);
4669 if (!pkg_sects || pkg_sects != sects) {
4670 status = ICE_ERR_INVAL_SIZE;
4674 /* update package */
4675 status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4676 if (status == ICE_ERR_AQ_ERROR)
4677 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
4680 ice_pkg_buf_free(hw, b);
4685 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
4686 * @hw: pointer to the HW struct
4687 * @prof_id: profile ID
4688 * @mask_sel: mask select
4690 * This function enables any of the masks selected by the mask select parameter
4691 * for the specified profile.
4693 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
4695 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
4697 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
4698 GLQF_FDMASK_SEL(prof_id), mask_sel);
4701 struct ice_fd_src_dst_pair {
4707 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
4708 /* These are defined in pairs */
4709 { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
4710 { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
4712 { ICE_PROT_IPV4_IL, 2, 12 },
4713 { ICE_PROT_IPV4_IL, 2, 16 },
4715 { ICE_PROT_IPV4_IL_IL, 2, 12 },
4716 { ICE_PROT_IPV4_IL_IL, 2, 16 },
4718 { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
4719 { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
4721 { ICE_PROT_IPV6_IL, 8, 8 },
4722 { ICE_PROT_IPV6_IL, 8, 24 },
4724 { ICE_PROT_IPV6_IL_IL, 8, 8 },
4725 { ICE_PROT_IPV6_IL_IL, 8, 24 },
4727 { ICE_PROT_TCP_IL, 1, 0 },
4728 { ICE_PROT_TCP_IL, 1, 2 },
4730 { ICE_PROT_UDP_OF, 1, 0 },
4731 { ICE_PROT_UDP_OF, 1, 2 },
4733 { ICE_PROT_UDP_IL_OR_S, 1, 0 },
4734 { ICE_PROT_UDP_IL_OR_S, 1, 2 },
4736 { ICE_PROT_SCTP_IL, 1, 0 },
4737 { ICE_PROT_SCTP_IL, 1, 2 }
4740 #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
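/* Minimal illustrative sketch (not part of the driver): ice_fd_pairs[] is
 * laid out as consecutive src/dst pairs, so the sibling of entry 'j' is
 * found by toggling the lowest index bit, the same relationship computed
 * as j + ((j % 2) ? -1 : 1) in ice_update_fd_swap() below.
 */
static inline u16 ice_fd_pair_sibling_example(u16 j)
{
	return j ^ 1;
}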
4743 * ice_update_fd_swap - set register appropriately for a FD FV extraction
4744 * @hw: pointer to the HW struct
4745 * @prof_id: profile ID
4746 * @es: extraction sequence (length of array is determined by the block)
4748 static enum ice_status
4749 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
4751 ice_declare_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4752 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
4753 #define ICE_FD_FV_NOT_FOUND (-2)
4754 s8 first_free = ICE_FD_FV_NOT_FOUND;
4755 u8 used[ICE_MAX_FV_WORDS] = { 0 };
4760 ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4762 /* This code assumes that the Flow Director field vectors are assigned
4763 * from the end of the FV indexes working towards the zero index, that
4764 * only complete fields will be included and will be consecutive, and
4765 * that there are no gaps between valid indexes.
4768 /* Determine swap fields present */
4769 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
4770 /* Find the first free entry, assuming right to left population.
4771 * This is where we can start adding additional pairs if needed.
4773 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4777 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4778 if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4779 es[i].off == ice_fd_pairs[j].off) {
4780 ice_set_bit(j, pair_list);
4785 orig_free = first_free;
4787 /* determine missing swap fields that need to be added */
4788 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4789 u8 bit1 = ice_is_bit_set(pair_list, i + 1);
4790 u8 bit0 = ice_is_bit_set(pair_list, i);
4795 /* add the appropriate 'paired' entry */
4801 /* check for room */
4802 if (first_free + 1 < (s8)ice_fd_pairs[index].count)
4803 return ICE_ERR_MAX_LIMIT;
4805 /* place in extraction sequence */
4806 for (k = 0; k < ice_fd_pairs[index].count; k++) {
4807 es[first_free - k].prot_id =
4808 ice_fd_pairs[index].prot_id;
4809 es[first_free - k].off =
4810 ice_fd_pairs[index].off + (k * 2);
4813 return ICE_ERR_OUT_OF_RANGE;
4815 /* keep track of non-relevant fields */
4816 mask_sel |= BIT(first_free - k);
4819 pair_start[index] = first_free;
4820 first_free -= ice_fd_pairs[index].count;
4824 /* fill in the swap array */
4825 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4827 u8 indexes_used = 1;
4829 /* assume flat at this index */
4830 #define ICE_SWAP_VALID 0x80
4831 used[si] = si | ICE_SWAP_VALID;
4833 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4838 /* check for a swap location */
4839 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4840 if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4841 es[si].off == ice_fd_pairs[j].off) {
4844 /* determine the appropriate matching field */
4845 idx = j + ((j % 2) ? -1 : 1);
4847 indexes_used = ice_fd_pairs[idx].count;
4848 for (k = 0; k < indexes_used; k++) {
4849 used[si - k] = (pair_start[idx] - k) |
4859 /* for each set of 4 swap and 4 inset indexes, write the appropriate
4862 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4866 for (k = 0; k < 4; k++) {
4870 if (used[idx] && !(mask_sel & BIT(idx))) {
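				/* place this FV word's swap target in byte k of
				 * the swap value and a default inset byte in the
				 * inset value
				 */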
4871 raw_swap |= used[idx] << (k * BITS_PER_BYTE);
4872 #define ICE_INSET_DFLT 0x9f
4873 raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
4877 /* write the appropriate swap register set */
4878 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
4880 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4881 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
4883 /* write the appropriate inset register set */
4884 wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
4886 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4887 prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
4890 /* initially clear the mask select for this profile */
4891 ice_update_fd_mask(hw, prof_id, 0);
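/* Minimal illustrative sketch (not part of the driver): each GLQF_FDSWAP
 * and GLQF_FDINSET register covers four field-vector words; byte k of
 * register j describes FV word (j * 4) + k, packed exactly as in the
 * raw_swap/raw_in loop above.
 */
static inline u32 ice_pack_fd_reg_example(const u8 bytes[4])
{
	u32 raw = 0;
	u8 k;

	for (k = 0; k < 4; k++)
		raw |= (u32)bytes[k] << (k * BITS_PER_BYTE);

	return raw;
}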
4896 /* The entries here need to match the order of enum ice_ptype_attrib */
4897 static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
4898 { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
4899 { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
4900 { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
4901 { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
4905 * ice_get_ptype_attrib_info - get ptype attribute information
4906 * @type: attribute type
4907 * @info: pointer to variable to receive the attribute information
4910 ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
4911 struct ice_ptype_attrib_info *info)
4913 *info = ice_ptype_attributes[type];
4917 * ice_add_prof_attrib - add any PTG with attributes to profile
4918 * @prof: pointer to the profile to which PTG entries will be added
4919 * @ptg: PTG to be added
4920 * @ptype: PTYPE that needs to be looked up
4921 * @attr: array of attributes that will be considered
4922 * @attr_cnt: number of elements in the attribute array
4924 static enum ice_status
4925 ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
4926 const struct ice_ptype_attributes *attr, u16 attr_cnt)
4931 for (i = 0; i < attr_cnt; i++) {
4932 if (attr[i].ptype == ptype) {
4935 prof->ptg[prof->ptg_cnt] = ptg;
4936 ice_get_ptype_attrib_info(attr[i].attrib,
4937 &prof->attr[prof->ptg_cnt]);
4939 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4940 return ICE_ERR_MAX_LIMIT;
4945 return ICE_ERR_DOES_NOT_EXIST;
4951 * ice_add_prof - add profile
4952 * @hw: pointer to the HW struct
4953 * @blk: hardware block
4954 * @id: profile tracking ID
4955 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4956 * @attr: array of attributes
4957 * @attr_cnt: number of elements in the attribute array
4958 * @es: extraction sequence (length of array is determined by the block)
4959 * @masks: mask for extraction sequence
4961 * This function registers a profile, which matches a set of PTYPES with a
4962 * particular extraction sequence. While the hardware profile is allocated,
4963 * it will not be written until the first call to ice_add_flow that specifies
4964 * the ID value used here.
4967 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4968 const struct ice_ptype_attributes *attr, u16 attr_cnt,
4969 struct ice_fv_word *es, u16 *masks)
4971 u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4972 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
4973 struct ice_prof_map *prof;
4974 enum ice_status status;
4978 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
4980 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4982 /* search for existing profile */
4983 status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
4985 /* allocate profile ID */
4986 status = ice_alloc_prof_id(hw, blk, &prof_id);
4988 goto err_ice_add_prof;
4989 if (blk == ICE_BLK_FD) {
4990 /* For Flow Director block, the extraction sequence may
4991 * need to be altered in the case where there are paired
4992 * fields that have no match. This is necessary because
4993 * for Flow Director, src and dest fields need to be paired
4994 * for filter programming and these values are swapped
4997 status = ice_update_fd_swap(hw, prof_id, es);
4999 goto err_ice_add_prof;
5001 status = ice_update_prof_masking(hw, blk, prof_id, masks);
5003 goto err_ice_add_prof;
5005 /* and write new es */
5006 ice_write_es(hw, blk, prof_id, es);
5009 ice_prof_inc_ref(hw, blk, prof_id);
5011 /* add profile info */
5013 prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
5015 goto err_ice_add_prof;
5017 prof->profile_cookie = id;
5018 prof->prof_id = prof_id;
5022 /* build list of ptgs */
5023 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
5026 if (!ptypes[byte]) {
5032 /* Examine 8 bits per byte */
5033 ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
5038 ptype = byte * BITS_PER_BYTE + bit;
5040 /* The package should place all ptypes in a non-zero
5041 * PTG, so the following call should never fail.
5043 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
5046 /* If PTG is already added, skip and continue */
5047 if (ice_is_bit_set(ptgs_used, ptg))
5050 ice_set_bit(ptg, ptgs_used);
5051 /* Check to see if there are any attributes for this
5052 * ptype, and add them if found.
5054 status = ice_add_prof_attrib(prof, ptg, ptype, attr,
5056 if (status == ICE_ERR_MAX_LIMIT)
5059 /* This is simply a ptype/PTG with no
5062 prof->ptg[prof->ptg_cnt] = ptg;
5063 prof->attr[prof->ptg_cnt].flags = 0;
5064 prof->attr[prof->ptg_cnt].mask = 0;
5066 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
5075 LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
5076 status = ICE_SUCCESS;
5079 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
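/* Illustrative usage sketch (hypothetical helper, not part of this file):
 * register a profile covering a single ptype and then enable it on a VSI
 * via ice_add_prof_id_flow(). The extraction sequence and masks would
 * normally be built by the caller's flow logic; they are zeroed here
 * purely for illustration, and the public prototypes are assumed to come
 * from ice_flex_pipe.h.
 */
static enum ice_status
ice_add_prof_usage_example(struct ice_hw *hw, u16 vsi, u16 ptype, u64 id)
{
	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };
	struct ice_fv_word es[ICE_MAX_FV_WORDS] = { 0 };
	u16 masks[ICE_MAX_FV_WORDS] = { 0 };
	enum ice_status status;

	/* mark the ptype of interest in the input bitmap */
	ptypes[ptype / BITS_PER_BYTE] |= BIT(ptype % BITS_PER_BYTE);

	status = ice_add_prof(hw, ICE_BLK_RSS, id, ptypes, NULL, 0, es, masks);
	if (status)
		return status;

	/* associate the registered profile with the VSI */
	return ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, id);
}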
5084 * ice_search_prof_id - Search for a profile tracking ID
5085 * @hw: pointer to the HW struct
5086 * @blk: hardware block
5087 * @id: profile tracking ID
5089 * This will search for a profile tracking ID which was previously added.
5090 * The profile map lock should be held before calling this function.
5092 struct ice_prof_map *
5093 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
5095 struct ice_prof_map *entry = NULL;
5096 struct ice_prof_map *map;
5098 LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
5099 if (map->profile_cookie == id) {
5108 * ice_vsig_prof_id_count - count profiles in a VSIG
5109 * @hw: pointer to the HW struct
5110 * @blk: hardware block
5111 * @vsig: VSIG to count profiles in
5114 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
5116 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
5117 struct ice_vsig_prof *p;
5119 LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5120 ice_vsig_prof, list)
5127 * ice_rel_tcam_idx - release a TCAM index
5128 * @hw: pointer to the HW struct
5129 * @blk: hardware block
5130 * @idx: the index to release
5132 static enum ice_status
5133 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
5135 /* Masks to invoke a never match entry */
5136 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5137 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
5138 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
5139 enum ice_status status;
5141 /* write the TCAM entry */
5142 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
5147 /* release the TCAM entry */
5148 status = ice_free_tcam_ent(hw, blk, idx);
5154 * ice_rem_prof_id - remove one profile from a VSIG
5155 * @hw: pointer to the HW struct
5156 * @blk: hardware block
5157 * @prof: pointer to profile structure to remove
5159 static enum ice_status
5160 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
5161 struct ice_vsig_prof *prof)
5163 enum ice_status status;
5166 for (i = 0; i < prof->tcam_count; i++)
5167 if (prof->tcam[i].in_use) {
5168 prof->tcam[i].in_use = false;
5169 status = ice_rel_tcam_idx(hw, blk,
5170 prof->tcam[i].tcam_idx);
5172 return ICE_ERR_HW_TABLE;
5179 * ice_rem_vsig - remove VSIG
5180 * @hw: pointer to the HW struct
5181 * @blk: hardware block
5182 * @vsig: the VSIG to remove
5183 * @chg: the change list
5185 static enum ice_status
5186 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5187 struct LIST_HEAD_TYPE *chg)
5189 u16 idx = vsig & ICE_VSIG_IDX_M;
5190 struct ice_vsig_vsi *vsi_cur;
5191 struct ice_vsig_prof *d, *t;
5192 enum ice_status status;
5194 /* remove TCAM entries */
5195 LIST_FOR_EACH_ENTRY_SAFE(d, t,
5196 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5197 ice_vsig_prof, list) {
5198 status = ice_rem_prof_id(hw, blk, d);
5206 /* Move all VSIs associated with this VSIG to the default VSIG */
5207 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
5208 /* If the VSIG has at least 1 VSI then iterate through the list
5209 * and remove the VSIs before deleting the group.
5213 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
5214 struct ice_chs_chg *p;
5216 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5218 return ICE_ERR_NO_MEMORY;
5220 p->type = ICE_VSIG_REM;
5221 p->orig_vsig = vsig;
5222 p->vsig = ICE_DEFAULT_VSIG;
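		/* recover the VSI number from the entry's offset in the
		 * VSI table
		 */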
5223 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
5225 LIST_ADD(&p->list_entry, chg);
5230 return ice_vsig_free(hw, blk, vsig);
5234 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
5235 * @hw: pointer to the HW struct
5236 * @blk: hardware block
5237 * @vsig: VSIG to remove the profile from
5238 * @hdl: profile handle indicating which profile to remove
5239 * @chg: list to receive a record of changes
5241 static enum ice_status
5242 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5243 struct LIST_HEAD_TYPE *chg)
5245 u16 idx = vsig & ICE_VSIG_IDX_M;
5246 struct ice_vsig_prof *p, *t;
5247 enum ice_status status;
5249 LIST_FOR_EACH_ENTRY_SAFE(p, t,
5250 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5251 ice_vsig_prof, list)
5252 if (p->profile_cookie == hdl) {
5253 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
5254 /* this is the last profile, remove the VSIG */
5255 return ice_rem_vsig(hw, blk, vsig, chg);
5257 status = ice_rem_prof_id(hw, blk, p);
5265 return ICE_ERR_DOES_NOT_EXIST;
5269 * ice_rem_flow_all - remove all flows with a particular profile
5270 * @hw: pointer to the HW struct
5271 * @blk: hardware block
5272 * @id: profile tracking ID
5274 static enum ice_status
5275 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
5277 struct ice_chs_chg *del, *tmp;
5278 struct LIST_HEAD_TYPE chg;
5279 enum ice_status status;
5282 INIT_LIST_HEAD(&chg);
5284 for (i = 1; i < ICE_MAX_VSIGS; i++)
5285 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
5286 if (ice_has_prof_vsig(hw, blk, i, id)) {
5287 status = ice_rem_prof_id_vsig(hw, blk, i, id,
5290 goto err_ice_rem_flow_all;
5294 status = ice_upd_prof_hw(hw, blk, &chg);
5296 err_ice_rem_flow_all:
5297 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5298 LIST_DEL(&del->list_entry);
5306 * ice_rem_prof - remove profile
5307 * @hw: pointer to the HW struct
5308 * @blk: hardware block
5309 * @id: profile tracking ID
5311 * This will remove the profile specified by the ID parameter, which was
5312 * previously created through ice_add_prof. If any existing entries
5313 * are associated with this profile, they will be removed as well.
5315 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
5317 struct ice_prof_map *pmap;
5318 enum ice_status status;
5320 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5322 pmap = ice_search_prof_id(hw, blk, id);
5324 status = ICE_ERR_DOES_NOT_EXIST;
5325 goto err_ice_rem_prof;
5328 /* remove all flows with this profile */
5329 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
5331 goto err_ice_rem_prof;
5333 /* dereference profile, and possibly remove */
5334 ice_prof_dec_ref(hw, blk, pmap->prof_id);
5336 LIST_DEL(&pmap->list);
5340 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5345 * ice_get_prof - get profile
5346 * @hw: pointer to the HW struct
5347 * @blk: hardware block
5348 * @hdl: profile handle
5351 static enum ice_status
5352 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
5353 struct LIST_HEAD_TYPE *chg)
5355 enum ice_status status = ICE_SUCCESS;
5356 struct ice_prof_map *map;
5357 struct ice_chs_chg *p;
5360 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5361 /* Get the details on the profile specified by the handle ID */
5362 map = ice_search_prof_id(hw, blk, hdl);
5364 status = ICE_ERR_DOES_NOT_EXIST;
5365 goto err_ice_get_prof;
5368 for (i = 0; i < map->ptg_cnt; i++)
5369 if (!hw->blk[blk].es.written[map->prof_id]) {
5370 /* add ES to change list */
5371 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5373 status = ICE_ERR_NO_MEMORY;
5374 goto err_ice_get_prof;
5377 p->type = ICE_PTG_ES_ADD;
5379 p->ptg = map->ptg[i];
5380 p->attr = map->attr[i];
5384 p->prof_id = map->prof_id;
5386 hw->blk[blk].es.written[map->prof_id] = true;
5388 LIST_ADD(&p->list_entry, chg);
5392 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5393 /* let caller clean up the change list */
5398 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
5399 * @hw: pointer to the HW struct
5400 * @blk: hardware block
5401 * @vsig: VSIG from which to copy the list
5404 * This routine makes a copy of the list of profiles in the specified VSIG.
5406 static enum ice_status
5407 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5408 struct LIST_HEAD_TYPE *lst)
5410 struct ice_vsig_prof *ent1, *ent2;
5411 u16 idx = vsig & ICE_VSIG_IDX_M;
5413 LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5414 ice_vsig_prof, list) {
5415 struct ice_vsig_prof *p;
5417 /* copy to the input list */
5418 p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
5419 ICE_NONDMA_TO_NONDMA);
5421 goto err_ice_get_profs_vsig;
5423 LIST_ADD_TAIL(&p->list, lst);
5428 err_ice_get_profs_vsig:
5429 LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
5430 LIST_DEL(&ent1->list);
5434 return ICE_ERR_NO_MEMORY;
5438 * ice_add_prof_to_lst - add profile entry to a list
5439 * @hw: pointer to the HW struct
5440 * @blk: hardware block
5441 * @lst: the list to be added to
5442 * @hdl: profile handle of entry to add
5444 static enum ice_status
5445 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
5446 struct LIST_HEAD_TYPE *lst, u64 hdl)
5448 enum ice_status status = ICE_SUCCESS;
5449 struct ice_prof_map *map;
5450 struct ice_vsig_prof *p;
5453 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5454 map = ice_search_prof_id(hw, blk, hdl);
5456 status = ICE_ERR_DOES_NOT_EXIST;
5457 goto err_ice_add_prof_to_lst;
5460 p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
5462 status = ICE_ERR_NO_MEMORY;
5463 goto err_ice_add_prof_to_lst;
5466 p->profile_cookie = map->profile_cookie;
5467 p->prof_id = map->prof_id;
5468 p->tcam_count = map->ptg_cnt;
5470 for (i = 0; i < map->ptg_cnt; i++) {
5471 p->tcam[i].prof_id = map->prof_id;
5472 p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
5473 p->tcam[i].ptg = map->ptg[i];
5474 p->tcam[i].attr = map->attr[i];
5477 LIST_ADD(&p->list, lst);
5479 err_ice_add_prof_to_lst:
5480 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5485 * ice_move_vsi - move VSI to another VSIG
5486 * @hw: pointer to the HW struct
5487 * @blk: hardware block
5488 * @vsi: the VSI to move
5489 * @vsig: the VSIG to move the VSI to
5490 * @chg: the change list
5492 static enum ice_status
5493 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
5494 struct LIST_HEAD_TYPE *chg)
5496 enum ice_status status;
5497 struct ice_chs_chg *p;
5500 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5502 return ICE_ERR_NO_MEMORY;
5504 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
5506 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
5513 p->type = ICE_VSI_MOVE;
5515 p->orig_vsig = orig_vsig;
5518 LIST_ADD(&p->list_entry, chg);
5524 * ice_set_tcam_flags - set TCAM flag don't care mask
5525 * @mask: mask for flags
5526 * @dc_mask: pointer to the don't care mask
5528 static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ])
5532 /* flags are lowest u16 */
5533 flag_word = (u16 *)dc_mask;
5538 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
5539 * @hw: pointer to the HW struct
5540 * @idx: the index of the TCAM entry to remove
5541 * @chg: the list of change structures to search
5544 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
5546 struct ice_chs_chg *pos, *tmp;
5548 LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry)
5549 if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
5550 LIST_DEL(&tmp->list_entry);
5556 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
5557 * @hw: pointer to the HW struct
5558 * @blk: hardware block
5559 * @enable: true to enable, false to disable
5560 * @vsig: the VSIG of the TCAM entry
5561 * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
5562 * @chg: the change list
5564 * This function appends an enable or disable TCAM entry to the change list
5566 static enum ice_status
5567 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
5568 u16 vsig, struct ice_tcam_inf *tcam,
5569 struct LIST_HEAD_TYPE *chg)
5571 enum ice_status status;
5572 struct ice_chs_chg *p;
5574 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5575 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5576 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5578 /* if disabling, free the TCAM */
5580 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
5582 /* if we have already created a change for this TCAM entry, then
5583 * we need to remove that entry, in order to prevent writing to
5584 * a TCAM entry we will no longer have ownership of.
5586 ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
5592 /* for re-enabling, reallocate a TCAM */
5593 /* for entries with empty attribute masks, allocate entry from
5594 * the bottom of the TCAM table; otherwise, allocate from the
5595 * top of the table in order to give it higher priority
5597 status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
5602 /* add TCAM to change list */
5603 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5605 return ICE_ERR_NO_MEMORY;
5607 /* set don't care masks for TCAM flags */
5608 ice_set_tcam_flags(tcam->attr.mask, dc_msk);
5610 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
5611 tcam->ptg, vsig, 0, tcam->attr.flags,
5612 vl_msk, dc_msk, nm_msk);
5614 goto err_ice_prof_tcam_ena_dis;
5618 p->type = ICE_TCAM_ADD;
5619 p->add_tcam_idx = true;
5620 p->prof_id = tcam->prof_id;
5623 p->tcam_idx = tcam->tcam_idx;
5626 LIST_ADD(&p->list_entry, chg);
5630 err_ice_prof_tcam_ena_dis:
5636 * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use
5637 * @ptg_attr: pointer to the PTG and attribute pair to check
5638 * @ptgs_used: bitmap that denotes which PTGs are in use
5639 * @attr_used: array of PTG and attribute pairs already used
5640 * @attr_cnt: count of entries in the attr_used array
5643 ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, ice_bitmap_t *ptgs_used,
5644 struct ice_tcam_inf *attr_used[], u16 attr_cnt)
5648 if (!ice_is_bit_set(ptgs_used, ptg_attr->ptg))
5651 /* the PTG is used, so now look for correct attributes */
5652 for (i = 0; i < attr_cnt; i++)
5653 if (attr_used[i]->ptg == ptg_attr->ptg &&
5654 attr_used[i]->attr.flags == ptg_attr->attr.flags &&
5655 attr_used[i]->attr.mask == ptg_attr->attr.mask)
5662 * ice_adj_prof_priorities - adjust profile based on priorities
5663 * @hw: pointer to the HW struct
5664 * @blk: hardware block
5665 * @vsig: the VSIG for which to adjust profile priorities
5666 * @chg: the change list
5668 static enum ice_status
5669 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5670 struct LIST_HEAD_TYPE *chg)
5672 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
5673 struct ice_tcam_inf **attr_used;
5674 enum ice_status status = ICE_SUCCESS;
5675 struct ice_vsig_prof *t;
5676 u16 attr_used_cnt = 0;
5679 #define ICE_MAX_PTG_ATTRS 1024
5680 attr_used = (struct ice_tcam_inf **)ice_calloc(hw, ICE_MAX_PTG_ATTRS,
5681 sizeof(*attr_used));
5683 return ICE_ERR_NO_MEMORY;
5685 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
5686 idx = vsig & ICE_VSIG_IDX_M;
5688 /* Priority is based on the order in which the profiles are added. The
5689 * newest added profile has the highest priority and the oldest added
5690 * profile has the lowest priority. Since the profile property list for
5691 * a VSIG is sorted from newest to oldest, this code traverses the list
5692 * in order and enables the first of each PTG that it finds (that is not
5693 * already enabled); it also disables any duplicate PTGs that it finds
5694 * in the older profiles (that are currently enabled).
5697 LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5698 ice_vsig_prof, list) {
5701 for (i = 0; i < t->tcam_count; i++) {
5704 /* Scan the priorities from newest to oldest.
5705 * Make sure that the newest profiles take priority.
5707 used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used,
5708 attr_used, attr_used_cnt);
5710 if (used && t->tcam[i].in_use) {
5711 /* need to mark this PTG as never match, as it
5712 * was already in use and is therefore a duplicate
5713 * (and lower priority)
5715 status = ice_prof_tcam_ena_dis(hw, blk, false,
5720 goto err_ice_adj_prof_priorities;
5721 } else if (!used && !t->tcam[i].in_use) {
5722 /* need to enable this PTG, as it is not in use
5723 * and not enabled (highest priority)
5725 status = ice_prof_tcam_ena_dis(hw, blk, true,
5730 goto err_ice_adj_prof_priorities;
5733 /* keep track of used ptgs */
5734 ice_set_bit(t->tcam[i].ptg, ptgs_used);
5735 if (attr_used_cnt < ICE_MAX_PTG_ATTRS)
5736 attr_used[attr_used_cnt++] = &t->tcam[i];
5738 ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n");
5742 err_ice_adj_prof_priorities:
5743 ice_free(hw, attr_used);
5748 * ice_add_prof_id_vsig - add profile to VSIG
5749 * @hw: pointer to the HW struct
5750 * @blk: hardware block
5751 * @vsig: the VSIG to which this profile is to be added
5752 * @hdl: the profile handle indicating the profile to add
5753 * @rev: true to add entries to the end of the list
5754 * @chg: the change list
5756 static enum ice_status
5757 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5758 bool rev, struct LIST_HEAD_TYPE *chg)
5760 /* Masks that ignore flags */
5761 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5762 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5763 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5764 enum ice_status status = ICE_SUCCESS;
5765 struct ice_prof_map *map;
5766 struct ice_vsig_prof *t;
5767 struct ice_chs_chg *p;
5770 /* Error, if this VSIG already has this profile */
5771 if (ice_has_prof_vsig(hw, blk, vsig, hdl))
5772 return ICE_ERR_ALREADY_EXISTS;
5774 /* new VSIG profile structure */
5775 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5777 return ICE_ERR_NO_MEMORY;
5779 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5780 /* Get the details on the profile specified by the handle ID */
5781 map = ice_search_prof_id(hw, blk, hdl);
5783 status = ICE_ERR_DOES_NOT_EXIST;
5784 goto err_ice_add_prof_id_vsig;
5787 t->profile_cookie = map->profile_cookie;
5788 t->prof_id = map->prof_id;
5789 t->tcam_count = map->ptg_cnt;
5791 /* create TCAM entries */
5792 for (i = 0; i < map->ptg_cnt; i++) {
5795 /* add TCAM to change list */
5796 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5798 status = ICE_ERR_NO_MEMORY;
5799 goto err_ice_add_prof_id_vsig;
5802 /* allocate the TCAM entry index */
5803 /* for entries with empty attribute masks, allocate entry from
5804 * the bottom of the TCAM table; otherwise, allocate from the
5805 * top of the table in order to give it higher priority
5807 status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
5811 goto err_ice_add_prof_id_vsig;
5814 t->tcam[i].ptg = map->ptg[i];
5815 t->tcam[i].prof_id = map->prof_id;
5816 t->tcam[i].tcam_idx = tcam_idx;
5817 t->tcam[i].attr = map->attr[i];
5818 t->tcam[i].in_use = true;
5820 p->type = ICE_TCAM_ADD;
5821 p->add_tcam_idx = true;
5822 p->prof_id = t->tcam[i].prof_id;
5823 p->ptg = t->tcam[i].ptg;
5825 p->tcam_idx = t->tcam[i].tcam_idx;
5827 /* set don't care masks for TCAM flags */
5828 ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk);
5830 /* write the TCAM entry */
5831 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
5833 t->tcam[i].ptg, vsig, 0,
5834 t->tcam[i].attr.flags, vl_msk,
5838 goto err_ice_add_prof_id_vsig;
5842 LIST_ADD(&p->list_entry, chg);
5845 /* add profile to VSIG */
5846 vsig_idx = vsig & ICE_VSIG_IDX_M;
5848 LIST_ADD_TAIL(&t->list,
5849 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5852 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5854 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5857 err_ice_add_prof_id_vsig:
5858 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5859 /* let caller clean up the change list */
5865 * ice_create_prof_id_vsig - add a new VSIG with a single profile
5866 * @hw: pointer to the HW struct
5867 * @blk: hardware block
5868 * @vsi: the initial VSI that will be in the VSIG
5869 * @hdl: the profile handle of the profile that will be added to the VSIG
5870 * @chg: the change list
5872 static enum ice_status
5873 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
5874 struct LIST_HEAD_TYPE *chg)
5876 enum ice_status status;
5877 struct ice_chs_chg *p;
5880 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5882 return ICE_ERR_NO_MEMORY;
5884 new_vsig = ice_vsig_alloc(hw, blk);
5886 status = ICE_ERR_HW_TABLE;
5887 goto err_ice_create_prof_id_vsig;
5890 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
5892 goto err_ice_create_prof_id_vsig;
5894 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
5896 goto err_ice_create_prof_id_vsig;
5898 p->type = ICE_VSIG_ADD;
5900 p->orig_vsig = ICE_DEFAULT_VSIG;
5903 LIST_ADD(&p->list_entry, chg);
5907 err_ice_create_prof_id_vsig:
5908 /* let caller clean up the change list */
5914 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
5915 * @hw: pointer to the HW struct
5916 * @blk: hardware block
5917 * @vsi: the initial VSI that will be in the VSIG
5918 * @lst: the list of profiles that will be added to the VSIG
5919 * @new_vsig: return of new VSIG
5920 * @chg: the change list
5922 static enum ice_status
5923 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
5924 struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
5925 struct LIST_HEAD_TYPE *chg)
5927 struct ice_vsig_prof *t;
5928 enum ice_status status;
5931 vsig = ice_vsig_alloc(hw, blk);
5933 return ICE_ERR_HW_TABLE;
5935 status = ice_move_vsi(hw, blk, vsi, vsig, chg);
5939 LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
5940 /* Reverse the order here since we are copying the list */
5941 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
5953 * ice_find_prof_vsig - find a VSIG with a specific profile handle
5954 * @hw: pointer to the HW struct
5955 * @blk: hardware block
5956 * @hdl: the profile handle of the profile to search for
5957 * @vsig: returns the VSIG with the matching profile
5960 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
5962 struct ice_vsig_prof *t;
5963 struct LIST_HEAD_TYPE lst;
5964 enum ice_status status;
5966 INIT_LIST_HEAD(&lst);
5968 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5972 t->profile_cookie = hdl;
5973 LIST_ADD(&t->list, &lst);
5975 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
5980 return status == ICE_SUCCESS;
5984 * ice_add_vsi_flow - add VSI flow
5985 * @hw: pointer to the HW struct
5986 * @blk: hardware block
5988 * @vsig: target VSIG to include the input VSI
5990 * Calling this function will add the VSI to a given VSIG and
5991 * update the HW tables accordingly. This call can be used to
5992 * add multiple VSIs to a VSIG if we know beforehand that those
5993 * VSIs have the same characteristics as the VSIG. This saves the
5994 * time of generating a new VSIG and TCAM entries until a match is
5995 * found, and the subsequent rollback when a matching VSIG is found.
5998 ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
6000 struct ice_chs_chg *tmp, *del;
6001 struct LIST_HEAD_TYPE chg;
6002 enum ice_status status;
6004 /* if target VSIG is default the move is invalid */
6005 if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
6006 return ICE_ERR_PARAM;
6008 INIT_LIST_HEAD(&chg);
6010 /* move VSI to the VSIG that matches */
6011 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
6012 /* update hardware if successful */
6014 status = ice_upd_prof_hw(hw, blk, &chg);
6016 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
6017 LIST_DEL(&del->list_entry);
6025 * ice_add_prof_id_flow - add profile flow
6026 * @hw: pointer to the HW struct
6027 * @blk: hardware block
6028 * @vsi: the VSI to enable with the profile specified by ID
6029 * @hdl: profile handle
6031 * Calling this function will update the hardware tables to enable the
6032 * profile indicated by the ID parameter for the specified VSI. Once
6033 * successfully called, the flow will be enabled.
6036 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
6038 struct ice_vsig_prof *tmp1, *del1;
6039 struct LIST_HEAD_TYPE union_lst;
6040 struct ice_chs_chg *tmp, *del;
6041 struct LIST_HEAD_TYPE chg;
6042 enum ice_status status;
6045 INIT_LIST_HEAD(&union_lst);
6046 INIT_LIST_HEAD(&chg);
6049 status = ice_get_prof(hw, blk, hdl, &chg);
6053 /* determine if VSI is already part of a VSIG */
6054 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
6055 if (!status && vsig) {
6063 /* make sure that there is no overlap/conflict between the new
6064 * characteristics and the existing ones; we don't support that
6067 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
6068 status = ICE_ERR_ALREADY_EXISTS;
6069 goto err_ice_add_prof_id_flow;
6072 /* last VSI in the VSIG? */
6073 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
6075 goto err_ice_add_prof_id_flow;
6076 only_vsi = (ref == 1);
6078 /* create a union of the current profiles and the one being
6081 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
6083 goto err_ice_add_prof_id_flow;
6085 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
6087 goto err_ice_add_prof_id_flow;
6089 /* search for an existing VSIG with an exact characteristic match */
6090 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
6092 /* move VSI to the VSIG that matches */
6093 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
6095 goto err_ice_add_prof_id_flow;
6097 /* VSI has been moved out of or_vsig. If the or_vsig had
6098 * only that VSI it is now empty and can be removed.
6101 status = ice_rem_vsig(hw, blk, or_vsig, &chg);
6103 goto err_ice_add_prof_id_flow;
6105 } else if (only_vsi) {
6106 /* If the original VSIG only contains one VSI, then it
6107 * will be the requesting VSI. In this case the VSI is
6108 * not sharing entries and we can simply add the new
6109 * profile to the VSIG.
6111 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
6114 goto err_ice_add_prof_id_flow;
6116 /* Adjust priorities */
6117 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
6119 goto err_ice_add_prof_id_flow;
6121 /* No match, so we need a new VSIG */
6122 status = ice_create_vsig_from_lst(hw, blk, vsi,
6126 goto err_ice_add_prof_id_flow;
6128 /* Adjust priorities */
6129 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
6131 goto err_ice_add_prof_id_flow;
6134 /* need to find or add a VSIG */
6135 /* search for an existing VSIG with an exact characteristic match */
6136 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
6137 /* found an exact match */
6138 /* add or move VSI to the VSIG that matches */
6139 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
6141 goto err_ice_add_prof_id_flow;
6143 /* we did not find an exact match */
6144 /* we need to add a VSIG */
6145 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
6148 goto err_ice_add_prof_id_flow;
6152 /* update hardware */
6154 status = ice_upd_prof_hw(hw, blk, &chg);
6156 err_ice_add_prof_id_flow:
6157 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
6158 LIST_DEL(&del->list_entry);
6162 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
6163 LIST_DEL(&del1->list);
6171 * ice_rem_prof_from_list - remove a profile from list
6172 * @hw: pointer to the HW struct
6173 * @lst: list to remove the profile from
6174 * @hdl: the profile handle indicating the profile to remove
6176 static enum ice_status
6177 ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
6179 struct ice_vsig_prof *ent, *tmp;
6181 LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
6182 if (ent->profile_cookie == hdl) {
6183 LIST_DEL(&ent->list);
6188 return ICE_ERR_DOES_NOT_EXIST;
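/* Illustrative usage sketch (hypothetical helper, not part of this file):
 * tear-down mirror of the add path: first disable the flow on the VSI,
 * then drop the profile itself, assuming the public prototypes from
 * ice_flex_pipe.h are in scope.
 */
static enum ice_status
ice_rem_prof_usage_example(struct ice_hw *hw, u16 vsi, u64 id)
{
	enum ice_status status;

	/* detach the profile from the VSIG this VSI belongs to */
	status = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, id);
	if (status)
		return status;

	/* remove the profile and any remaining flows that still use it */
	return ice_rem_prof(hw, ICE_BLK_RSS, id);
}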
6192 * ice_rem_prof_id_flow - remove flow
6193 * @hw: pointer to the HW struct
6194 * @blk: hardware block
6195 * @vsi: the VSI from which to remove the profile specified by ID
6196 * @hdl: profile tracking handle
6198 * Calling this function will update the hardware tables to remove the
6199 * profile indicated by the ID parameter for the specified VSI. Once
6200 * successfully called, the flow will be disabled.
6203 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
6205 struct ice_vsig_prof *tmp1, *del1;
6206 struct LIST_HEAD_TYPE chg, copy;
6207 struct ice_chs_chg *tmp, *del;
6208 enum ice_status status;
6211 INIT_LIST_HEAD(©);
6212 INIT_LIST_HEAD(&chg);
6214 /* determine if VSI is already part of a VSIG */
6215 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
6216 if (!status && vsig) {
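		/* determine whether this is the last profile of the VSIG and
		 * whether the VSI is its only member
		 */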
6222 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
6223 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
6225 goto err_ice_rem_prof_id_flow;
6226 only_vsi = (ref == 1);
6229 /* If the original VSIG only contains one reference,
6230 * which will be the requesting VSI, then the VSI is not
6231 * sharing entries and we can simply remove the specific
6232 * characteristics from the VSIG.
6236 /* If there are no profiles left for this VSIG,
6237 * then simply remove the VSIG.
6239 status = ice_rem_vsig(hw, blk, vsig, &chg);
6241 goto err_ice_rem_prof_id_flow;
6243 status = ice_rem_prof_id_vsig(hw, blk, vsig,
6246 goto err_ice_rem_prof_id_flow;
6248 /* Adjust priorities */
6249 status = ice_adj_prof_priorities(hw, blk, vsig,
6252 goto err_ice_rem_prof_id_flow;
6256 /* Make a copy of the VSIG's list of profiles */
6257 status = ice_get_profs_vsig(hw, blk, vsig, ©);
6259 goto err_ice_rem_prof_id_flow;
6261 /* Remove specified profile entry from the list */
6262 status = ice_rem_prof_from_list(hw, ©, hdl);
6264 goto err_ice_rem_prof_id_flow;
6266 if (LIST_EMPTY(©)) {
6267 status = ice_move_vsi(hw, blk, vsi,
6268 ICE_DEFAULT_VSIG, &chg);
6270 goto err_ice_rem_prof_id_flow;
6272 } else if (!ice_find_dup_props_vsig(hw, blk, ©,
6274 /* found an exact match */
6275 /* add or move VSI to the VSIG that matches */
6276 /* Search for a VSIG with a matching profile
6280 /* Found match, move VSI to the matching VSIG */
6281 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
6283 goto err_ice_rem_prof_id_flow;
6285 /* since no existing VSIG supports this
6286 * characteristic pattern, we need to create a
6287 * new VSIG and TCAM entries
6289 status = ice_create_vsig_from_lst(hw, blk, vsi,
6293 goto err_ice_rem_prof_id_flow;
6295 /* Adjust priorities */
6296 status = ice_adj_prof_priorities(hw, blk, vsig,
6299 goto err_ice_rem_prof_id_flow;
6303 status = ICE_ERR_DOES_NOT_EXIST;
6306 /* update hardware tables */
6308 status = ice_upd_prof_hw(hw, blk, &chg);
6310 err_ice_rem_prof_id_flow:
6311 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
6312 LIST_DEL(&del->list_entry);
6316 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, ©, ice_vsig_prof, list) {
6317 LIST_DEL(&del1->list);