/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2019
 */

#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_protocol_type.h"
/* To support tunneling entries by PF, the package will append the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN, "TNL_VXLAN_PF" },
	{ TNL_GENEVE, "TNL_GENEVE_PF" },
	{ TNL_LAST, "" }
};
static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	/* SWITCH */
	{
		ICE_SID_XLT0_SW, ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW, ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW, ICE_SID_FLD_VEC_SW,
		ICE_SID_CDID_KEY_BUILDER_SW, ICE_SID_CDID_REDIR_SW
	},
	/* ACL */
	{
		ICE_SID_XLT0_ACL, ICE_SID_XLT_KEY_BUILDER_ACL, ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL, ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL, ICE_SID_FLD_VEC_ACL,
		ICE_SID_CDID_KEY_BUILDER_ACL, ICE_SID_CDID_REDIR_ACL
	},
	/* FD */
	{
		ICE_SID_XLT0_FD, ICE_SID_XLT_KEY_BUILDER_FD, ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD, ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD, ICE_SID_FLD_VEC_FD,
		ICE_SID_CDID_KEY_BUILDER_FD, ICE_SID_CDID_REDIR_FD
	},
	/* RSS */
	{
		ICE_SID_XLT0_RSS, ICE_SID_XLT_KEY_BUILDER_RSS, ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS, ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS, ICE_SID_FLD_VEC_RSS,
		ICE_SID_CDID_KEY_BUILDER_RSS, ICE_SID_CDID_REDIR_RSS
	},
	/* PE */
	{
		ICE_SID_XLT0_PE, ICE_SID_XLT_KEY_BUILDER_PE, ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE, ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE, ICE_SID_FLD_VEC_PE,
		ICE_SID_CDID_KEY_BUILDER_PE, ICE_SID_CDID_REDIR_PE
	}
};
/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
{
	return ice_sect_lkup[blk][sect];
}
/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;

	/* verify data */
	section_count = LE16_TO_CPU(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = LE16_TO_CPU(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}
/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms;

	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 LE32_TO_CPU(ice_seg->device_table_count));

	return (_FORCE_ struct ice_buf_table *)
		(nvms->vers + LE32_TO_CPU(nvms->table_count));
}
/**
 * ice_pkg_enum_buf
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (ice_seg) {
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}
/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	if (!ice_seg && state->buf)
		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}
/**
 * ice_pkg_enum_section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the matching
 * sections has been reached.
 */
static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
{
	u16 offset, size;

	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       CPU_TO_LE32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}
/**
 * ice_pkg_enum_entry
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries into the section type
 *
 * This function will enumerate all the entries in a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the entries has
 * been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
 * offset for each entry. For example, if the base for a section's header
 * indicates a base offset of 10, and the index for the entry is 2, then the
 * section handler function should set the offset to 10 + 2 = 12.
 */
static void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		   u32 sect_type, u32 *offset,
		   void *(*handler)(u32 sect_type, void *section,
				    u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}
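
/* Illustrative sketch (not part of the driver): the canonical calling
 * pattern for the enumeration helpers above. The first call passes a
 * non-NULL ice_seg to start the walk; every subsequent call passes NULL
 * to continue from the saved state, until NULL is returned:
 *
 *	struct ice_pkg_enum state;
 *	struct ice_boost_tcam_entry *tcam;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	tcam = (struct ice_boost_tcam_entry *)
 *	       ice_pkg_enum_entry(ice_seg, &state,
 *				  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
 *				  ice_boost_tcam_handler);
 *	while (tcam) {
 *		// ... examine *tcam ...
 *		tcam = (struct ice_boost_tcam_entry *)
 *		       ice_pkg_enum_entry(NULL, &state,
 *					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
 *					  ice_boost_tcam_handler);
 *	}
 *
 * ice_find_boost_entry() below follows exactly this pattern.
 */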
/**
 * ice_boost_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */
static void *
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = (struct ice_boost_tcam_section *)section;
	if (index >= LE16_TO_CPU(boost->count))
		return NULL;

	return boost->tcam + index;
}
/**
 * ice_find_boost_entry
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 */
static enum ice_status
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
		     struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		tcam = (struct ice_boost_tcam_entry *)
		       ice_pkg_enum_entry(ice_seg, &state,
					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					  ice_boost_tcam_handler);
		if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
			*entry = tcam;
			return ICE_SUCCESS;
		}

		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return ICE_ERR_CFG;
}
/**
 * ice_label_enum_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */
static void *
ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
		       u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = (struct ice_label_section *)section;
	if (index >= LE16_TO_CPU(labels->count))
		return NULL;

	return labels->label + index;
}
/**
 * ice_enum_labels
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
 * the end of the list has been reached.
 */
static char *
ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
		u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
						       NULL,
						       ice_label_enum_handler);
	if (!label)
		return NULL;

	*value = LE16_TO_CPU(label->value);
	return label->name;
}
/**
 * ice_init_pkg_hints
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and save off relevant information
 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
 */
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;
	u16 i;

	ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);

	if (!ice_seg)
		return;

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the PF
			 * character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].in_use = false;
				hw->tnl.tbl[hw->tnl.count].marked = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry)
			hw->tnl.tbl[i].valid = true;
	}
}
/* Key creation */

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1
/**
 * ice_gen_key_word - generate 16-bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to where the resulting key portion will be stored
 * @key_inv: pointer to where the resulting key invert portion will be stored
 *
 * This function generates 16-bits from an 8-bit value, an 8-bit don't care mask
 * and an 8-bit never match mask. The 16-bits of output are divided into 8 bits
 * of key and 8 bits of key invert.
 *
 *     '0' = b01, always match a 0 bit
 *     '1' = b10, always match a 1 bit
 *     '?' = b11, don't care bit (always matches)
 *     '~' = b00, never match bit
 *
 * Input:
 *          val:         b0  1  0  1  0  1
 *          dont_care:   b0  0  1  1  0  0
 *          never_mtch:  b0  0  0  0  1  1
 *          ------------------------------
 * Result:  key:        b01 10 11 11 00 00
 */
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return ICE_ERR_CFG;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return ICE_SUCCESS;
}
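
/* Worked example (illustrative): the doc table above in code. With all
 * bits valid (valid = 0xff), val = 0x15 (b010101), dont_care = 0x0C
 * (b001100) and nvr_mtch = 0x03 (b000011), the loop walks the bits LSB
 * first:
 *
 *	bits 0-1: nvr_mtch set  -> '~' = b00 (key 0, key_inv 0)
 *	bits 2-3: dont_care set -> '?' = b11 (key 1, key_inv 1)
 *	bit 4:    val = 1       -> '1' = b10 (key 0, key_inv 1)
 *	bit 5:    val = 0       -> '0' = b01 (key 1, key_inv 0)
 *
 * so for these six bits key = b101100 and key_inv = b011100, matching
 * "Result: key: b01 10 11 11 00 00" read MSB to LSB (each 2-bit group is
 * key_inv then key).
 */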
/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' number of bits set in an
 * array. Returns true if the number of bits set is <= max, otherwise returns
 * false.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i, j;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because of
		 * the above check; if we already have found 'max' number of
		 * bits set, then we can return failure now.
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		for (j = 0; j < BITS_PER_BYTE; j++) {
			count += (mask[i] & (0x1 << j)) ? 1 : 0;
			if (count > max)
				return false;
		}
	}

	return true;
}
/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return ICE_ERR_CFG;
	half_size = size / 2;

	if (off + len > half_size)
		return ICE_ERR_CFG;

	/* Make sure at most one bit is set in the never match mask. Having more
	 * than one never match mask bit set will cause HW to consume excessive
	 * power otherwise; this is a power management efficiency check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return ICE_ERR_CFG;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return ICE_ERR_CFG;

	return ICE_SUCCESS;
}
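
/* Illustrative sketch (not part of the driver): building a 4-byte key
 * (2 bytes of key followed by 2 bytes of key invert) that matches 0x12
 * exactly in the first byte and treats the second byte as don't care.
 * The helpers are from this file; the values are hypothetical.
 *
 *	u8 key[4] = { 0 };           // size 4 -> 2 key + 2 invert bytes
 *	u8 val[2] = { 0x12, 0x34 };  // value portion
 *	u8 dc[2] = { 0x00, 0xff };   // second byte is all don't care
 *
 *	if (ice_set_key(key, sizeof(key), val, NULL, dc, NULL, 0, 2))
 *		// handle ICE_ERR_CFG
 *
 * ice_create_tunnel() below uses the same helper to overwrite the
 * never-match destination port bits of a boost TCAM entry with a real
 * port number.
 */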
/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * ICE_SUCCESS        - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *                      package or has found that no update was necessary; in
 *                      this case, the caller can just skip performing any
 *                      update of the package.
 */
static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	if (status == ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_PKG,
			  "Global config lock: No work to do\n");

	return status;
}
/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}
/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}
/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 */
void ice_release_change_lock(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}
/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = LE32_TO_CPU(resp->error_offset);
		if (error_info)
			*error_info = LE32_TO_CPU(resp->error_info);
	}

	return status;
}
/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = LE32_TO_CPU(resp->error_offset);
		if (error_info)
			*error_info = LE32_TO_CPU(resp->error_info);
	}

	return status;
}
/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
		  pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));

		if (LE32_TO_CPU(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}
/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	u32 offset, info, i;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG,
				  "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	ice_release_change_lock(hw);

	return status;
}
/**
 * ice_dwnld_cfg_bufs
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
 * found indicates that the rest of the buffers are all metadata buffers.
 */
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	struct ice_buf_hdr *bh;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_ERR_PARAM;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return ICE_SUCCESS;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		if (!last) {
			/* check next buffer for metadata flag */
			bh = (struct ice_buf_hdr *)(bufs + i + 1);

			/* A set metadata flag in the next buffer will signal
			 * that the current buffer will be the last buffer
			 * downloaded
			 */
			if (LE16_TO_CPU(bh->section_count))
				if (LE32_TO_CPU(bh->section_entry[0].type) &
				    ICE_METADATA_BUF)
					last = true;
		}

		bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG,
				  "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}

		if (last)
			break;
	}

	ice_release_global_cfg_lock(hw);

	return status;
}
/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}
/**
 * ice_download_pkg
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
		  ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  LE32_TO_CPU(ice_seg->hdr.seg_type),
		  LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  LE32_TO_CPU(ice_buf_tbl->buf_count));

	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				  LE32_TO_CPU(ice_buf_tbl->buf_count));
}
/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_global_metadata_seg *meta_seg;
	struct ice_generic_seg_hdr *seg_hdr;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	if (!pkg_hdr)
		return ICE_ERR_PARAM;

	meta_seg = (struct ice_global_metadata_seg *)
		   ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
	if (meta_seg) {
		hw->pkg_ver = meta_seg->pkg_ver;
		ice_memcpy(hw->pkg_name, meta_seg->pkg_name,
			   sizeof(hw->pkg_name), ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
			  meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
			  meta_seg->pkg_name);
	} else {
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find metadata segment in driver package\n");
		return ICE_ERR_CFG;
	}

	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
	if (seg_hdr) {
		hw->ice_pkg_ver = seg_hdr->seg_ver;
		ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
			   sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
			  seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
			  seg_hdr->seg_name);
	} else {
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find ice segment in driver package\n");
		return ICE_ERR_CFG;
	}

	return ICE_SUCCESS;
}
/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	enum ice_status status;
	u16 size;
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
				    (ICE_PKG_CNT - 1));
	pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
	if (!pkg_info)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
	if (status)
		goto init_pkg_free_alloc;

	for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			ice_memcpy(hw->active_pkg_name,
				   pkg_info->pkg_info[i].name,
				   sizeof(hw->active_pkg_name),
				   ICE_NONDMA_TO_NONDMA);
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	ice_free(hw, pkg_info);

	return status;
}
/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
	u32 seg_count;
	u32 i;

	if (len < sizeof(*pkg))
		return ICE_ERR_BUF_TOO_SHORT;

	if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
		return ICE_ERR_CFG;

	/* pkg must have at least one segment */
	seg_count = LE32_TO_CPU(pkg->seg_count);
	if (seg_count < 1)
		return ICE_ERR_CFG;

	/* make sure segment array fits in package length */
	if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
		return ICE_ERR_BUF_TOO_SHORT;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_ERR_BUF_TOO_SHORT;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + LE32_TO_CPU(seg->seg_size))
			return ICE_ERR_BUF_TOO_SHORT;
	}

	return ICE_SUCCESS;
}
/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the package segment pointer in the proper manner, depending on if the
 * segment was allocated or just the passed in pointer was stored.
 */
void ice_free_seg(struct ice_hw *hw)
{
	if (hw->pkg_copy) {
		ice_free(hw, hw->pkg_copy);
		hw->pkg_copy = NULL;
	}
	hw->seg = NULL;
}
/**
 * ice_init_fd_mask_regs - initialize Flow Director mask registers
 * @hw: pointer to the HW struct
 *
 * This function sets up the Flow Director mask registers to allow for complete
 * masking off of any of the 24 Field Vector words. After this call, mask 0 will
 * mask off all of FV index 0, mask 1 will mask off all of FV index 1, etc.
 */
static void ice_init_fd_mask_regs(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
		wr32(hw, GLQF_FDMASK(i), i);
		ice_debug(hw, ICE_DBG_INIT, "init fd mask(%d): %x = %x\n", i,
			  GLQF_FDMASK(i), i);
	}
}
/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX	0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
	/* setup default flow director masks */
	ice_init_fd_mask_regs(hw);
}
/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @hw: pointer to the hardware structure
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible with
 * the driver. To be compatible, the major and minor components of the package
 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
static enum ice_status
ice_chk_pkg_version(struct ice_hw *hw, struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) {
		ice_info(hw, "ERROR: Incompatible package: %d.%d.%d.%d - requires package version: %d.%d.*.*\n",
			 pkg_ver->major, pkg_ver->minor, pkg_ver->update,
			 pkg_ver->draft, ICE_PKG_SUPP_VER_MAJ,
			 ICE_PKG_SUPP_VER_MNR);

		return ICE_ERR_NOT_SUPPORTED;
	}

	return ICE_SUCCESS;
}
/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note, that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	struct ice_pkg_hdr *pkg;
	enum ice_status status;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	pkg = (struct ice_pkg_hdr *)buf;
	status = ice_verify_pkg(pkg, len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  status);
		return status;
	}

	/* initialize package info */
	status = ice_init_pkg_info(hw, pkg);
	if (status)
		return status;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	status = ice_chk_pkg_version(hw, &hw->pkg_ver);
	if (status)
		return status;

	/* find segment in given package */
	seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
	if (!seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_ERR_CFG;
	}

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	status = ice_download_pkg(hw, seg);
	if (status == ICE_ERR_AQ_NO_WORK) {
		ice_debug(hw, ICE_DBG_INIT,
			  "package previously loaded - no work.\n");
		status = ICE_SUCCESS;
	}

	/* Get information on the package currently loaded in HW, then make sure
	 * the driver is compatible with this version.
	 */
	if (!status) {
		status = ice_get_pkg_info(hw);
		if (!status)
			status = ice_chk_pkg_version(hw, &hw->active_pkg_ver);
	}

	if (!status) {
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
			  status);
	}

	return status;
}
/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
 *
 * If the package buffer resides in the data segment and can be modified, the
 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * However, if the package buffer needs to be copied first, such as when being
 * read from a file, the caller should use ice_copy_and_init_pkg().
 *
 * This function will first copy the package buffer, before calling
 * ice_init_pkg(). The caller is free to immediately destroy the original
 * package buffer, as the new copy will be managed by this function and
 * related routines.
 */
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
	enum ice_status status;
	u8 *buf_copy;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);

	status = ice_init_pkg(hw, buf_copy, len);
	if (status) {
		/* Free the copy, since we failed to initialize the package */
		ice_free(hw, buf_copy);
	} else {
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
	}

	return status;
}
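
/* Illustrative sketch (not part of the driver): the expected call flow
 * for a caller that reads the DDP package from a file into a temporary
 * buffer. "pkg_file_buf" and "pkg_file_len" are hypothetical names.
 *
 *	status = ice_copy_and_init_pkg(hw, pkg_file_buf, pkg_file_len);
 *	// on success the original buffer may be released immediately,
 *	// since hw->pkg_copy now owns a private copy
 *
 * The copy is later released through ice_free_seg(), which frees
 * hw->pkg_copy and clears the cached segment pointer.
 */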
/**
 * ice_pkg_buf_alloc
 * @hw: pointer to the HW structure
 *
 * Allocates a package buffer and returns a pointer to the buffer header.
 * Note: all package contents must be in Little Endian form.
 */
static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
{
	struct ice_buf_build *bld;
	struct ice_buf_hdr *buf;

	bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
	if (!bld)
		return NULL;

	buf = (struct ice_buf_hdr *)bld;
	buf->data_end = CPU_TO_LE16(sizeof(*buf) -
				    sizeof(buf->section_entry[0]));
	return bld;
}
/**
 * ice_sw_fv_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the field vector entry to be returned
 * @offset: ptr to variable that receives the offset in the field vector table
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * This function treats the given section as of type ice_sw_fv_section and
 * enumerates offset field. "offset" is an index into the field vector table.
 */
static void *
ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_sw_fv_section *fv_section =
		(struct ice_sw_fv_section *)section;

	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
		return NULL;
	if (index >= LE16_TO_CPU(fv_section->count))
		return NULL;
	if (offset)
		/* "index" passed in to this function is relative to a given
		 * 4k block. To get to the true index into the field vector
		 * table need to add the relative index to the base_offset
		 * field of this section
		 */
		*offset = LE16_TO_CPU(fv_section->base_offset) + index;
	return fv_section->fv + index;
}
/**
 * ice_get_sw_fv_list
 * @hw: pointer to the HW structure
 * @prot_ids: field vector to search for with a given protocol ID
 * @ids_cnt: lookup/protocol count
 * @fv_list: Head of a list
 *
 * Finds all the field vector entries from switch block that contain
 * a given protocol ID and returns a list of structures of type
 * "ice_sw_fv_list_entry". Every structure in the list has a field vector
 * definition and profile ID information.
 * NOTE: The caller of the function is responsible for freeing the memory
 * allocated for every list entry.
 */
enum ice_status
ice_get_sw_fv_list(struct ice_hw *hw, u16 *prot_ids, u8 ids_cnt,
		   struct LIST_HEAD_TYPE *fv_list)
{
	struct ice_sw_fv_list_entry *fvl;
	struct ice_sw_fv_list_entry *tmp;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	struct ice_fv *fv;
	u32 offset;

	if (!ids_cnt || !hw->seg)
		return ICE_ERR_PARAM;

	ice_seg = hw->seg;
	do {
		u8 i;

		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &offset, ice_sw_fv_handler);

		for (i = 0; i < ids_cnt && fv; i++) {
			int j;

			/* This code assumes that if a switch field vector line
			 * has a matching protocol, then this line will contain
			 * the entries necessary to represent every field in
			 * that protocol header.
			 */
			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv->ew[j].prot_id == prot_ids[i])
					break;
			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
				break;
			if (i + 1 == ids_cnt) {
				fvl = (struct ice_sw_fv_list_entry *)
					ice_malloc(hw, sizeof(*fvl));
				if (!fvl)
					goto err;
				fvl->fv_ptr = fv;
				fvl->profile_id = offset;
				LIST_ADD(&fvl->list_entry, fv_list);
				break;
			}
		}
		ice_seg = NULL;
	} while (fv);

	if (LIST_EMPTY(fv_list))
		return ICE_ERR_CFG;

	return ICE_SUCCESS;

err:
	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
				 list_entry) {
		LIST_DEL(&fvl->list_entry);
		ice_free(hw, fvl);
	}

	return ICE_ERR_NO_MEMORY;
}
/**
 * ice_pkg_buf_free
 * @hw: pointer to the HW structure
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Frees a package buffer
 */
static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
	ice_free(hw, bld);
}
/**
 * ice_pkg_buf_reserve_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as all calls are made before calling
 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
 * called, the number of sections that can be allocated can no longer be
 * increased; not using all reserved sections is fine, but this will
 * result in some wasted space in the buffer.
 * Note: all package contents must be in Little Endian form.
 */
static enum ice_status
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
	struct ice_buf_hdr *buf;
	u16 section_count;
	u16 data_end;

	if (!bld)
		return ICE_ERR_PARAM;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* already an active section, can't increase table size */
	section_count = LE16_TO_CPU(buf->section_count);
	if (section_count > 0)
		return ICE_ERR_CFG;

	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
		return ICE_ERR_CFG;
	bld->reserved_section_table_entries += count;

	data_end = LE16_TO_CPU(buf->data_end) +
		   (count * sizeof(buf->section_entry[0]));
	buf->data_end = CPU_TO_LE16(data_end);

	return ICE_SUCCESS;
}
/**
 * ice_pkg_buf_alloc_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 *
 * Reserves memory in the buffer for a section's content and updates the
 * buffer's status accordingly. This routine returns a pointer to the first
 * byte of the section start within the buffer, which is used to fill in the
 * section contents.
 * Note: all package contents must be in Little Endian form.
 */
static void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
	struct ice_buf_hdr *buf;
	u16 sect_count;
	u16 data_end;

	if (!bld || !type || !size)
		return NULL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = LE16_TO_CPU(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ICE_ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)
		return NULL;

	/* check for more available section table entries */
	sect_count = LE16_TO_CPU(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
		buf->section_entry[sect_count].size = CPU_TO_LE16(size);
		buf->section_entry[sect_count].type = CPU_TO_LE32(type);

		data_end += size;
		buf->data_end = CPU_TO_LE16(data_end);

		buf->section_count = CPU_TO_LE16(sect_count + 1);
		return section_ptr;
	}

	/* no free section table entries */
	return NULL;
}
/**
 * ice_pkg_buf_get_active_sections
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Returns the number of active sections. Before using the package buffer
 * in an update package command, the caller should make sure that there is at
 * least one active section - otherwise, the buffer is not legal and should
 * not be used.
 * Note: all package contents must be in Little Endian form.
 */
static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
{
	struct ice_buf_hdr *buf;

	if (!bld)
		return 0;

	buf = (struct ice_buf_hdr *)&bld->buf;
	return LE16_TO_CPU(buf->section_count);
}
/**
 * ice_pkg_buf
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Return a pointer to the buffer's header
 */
static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
	if (!bld)
		return NULL;

	return &bld->buf;
}
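
/* Illustrative sketch (not part of the driver): the typical life cycle
 * of an update package buffer, mirroring what ice_create_tunnel() and
 * ice_destroy_tunnel() below do ("n", "sid" and "size" are placeholders):
 *
 *	bld = ice_pkg_buf_alloc(hw);
 *	ice_pkg_buf_reserve_section(bld, n);   // before any alloc_section
 *	sect = ice_pkg_buf_alloc_section(bld, sid, size);
 *	// ... fill in the section contents in Little Endian form ...
 *	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 *	ice_pkg_buf_free(hw, bld);
 */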
/**
 * ice_tunnel_port_in_use
 * @hw: pointer to the HW structure
 * @port: port to search for
 * @index: optionally returns index
 *
 * Returns whether a port is already in use as a tunnel, and optionally its
 * index
 */
bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
{
	u16 i;

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
			if (index)
				*index = i;
			return true;
		}

	return false;
}
/**
 * ice_tunnel_get_type
 * @hw: pointer to the HW structure
 * @port: port to search for
 * @type: returns the tunnel type
 *
 * For a given port number, will return the type of tunnel.
 */
bool
ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
{
	u16 i;

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
			*type = hw->tnl.tbl[i].type;
			return true;
		}

	return false;
}
/**
 * ice_find_free_tunnel_entry
 * @hw: pointer to the HW structure
 * @type: tunnel type
 * @index: optionally returns index
 *
 * Returns whether there is a free tunnel entry, and optionally its index
 */
static bool
ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
			   u16 *index)
{
	u16 i;

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
		    hw->tnl.tbl[i].type == type) {
			if (index)
				*index = i;
			return true;
		}

	return false;
}
/**
 * ice_create_tunnel
 * @hw: pointer to the HW structure
 * @type: type of tunnel
 * @port: port to use for vxlan tunnel
 *
 * Creates a tunnel by building an update package buffer containing the new
 * boost TCAM entries for the RX and TX parsers and then performing an update
 * package command.
 */
enum ice_status
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;
	u16 index;

	if (ice_tunnel_port_in_use(hw, port, NULL))
		return ICE_ERR_ALREADY_EXISTS;

	if (!ice_find_free_tunnel_entry(hw, type, &index))
		return ICE_ERR_OUT_OF_RANGE;

	bld = ice_pkg_buf_alloc(hw);
	if (!bld)
		return ICE_ERR_NO_MEMORY;

	/* allocate 2 sections, one for RX parser, one for TX parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_create_tunnel_err;

	sect_rx = (struct ice_boost_tcam_section *)
		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					  sizeof(*sect_rx));
	if (!sect_rx)
		goto ice_create_tunnel_err;
	sect_rx->count = CPU_TO_LE16(1);

	sect_tx = (struct ice_boost_tcam_section *)
		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					  sizeof(*sect_tx));
	if (!sect_tx)
		goto ice_create_tunnel_err;
	sect_tx->count = CPU_TO_LE16(1);

	/* copy original boost entry to update package buffer */
	ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
		   sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);

	/* over-write the never-match dest port key bits with the encoded port
	 * bits
	 */
	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
		    (u8 *)&port, NULL, NULL, NULL,
		    offsetof(struct ice_boost_key_value, hv_dst_port_key),
		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));

	/* exact copy of entry to TX section entry */
	ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
		   ICE_NONDMA_TO_NONDMA);

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status) {
		hw->tnl.tbl[index].port = port;
		hw->tnl.tbl[index].in_use = true;
	}

ice_create_tunnel_err:
	ice_pkg_buf_free(hw, bld);

	return status;
}
/**
 * ice_destroy_tunnel
 * @hw: pointer to the HW structure
 * @port: port of tunnel to destroy (ignored if the all parameter is true)
 * @all: flag that states to destroy all tunnels
 *
 * Destroys a tunnel or all tunnels by creating an update package buffer
 * targeting the specific updates requested and then performing an update
 * package.
 */
enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;
	u16 count = 0;
	u16 size;
	u16 i;

	/* determine count */
	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
		    (all || hw->tnl.tbl[i].port == port))
			count++;

	if (!count)
		return ICE_ERR_PARAM;

	/* size of section - there is at least one entry */
	size = (count - 1) * sizeof(*sect_rx->tcam) + sizeof(*sect_rx);

	bld = ice_pkg_buf_alloc(hw);
	if (!bld)
		return ICE_ERR_NO_MEMORY;

	/* allocate 2 sections, one for RX parser, one for TX parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_destroy_tunnel_err;

	sect_rx = (struct ice_boost_tcam_section *)
		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					  size);
	if (!sect_rx)
		goto ice_destroy_tunnel_err;
	sect_rx->count = CPU_TO_LE16(1);

	sect_tx = (struct ice_boost_tcam_section *)
		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					  size);
	if (!sect_tx)
		goto ice_destroy_tunnel_err;
	sect_tx->count = CPU_TO_LE16(1);

	/* copy original boost entry to update package buffer, one copy to RX
	 * section, another copy to the TX section
	 */
	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
		    (all || hw->tnl.tbl[i].port == port)) {
			ice_memcpy(sect_rx->tcam + i,
				   hw->tnl.tbl[i].boost_entry,
				   sizeof(*sect_rx->tcam),
				   ICE_NONDMA_TO_NONDMA);
			ice_memcpy(sect_tx->tcam + i,
				   hw->tnl.tbl[i].boost_entry,
				   sizeof(*sect_tx->tcam),
				   ICE_NONDMA_TO_NONDMA);
			hw->tnl.tbl[i].marked = true;
		}

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status)
		for (i = 0; i < hw->tnl.count &&
		     i < ICE_TUNNEL_MAX_ENTRIES; i++)
			if (hw->tnl.tbl[i].marked) {
				hw->tnl.tbl[i].port = 0;
				hw->tnl.tbl[i].in_use = false;
				hw->tnl.tbl[i].marked = false;
			}

ice_destroy_tunnel_err:
	ice_pkg_buf_free(hw, bld);

	return status;
}
/**
 * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
 * @hw: pointer to the hardware structure
 * @blk: hardware block
 * @prof: profile ID
 * @fv_idx: field vector word index
 * @prot: variable to receive the protocol ID
 * @off: variable to receive the protocol offset
 */
enum ice_status
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u8 fv_idx,
		  u8 *prot, u16 *off)
{
	struct ice_fv_word *fv_ext;

	if (prof >= hw->blk[blk].es.count)
		return ICE_ERR_PARAM;

	if (fv_idx >= hw->blk[blk].es.fvw)
		return ICE_ERR_PARAM;

	fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);

	*prot = fv_ext[fv_idx].prot_id;
	*off = fv_ext[fv_idx].off;

	return ICE_SUCCESS;
}
/* PTG Management */

/**
 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to search for
 * @ptg: pointer to variable that receives the PTG
 *
 * This function will search the PTGs for a particular ptype, returning the
 * PTG ID that contains it through the ptg parameter, with the value of
 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
 */
static enum ice_status
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
	if (ptype >= ICE_XLT1_CNT || !ptg)
		return ICE_ERR_PARAM;

	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
	return ICE_SUCCESS;
}
/**
 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptg: the ptg to allocate
 *
 * This function allocates a given packet type group ID specified by the ptg
 * parameter.
 */
void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
{
	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
}
/**
 * ice_ptg_alloc - Find a free entry and allocates a new packet type group ID
 * @hw: pointer to the hardware structure
 * @blk: HW block
 *
 * This function allocates and returns a new packet type group ID. Note
 * that 0 is the default packet type group, so successfully created PTGs will
 * have a non-zero ID value; which means a 0 return value indicates an error.
 */
static u8 ice_ptg_alloc(struct ice_hw *hw, enum ice_block blk)
{
	u16 i;

	/* Skip the default PTG of 0 */
	for (i = 1; i < ICE_MAX_PTGS; i++)
		if (!hw->blk[blk].xlt1.ptg_tbl[i].in_use) {
			/* found a free PTG ID */
			ice_ptg_alloc_val(hw, blk, i);
			return (u8)i;
		}

	return 0;
}
/**
 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to remove
 * @ptg: the ptg to remove the ptype from
 *
 * This function will remove the ptype from the specific ptg, and move it to
 * the default PTG (ICE_DEFAULT_PTG).
 */
static enum ice_status
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	struct ice_ptg_ptype **ch;
	struct ice_ptg_ptype *p;

	if (ptype > ICE_XLT1_CNT - 1)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Should not happen if .in_use is set, bad config */
	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
		return ICE_ERR_CFG;

	/* find the ptype within this PTG, and bypass the link over it */
	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	while (p) {
		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
			*ch = p->next_ptype;
			break;
		}

		ch = &p->next_ptype;
		p = p->next_ptype;
	}

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;

	return ICE_SUCCESS;
}
/**
 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to add or move
 * @ptg: the ptg to add or move the ptype to
 *
 * This function will either add or move a ptype to a particular PTG depending
 * on if the ptype is already part of another group. Note that using a
 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
 * default PTG.
 */
static enum ice_status
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	enum ice_status status;
	u8 original_ptg;

	if (ptype > ICE_XLT1_CNT - 1)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
		return ICE_ERR_DOES_NOT_EXIST;

	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
	if (status)
		return status;

	/* Is ptype already in the correct PTG? */
	if (original_ptg == ptg)
		return ICE_SUCCESS;

	/* Remove from original PTG and move back to the default PTG */
	if (original_ptg != ICE_DEFAULT_PTG)
		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);

	/* Moving to default PTG? Then we're done with this request */
	if (ptg == ICE_DEFAULT_PTG)
		return ICE_SUCCESS;

	/* Add ptype to PTG at beginning of list */
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
		&hw->blk[blk].xlt1.ptypes[ptype];

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
	hw->blk[blk].xlt1.t[ptype] = ptg;

	return ICE_SUCCESS;
}
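
/* Illustrative sketch (not part of the driver): grouping a ptype into a
 * fresh PTG. ice_ptg_alloc() returns 0 on failure, since 0 is reserved
 * for the default PTG:
 *
 *	u8 ptg = ice_ptg_alloc(hw, ICE_BLK_SW);
 *
 *	if (!ptg)
 *		return ICE_ERR_MAX_LIMIT;
 *	status = ice_ptg_add_mv_ptype(hw, ICE_BLK_SW, ptype, ptg);
 */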
/* Block / table size info */
struct ice_blk_size_details {
	u16 xlt1;			/* # XLT1 entries */
	u16 xlt2;			/* # XLT2 entries */
	u16 prof_tcam;			/* # profile ID TCAM entries */
	u16 prof_id;			/* # profile IDs */
	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
	u16 prof_redir;			/* # profile redirection entries */
	u16 es;				/* # extraction sequence entries */
	u16 fvw;			/* # field vector words */
	u8 overwrite;			/* overwrite existing entries allowed */
	u8 reverse;			/* reverse FV order */
};

static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
	/**
	 * Table Definitions
	 * XLT1 - Number of entries in XLT1 table
	 * XLT2 - Number of entries in XLT2 table
	 * TCAM - Number of entries in the Profile ID TCAM table
	 * CDID - Control Domain ID of the hardware block
	 * PRED - Number of entries in the Profile Redirection Table
	 * FV   - Number of entries in the Field Vector
	 * FVW  - Width (in WORDs) of the Field Vector
	 * OVR  - Overwrite existing table entries
	 * REV  - Reverse FV
	 */
	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,  FV, FVW */
	/*          Overwrite   , Reverse FV */
	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0, 256, 256,  48,
		    false, false },
	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  32,
		    false, false },
	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  24,
		    false, true },
	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  24,
		    true, true },
	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,  32,  32,  24,
		    false, false },
};
enum ice_sid_all {
	ICE_SID_XLT1_OFF = 0,
	ICE_SID_XLT2_OFF,
	ICE_SID_PR_OFF,
	ICE_SID_PR_REDIR_OFF,
	ICE_SID_ES_OFF
};
/* Characteristic handling */

/**
 * ice_match_prop_lst - determine if properties of two lists match
 * @list1: first properties list
 * @list2: second properties list
 *
 * Count, cookies and the order must match in order to be considered equivalent.
 */
static bool
ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
{
	struct ice_vsig_prof *tmp1;
	struct ice_vsig_prof *tmp2;
	u16 chk_count = 0;
	u16 count = 0;

	/* compare counts */
	LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) {
		count++;
	}
	LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) {
		chk_count++;
	}
	if (!count || count != chk_count)
		return false;

	tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
	tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);

	/* profile cookies must compare, and in the exact same order to take
	 * into account priority
	 */
	while (count--) {
		if (tmp2->profile_cookie != tmp1->profile_cookie)
			return false;

		tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
		tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
	}

	return true;
}
2223 /* VSIG Management */
2227 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2228 * @hw: pointer to the hardware structure
2230 * @vsi: VSI of interest
2231 * @vsig: pointer to receive the VSI group
2233 * This function will look up the VSI entry in the XLT2 list and return
2234 * the VSI group it's associated with.
2235 */
2237 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2239 if (!vsig || vsi >= ICE_MAX_VSI)
2240 return ICE_ERR_PARAM;
2242 /* As long as there's a default or valid VSIG associated with the input
2243 * VSI, the function returns success. Any handling of VSIG will be
2244 * done by the following add, update or remove functions.
2246 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2252 * ice_vsig_alloc_val - allocate a new VSIG by value
2253 * @hw: pointer to the hardware structure
2255 * @vsig: the vsig to allocate
2257 * This function will allocate a given VSIG specified by the vsig parameter.
2259 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2261 u16 idx = vsig & ICE_VSIG_IDX_M;
2263 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2264 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2265 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2268 return ICE_VSIG_VALUE(idx, hw->pf_id);
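/* Encoding note (editorial sketch): ICE_VSIG_VALUE() packs the table index
 * into the low bits of the handle and the owning PF ID into the upper bits,
 * which is why the lookup helpers below always recover the index with
 *
 *	u16 idx = vsig & ICE_VSIG_IDX_M;
 *
 * before touching xlt2.vsig_tbl[].
 */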
2272 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2273 * @hw: pointer to the hardware structure
2276 * This function will iterate through the VSIG list and mark the first
2277 * unused entry as used for the new VSIG entry, then return that value.
2278 */
2279 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2283 for (i = 1; i < ICE_MAX_VSIGS; i++)
2284 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2285 return ice_vsig_alloc_val(hw, blk, i);
2287 return ICE_DEFAULT_VSIG;
2291 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2292 * @hw: pointer to the hardware structure
2294 * @chs: characteristic list
2295 * @vsig: returns the VSIG with the matching profiles, if found
2297 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2298 * a group have the same characteristic set. To check whether a VSIG with
2299 * the same characteristics as the input exists, this
2300 * function will iterate through the XLT2 list and return the VSIG that has a
2301 * matching configuration. In order to make sure that priorities are accounted
2302 * for, the list must match exactly, including the order in which the
2303 * characteristics are listed.
2305 static enum ice_status
2306 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2307 struct LIST_HEAD_TYPE *chs, u16 *vsig)
2309 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2312 for (i = 0; i < xlt2->count; i++) {
2313 if (xlt2->vsig_tbl[i].in_use &&
2314 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2315 *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2320 return ICE_ERR_DOES_NOT_EXIST;
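/* Usage sketch (editorial, hypothetical caller): given an already-built
 * characteristic list 'chs', profile-add paths typically try to share an
 * existing group first and only allocate a fresh one on a miss:
 *
 *	u16 vsig;
 *
 *	if (ice_find_dup_props_vsig(hw, blk, &chs, &vsig))
 *		vsig = ice_vsig_alloc(hw, blk);
 */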
2324 * ice_vsig_free - free VSI group
2325 * @hw: pointer to the hardware structure
2327 * @vsig: VSIG to remove
2329 * The function will remove all VSIs associated with the input VSIG, move
2330 * them to the DEFAULT_VSIG, and mark the VSIG available.
2331 */
2332 static enum ice_status
2333 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2335 struct ice_vsig_prof *dtmp, *del;
2336 struct ice_vsig_vsi *vsi_cur;
2339 idx = vsig & ICE_VSIG_IDX_M;
2340 if (idx >= ICE_MAX_VSIGS)
2341 return ICE_ERR_PARAM;
2343 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2344 return ICE_ERR_DOES_NOT_EXIST;
2346 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2348 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2349 /* If the VSIG has at least 1 VSI then iterate through the
2350 * list and remove the VSIs before deleting the group.
2353 /* remove all VSIs associated with this VSIG XLT2 entry */
2355 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2357 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2358 vsi_cur->changed = 1;
2359 vsi_cur->next_vsi = NULL;
2363 /* NULL terminate head of VSI list */
2364 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2367 /* free characteristic list */
2368 LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
2369 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2370 ice_vsig_prof, list) {
2371 LIST_DEL(&del->list);
2375 /* if VSIG characteristic list was cleared for reset
2376 * re-initialize the list head
2378 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2384 * ice_vsig_remove_vsi - remove VSI from VSIG
2385 * @hw: pointer to the hardware structure
2387 * @vsi: VSI to remove
2388 * @vsig: VSI group to remove from
2390 * The function will remove the input VSI from its VSI group and move it
2391 * to the DEFAULT_VSIG.
2393 static enum ice_status
2394 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2396 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2399 idx = vsig & ICE_VSIG_IDX_M;
2401 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2402 return ICE_ERR_PARAM;
2404 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2405 return ICE_ERR_DOES_NOT_EXIST;
2407 /* entry already in default VSIG, don't have to remove */
2408 if (idx == ICE_DEFAULT_VSIG)
2411 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2415 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2416 vsi_cur = (*vsi_head);
2418 /* iterate the VSI list, skip over the entry to be removed */
2420 if (vsi_tgt == vsi_cur) {
2421 (*vsi_head) = vsi_cur->next_vsi;
2424 vsi_head = &vsi_cur->next_vsi;
2425 vsi_cur = vsi_cur->next_vsi;
2428 /* verify if VSI was removed from group list */
2430 return ICE_ERR_DOES_NOT_EXIST;
2432 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2433 vsi_cur->changed = 1;
2434 vsi_cur->next_vsi = NULL;
2440 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
2441 * @hw: pointer to the hardware structure
2444 * @vsig: destination VSI group
2446 * This function will move or add the input VSI to the target VSIG.
2447 * The function will find the original VSIG the VSI belongs to and
2448 * move the entry to the DEFAULT_VSIG, update the original VSIG and
2449 * then move the entry to the new VSIG.
2450 */
2451 static enum ice_status
2452 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2454 struct ice_vsig_vsi *tmp;
2455 enum ice_status status;
2458 idx = vsig & ICE_VSIG_IDX_M;
2460 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2461 return ICE_ERR_PARAM;
2463 /* if VSIG not in use and VSIG is not default type this VSIG
2464 * doesn't exist.
2465 */
2466 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2467 vsig != ICE_DEFAULT_VSIG)
2468 return ICE_ERR_DOES_NOT_EXIST;
2470 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2474 /* no update required if VSIGs match */
2475 if (orig_vsig == vsig)
2478 if (orig_vsig != ICE_DEFAULT_VSIG) {
2479 /* remove entry from orig_vsig and add to default VSIG */
2480 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2485 if (idx == ICE_DEFAULT_VSIG)
2488 /* Create VSI entry and add VSIG and prop_mask values */
2489 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2490 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2492 /* Add new entry to the head of the VSIG list */
2493 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2494 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2495 &hw->blk[blk].xlt2.vsis[vsi];
2496 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2497 hw->blk[blk].xlt2.t[vsi] = vsig;
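/* Flow sketch (editorial): moving VSI 5 from group A to group B decomposes
 * into the steps implemented above, roughly:
 *
 *	ice_vsig_find_vsi(hw, blk, 5, &orig_vsig);
 *	ice_vsig_remove_vsi(hw, blk, 5, orig_vsig);
 *
 * followed by the head insertion into B's first_vsi list and the xlt2.t[5]
 * update shown at the end of the function.
 */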
2503 * ice_prof_has_mask_idx - determine if profile index masking is identical
2504 * @hw: pointer to the hardware structure
2506 * @prof: profile to check
2507 * @idx: profile index to check
2508 * @masks: masks to match
2511 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
2514 bool expect_no_mask = false;
2519 /* If mask is 0x0000 or 0xffff, then there is no masking */
2520 if (mask == 0 || mask == 0xffff)
2521 expect_no_mask = true;
2523 /* Scan the enabled masks on this profile, for the specified idx */
2524 for (i = 0; i < ICE_PROFILE_MASK_COUNT; i++)
2525 if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
2526 if (hw->blk[blk].masks.masks[i].in_use &&
2527 hw->blk[blk].masks.masks[i].idx == idx) {
2529 if (hw->blk[blk].masks.masks[i].mask == mask)
2534 if (expect_no_mask) {
2546 * ice_prof_has_mask - determine if profile masking is identical
2547 * @hw: pointer to the hardware structure
2549 * @prof: profile to check
2550 * @masks: masks to match
2553 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
2557 /* es->mask_ena[prof] will have the mask */
2558 for (i = 0; i < hw->blk[blk].es.fvw; i++)
2559 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
2566 * ice_find_prof_id_with_mask - find profile ID for a given field vector
2567 * @hw: pointer to the hardware structure
2569 * @fv: field vector to search for
2570 * @masks: masks for fv
2571 * @prof_id: receives the profile ID
2573 static enum ice_status
2574 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
2575 struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
2577 struct ice_es *es = &hw->blk[blk].es;
2580 for (i = 0; i < es->count; i++) {
2581 u16 off = i * es->fvw;
2584 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2587 /* check if mask settings are the same for this profile */
2588 if (!ice_prof_has_mask(hw, blk, i, masks))
2595 return ICE_ERR_DOES_NOT_EXIST;
2599 * ice_find_prof_id - find profile ID for a given field vector
2600 * @hw: pointer to the hardware structure
2602 * @fv: field vector to search for
2603 * @prof_id: receives the profile ID
2605 static enum ice_status
2606 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
2607 struct ice_fv_word *fv, u8 *prof_id)
2609 struct ice_es *es = &hw->blk[blk].es;
2612 for (i = 0; i < es->count; i++) {
2615 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2622 return ICE_ERR_DOES_NOT_EXIST;
2626 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
2627 * @blk: the block type
2628 * @rsrc_type: pointer to variable to receive the resource type
2630 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2634 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID;
2637 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID;
2640 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
2643 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
2646 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
2655 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
2656 * @blk: the block type
2657 * @rsrc_type: pointer to variable to receive the resource type
2659 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2663 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM;
2666 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM;
2669 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
2672 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
2675 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
2684 * ice_alloc_tcam_ent - allocate hardware TCAM entry
2685 * @hw: pointer to the HW struct
2686 * @blk: the block to allocate the TCAM for
2687 * @tcam_idx: pointer to variable to receive the TCAM entry
2689 * This function allocates a new entry in a Profile ID TCAM for a specific
2690 * block.
2691 */
2692 static enum ice_status
2693 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
2697 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2698 return ICE_ERR_PARAM;
2700 return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
2704 * ice_free_tcam_ent - free hardware TCAM entry
2705 * @hw: pointer to the HW struct
2706 * @blk: the block from which to free the TCAM entry
2707 * @tcam_idx: the TCAM entry to free
2709 * This function frees an entry in a Profile ID TCAM for a specific block.
2711 static enum ice_status
2712 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
2716 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2717 return ICE_ERR_PARAM;
2719 return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
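/* Pairing sketch (editorial): TCAM entries are leased from firmware through
 * the shared resource manager, so each successful allocation is expected to
 * be returned eventually:
 *
 *	u16 tcam_idx;
 *
 *	if (!ice_alloc_tcam_ent(hw, ICE_BLK_FD, &tcam_idx)) {
 *		... program and later retire the entry ...
 *		ice_free_tcam_ent(hw, ICE_BLK_FD, tcam_idx);
 *	}
 */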
2723 * ice_alloc_prof_id - allocate profile ID
2724 * @hw: pointer to the HW struct
2725 * @blk: the block to allocate the profile ID for
2726 * @prof_id: pointer to variable to receive the profile ID
2728 * This function allocates a new profile ID, which also corresponds to a Field
2729 * Vector (Extraction Sequence) entry.
2731 static enum ice_status
2732 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
2734 enum ice_status status;
2738 if (!ice_prof_id_rsrc_type(blk, &res_type))
2739 return ICE_ERR_PARAM;
2741 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
2743 *prof_id = (u8)get_prof;
2749 * ice_free_prof_id - free profile ID
2750 * @hw: pointer to the HW struct
2751 * @blk: the block from which to free the profile ID
2752 * @prof_id: the profile ID to free
2754 * This function frees a profile ID, which also corresponds to a Field Vector.
2756 static enum ice_status
2757 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2759 u16 tmp_prof_id = (u16)prof_id;
2762 if (!ice_prof_id_rsrc_type(blk, &res_type))
2763 return ICE_ERR_PARAM;
2765 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
2769 * ice_prof_inc_ref - increment reference count for profile
2770 * @hw: pointer to the HW struct
2771 * @blk: the block for which to increment the reference count
2772 * @prof_id: the profile ID for which to increment the reference count
2774 static enum ice_status
2775 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2777 if (prof_id > hw->blk[blk].es.count)
2778 return ICE_ERR_PARAM;
2780 hw->blk[blk].es.ref_count[prof_id]++;
2786 * ice_write_prof_mask_reg - write profile mask register
2787 * @hw: pointer to the HW struct
2788 * @blk: hardware block
2789 * @mask_idx: mask index
2790 * @idx: index of the FV which will use the mask
2791 * @mask: the 16-bit mask
2794 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
2802 offset = GLQF_HMASK(mask_idx);
2803 val = (idx << GLQF_HMASK_MSK_INDEX_S) &
2804 GLQF_HMASK_MSK_INDEX_M;
2805 val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
2808 offset = GLQF_FDMASK(mask_idx);
2809 val = (idx << GLQF_FDMASK_MSK_INDEX_S) &
2810 GLQF_FDMASK_MSK_INDEX_M;
2811 val |= (mask << GLQF_FDMASK_MASK_S) &
2815 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2820 wr32(hw, offset, val);
2821 ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
2822 blk, idx, offset, val);
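/* Worked example (editorial, hypothetical values): restricting FV word 3 of
 * an RSS profile to its low byte programs both index and mask into one
 * register:
 *
 *	ice_write_prof_mask_reg(hw, ICE_BLK_RSS, mask_idx, 3, 0x00ff);
 *
 * The resulting GLQF_HMASK(mask_idx) value carries 3 in the MSK_INDEX field
 * and 0x00ff in the MASK field.
 */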
2826 * ice_write_prof_mask_enable_res - write profile mask enable register
2827 * @hw: pointer to the HW struct
2828 * @blk: hardware block
2829 * @prof_id: profile id
2830 * @enable_mask: enable mask
2833 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
2834 u16 prof_id, u32 enable_mask)
2840 offset = GLQF_HMASK_SEL(prof_id);
2843 offset = GLQF_FDMASK_SEL(prof_id);
2846 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2851 wr32(hw, offset, enable_mask);
2852 ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
2853 blk, prof_id, offset, enable_mask);
2857 * ice_init_prof_masks - initialize profile masks for a block
2858 * @hw: pointer to the HW struct
2859 * @blk: hardware block
2861 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
2863 #define MAX_NUM_PORTS 8
2864 u16 num_ports = MAX_NUM_PORTS;
2867 ice_init_lock(&hw->blk[blk].masks.lock);
2869 hw->blk[blk].masks.count = ICE_PROFILE_MASK_COUNT / num_ports;
2870 hw->blk[blk].masks.first = hw->pf_id * hw->blk[blk].masks.count;
2872 ice_memset(hw->blk[blk].masks.masks, 0,
2873 sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM);
2875 for (i = hw->blk[blk].masks.first;
2876 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
2877 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
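/* Partitioning sketch (editorial): the global mask pool is divided evenly
 * across the MAX_NUM_PORTS (8) PFs, so e.g. for PF 2:
 *
 *	count = ICE_PROFILE_MASK_COUNT / 8;
 *	first = 2 * count;
 *
 * and PF 2 may only ever touch mask indexes [first, first + count).
 */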
2881 * ice_init_all_prof_masks - initialize all profile masks
2882 * @hw: pointer to the HW struct
2884 void ice_init_all_prof_masks(struct ice_hw *hw)
2886 ice_init_prof_masks(hw, ICE_BLK_RSS);
2887 ice_init_prof_masks(hw, ICE_BLK_FD);
2891 * ice_alloc_prof_mask - allocate profile mask
2892 * @hw: pointer to the HW struct
2893 * @blk: hardware block
2894 * @idx: index of FV which will use the mask
2895 * @mask: the 16-bit mask
2896 * @mask_idx: variable to receive the mask index
2898 static enum ice_status
2899 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
2902 bool found_unused = false, found_copy = false;
2903 enum ice_status status = ICE_ERR_MAX_LIMIT;
2904 u16 unused_idx = 0, copy_idx = 0;
2907 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
2908 return ICE_ERR_PARAM;
2910 ice_acquire_lock(&hw->blk[blk].masks.lock);
2912 for (i = hw->blk[blk].masks.first;
2913 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
2914 if (hw->blk[blk].masks.masks[i].in_use) {
2915 /* if mask is in use and it exactly duplicates the
2916 * desired mask and index, then it can be reused
2918 if (hw->blk[blk].masks.masks[i].mask == mask &&
2919 hw->blk[blk].masks.masks[i].idx == idx) {
2925 /* save off unused index, but keep searching in case
2926 * there is an exact match later on
2928 if (!found_unused) {
2929 found_unused = true;
2936 else if (found_unused)
2939 goto err_ice_alloc_prof_mask;
2941 /* update mask for a new entry */
2943 hw->blk[blk].masks.masks[i].in_use = true;
2944 hw->blk[blk].masks.masks[i].mask = mask;
2945 hw->blk[blk].masks.masks[i].idx = idx;
2946 hw->blk[blk].masks.masks[i].ref = 0;
2947 ice_write_prof_mask_reg(hw, blk, i, idx, mask);
2950 hw->blk[blk].masks.masks[i].ref++;
2952 status = ICE_SUCCESS;
2954 err_ice_alloc_prof_mask:
2955 ice_release_lock(&hw->blk[blk].masks.lock);
2961 * ice_free_prof_mask - free profile mask
2962 * @hw: pointer to the HW struct
2963 * @blk: hardware block
2964 * @mask_idx: index of mask
2966 static enum ice_status
2967 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
2969 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
2970 return ICE_ERR_PARAM;
2972 if (!(mask_idx >= hw->blk[blk].masks.first &&
2973 mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
2974 return ICE_ERR_DOES_NOT_EXIST;
2976 ice_acquire_lock(&hw->blk[blk].masks.lock);
2978 if (!hw->blk[blk].masks.masks[mask_idx].in_use)
2979 goto exit_ice_free_prof_mask;
2981 if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
2982 hw->blk[blk].masks.masks[mask_idx].ref--;
2983 goto exit_ice_free_prof_mask;
2987 hw->blk[blk].masks.masks[mask_idx].in_use = false;
2988 hw->blk[blk].masks.masks[mask_idx].mask = 0;
2989 hw->blk[blk].masks.masks[mask_idx].idx = 0;
2991 /* update mask as unused entry */
2992 ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk, mask_idx);
2993 ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
2995 exit_ice_free_prof_mask:
2996 ice_release_lock(&hw->blk[blk].masks.lock);
3002 * ice_free_prof_masks - free all profile masks for a profile
3003 * @hw: pointer to the HW struct
3004 * @blk: hardware block
3005 * @prof_id: profile id
3007 static enum ice_status
3008 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
3013 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3014 return ICE_ERR_PARAM;
3016 mask_bm = hw->blk[blk].es.mask_ena[prof_id];
3017 for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
3018 if (mask_bm & BIT(i))
3019 ice_free_prof_mask(hw, blk, i);
3025 * ice_shutdown_prof_masks - releases masks and lock for a block
3026 * @hw: pointer to the HW struct
3027 * @blk: hardware block
3029 * This should be called before unloading the driver
3031 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
3035 ice_acquire_lock(&hw->blk[blk].masks.lock);
3037 for (i = hw->blk[blk].masks.first;
3038 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
3039 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3041 hw->blk[blk].masks.masks[i].in_use = false;
3042 hw->blk[blk].masks.masks[i].idx = 0;
3043 hw->blk[blk].masks.masks[i].mask = 0;
3046 ice_release_lock(&hw->blk[blk].masks.lock);
3047 ice_destroy_lock(&hw->blk[blk].masks.lock);
3051 * ice_shutdown_all_prof_masks - releases all locks for masking
3052 * @hw: pointer to the HW struct
3055 * This should be called before unloading the driver
3057 void ice_shutdown_all_prof_masks(struct ice_hw *hw)
3059 ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
3060 ice_shutdown_prof_masks(hw, ICE_BLK_FD);
3064 * ice_update_prof_masking - set registers according to masking
3065 * @hw: pointer to the HW struct
3066 * @blk: hardware block
3067 * @prof_id: profile id
3071 static enum ice_status
3072 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
3073 struct ice_fv_word *es, u16 *masks)
3080 /* Only support FD and RSS masking, otherwise nothing to be done */
3081 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3084 for (i = 0; i < hw->blk[blk].es.fvw; i++)
3085 if (masks[i] && masks[i] != 0xFFFF) {
3086 if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
3087 ena_mask |= BIT(idx);
3089 /* not enough bitmaps */
3096 /* free any bitmaps we have allocated */
3097 for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
3098 if (ena_mask & BIT(i))
3099 ice_free_prof_mask(hw, blk, i);
3101 return ICE_ERR_OUT_OF_RANGE;
3104 /* enable the masks for this profile */
3105 ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
3107 /* store enabled masks with profile so that they can be freed later */
3108 hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
3114 * ice_write_es - write an extraction sequence to hardware
3115 * @hw: pointer to the HW struct
3116 * @blk: the block in which to write the extraction sequence
3117 * @prof_id: the profile ID to write
3118 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
3121 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3122 struct ice_fv_word *fv)
3126 off = prof_id * hw->blk[blk].es.fvw;
3128 ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
3129 sizeof(*fv), ICE_NONDMA_MEM);
3130 hw->blk[blk].es.written[prof_id] = false;
3132 ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
3133 sizeof(*fv), ICE_NONDMA_TO_NONDMA);
3138 * ice_prof_dec_ref - decrement reference count for profile
3139 * @hw: pointer to the HW struct
3140 * @blk: the block from which to free the profile ID
3141 * @prof_id: the profile ID for which to decrement the reference count
3143 static enum ice_status
3144 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3146 if (prof_id > hw->blk[blk].es.count)
3147 return ICE_ERR_PARAM;
3149 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3150 if (!--hw->blk[blk].es.ref_count[prof_id]) {
3151 ice_write_es(hw, blk, prof_id, NULL);
3152 ice_free_prof_masks(hw, blk, prof_id);
3153 return ice_free_prof_id(hw, blk, prof_id);
3160 /* Block / table section IDs */
3161 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3165 ICE_SID_PROFID_TCAM_SW,
3166 ICE_SID_PROFID_REDIR_SW,
3173 ICE_SID_PROFID_TCAM_ACL,
3174 ICE_SID_PROFID_REDIR_ACL,
3181 ICE_SID_PROFID_TCAM_FD,
3182 ICE_SID_PROFID_REDIR_FD,
3189 ICE_SID_PROFID_TCAM_RSS,
3190 ICE_SID_PROFID_REDIR_RSS,
3197 ICE_SID_PROFID_TCAM_PE,
3198 ICE_SID_PROFID_REDIR_PE,
3204 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3205 * @hw: pointer to the hardware structure
3206 * @blk: the HW block to initialize
3209 void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3213 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3216 ptg = hw->blk[blk].xlt1.t[pt];
3217 if (ptg != ICE_DEFAULT_PTG) {
3218 ice_ptg_alloc_val(hw, blk, ptg);
3219 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3225 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3226 * @hw: pointer to the hardware structure
3227 * @blk: the HW block to initialize
3229 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3233 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3236 vsig = hw->blk[blk].xlt2.t[vsi];
3238 ice_vsig_alloc_val(hw, blk, vsig);
3239 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3240 /* no changes at this time, since this has been
3241 * initialized from the original package
3243 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3249 * ice_init_sw_db - init software database from HW tables
3250 * @hw: pointer to the hardware structure
3252 static void ice_init_sw_db(struct ice_hw *hw)
3256 for (i = 0; i < ICE_BLK_COUNT; i++) {
3257 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3258 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3263 * ice_fill_tbl - Reads content of a single table type into database
3264 * @hw: pointer to the hardware structure
3265 * @block_id: Block ID of the table to copy
3266 * @sid: Section ID of the table to copy
3268 * Will attempt to read the entire content of a given table of a single block
3269 * into the driver database. We assume that the buffer will always
3270 * be as large or larger than the data contained in the package. If
3271 * this condition is not met, there is most likely an error in the package
3272 * contents.
3273 */
3274 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3276 u32 dst_len, sect_len, offset = 0;
3277 struct ice_prof_redir_section *pr;
3278 struct ice_prof_id_section *pid;
3279 struct ice_xlt1_section *xlt1;
3280 struct ice_xlt2_section *xlt2;
3281 struct ice_sw_fv_section *es;
3282 struct ice_pkg_enum state;
3286 /* if the HW segment pointer is null then the first iteration of
3287 * ice_pkg_enum_section() will fail. In this case the HW tables will
3288 * not be filled and the function will simply return.
3289 */
3291 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3295 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
3297 sect = ice_pkg_enum_section(hw->seg, &state, sid);
3301 case ICE_SID_XLT1_SW:
3302 case ICE_SID_XLT1_FD:
3303 case ICE_SID_XLT1_RSS:
3304 case ICE_SID_XLT1_ACL:
3305 case ICE_SID_XLT1_PE:
3306 xlt1 = (struct ice_xlt1_section *)sect;
3308 sect_len = LE16_TO_CPU(xlt1->count) *
3309 sizeof(*hw->blk[block_id].xlt1.t);
3310 dst = hw->blk[block_id].xlt1.t;
3311 dst_len = hw->blk[block_id].xlt1.count *
3312 sizeof(*hw->blk[block_id].xlt1.t);
3314 case ICE_SID_XLT2_SW:
3315 case ICE_SID_XLT2_FD:
3316 case ICE_SID_XLT2_RSS:
3317 case ICE_SID_XLT2_ACL:
3318 case ICE_SID_XLT2_PE:
3319 xlt2 = (struct ice_xlt2_section *)sect;
3320 src = (_FORCE_ u8 *)xlt2->value;
3321 sect_len = LE16_TO_CPU(xlt2->count) *
3322 sizeof(*hw->blk[block_id].xlt2.t);
3323 dst = (u8 *)hw->blk[block_id].xlt2.t;
3324 dst_len = hw->blk[block_id].xlt2.count *
3325 sizeof(*hw->blk[block_id].xlt2.t);
3327 case ICE_SID_PROFID_TCAM_SW:
3328 case ICE_SID_PROFID_TCAM_FD:
3329 case ICE_SID_PROFID_TCAM_RSS:
3330 case ICE_SID_PROFID_TCAM_ACL:
3331 case ICE_SID_PROFID_TCAM_PE:
3332 pid = (struct ice_prof_id_section *)sect;
3333 src = (u8 *)pid->entry;
3334 sect_len = LE16_TO_CPU(pid->count) *
3335 sizeof(*hw->blk[block_id].prof.t);
3336 dst = (u8 *)hw->blk[block_id].prof.t;
3337 dst_len = hw->blk[block_id].prof.count *
3338 sizeof(*hw->blk[block_id].prof.t);
3340 case ICE_SID_PROFID_REDIR_SW:
3341 case ICE_SID_PROFID_REDIR_FD:
3342 case ICE_SID_PROFID_REDIR_RSS:
3343 case ICE_SID_PROFID_REDIR_ACL:
3344 case ICE_SID_PROFID_REDIR_PE:
3345 pr = (struct ice_prof_redir_section *)sect;
3346 src = pr->redir_value;
3347 sect_len = LE16_TO_CPU(pr->count) *
3348 sizeof(*hw->blk[block_id].prof_redir.t);
3349 dst = hw->blk[block_id].prof_redir.t;
3350 dst_len = hw->blk[block_id].prof_redir.count *
3351 sizeof(*hw->blk[block_id].prof_redir.t);
3353 case ICE_SID_FLD_VEC_SW:
3354 case ICE_SID_FLD_VEC_FD:
3355 case ICE_SID_FLD_VEC_RSS:
3356 case ICE_SID_FLD_VEC_ACL:
3357 case ICE_SID_FLD_VEC_PE:
3358 es = (struct ice_sw_fv_section *)sect;
3360 sect_len = (u32)(LE16_TO_CPU(es->count) *
3361 hw->blk[block_id].es.fvw) *
3362 sizeof(*hw->blk[block_id].es.t);
3363 dst = (u8 *)hw->blk[block_id].es.t;
3364 dst_len = (u32)(hw->blk[block_id].es.count *
3365 hw->blk[block_id].es.fvw) *
3366 sizeof(*hw->blk[block_id].es.t);
3372 /* if the section offset exceeds destination length, terminate
3373 * table fill.
3374 */
3375 if (offset > dst_len)
3378 /* if the sum of section size and offset exceeds the destination size,
3379 * then we are out of bounds of the HW table size for that PF.
3380 * Change the section length to fill the remaining table space.
3381 */
3383 if ((offset + sect_len) > dst_len)
3384 sect_len = dst_len - offset;
3386 ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
3388 sect = ice_pkg_enum_section(NULL, &state, sid);
3393 * ice_fill_blk_tbls - Read package context for tables
3394 * @hw: pointer to the hardware structure
3396 * Reads the current package contents and populates the driver
3397 * database with the data iteratively for all advanced feature
3398 * blocks. Assumes that the HW tables have been allocated.
3399 */
3400 void ice_fill_blk_tbls(struct ice_hw *hw)
3404 for (i = 0; i < ICE_BLK_COUNT; i++) {
3405 enum ice_block blk_id = (enum ice_block)i;
3407 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3408 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3409 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3410 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3411 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
3418 * ice_free_prof_map - free profile map
3419 * @hw: pointer to the hardware structure
3420 * @blk_idx: HW block index
3422 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3424 struct ice_es *es = &hw->blk[blk_idx].es;
3425 struct ice_prof_map *del, *tmp;
3427 ice_acquire_lock(&es->prof_map_lock);
3428 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
3429 ice_prof_map, list) {
3430 LIST_DEL(&del->list);
3433 INIT_LIST_HEAD(&es->prof_map);
3434 ice_release_lock(&es->prof_map_lock);
3438 * ice_free_flow_profs - free flow profile entries
3439 * @hw: pointer to the hardware structure
3440 * @blk_idx: HW block index
3442 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3444 struct ice_flow_prof *p, *tmp;
3446 ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
3447 LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
3448 ice_flow_prof, l_entry) {
3449 struct ice_flow_entry *e, *t;
3451 LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
3452 ice_flow_entry, l_entry)
3453 ice_flow_rem_entry(hw, ICE_FLOW_ENTRY_HNDL(e));
3455 LIST_DEL(&p->l_entry);
3457 ice_free(hw, p->acts);
3460 ice_release_lock(&hw->fl_profs_locks[blk_idx]);
3462 /* if driver is in reset and tables are being cleared
3463 * re-initialize the flow profile list heads
3465 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3469 * ice_free_vsig_tbl - free complete VSIG table entries
3470 * @hw: pointer to the hardware structure
3471 * @blk: the HW block on which to free the VSIG table entries
3473 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3477 if (!hw->blk[blk].xlt2.vsig_tbl)
3480 for (i = 1; i < ICE_MAX_VSIGS; i++)
3481 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3482 ice_vsig_free(hw, blk, i);
3486 * ice_free_hw_tbls - free hardware table memory
3487 * @hw: pointer to the hardware structure
3489 void ice_free_hw_tbls(struct ice_hw *hw)
3491 struct ice_rss_cfg *r, *rt;
3494 for (i = 0; i < ICE_BLK_COUNT; i++) {
3495 if (hw->blk[i].is_list_init) {
3496 struct ice_es *es = &hw->blk[i].es;
3498 ice_free_prof_map(hw, i);
3499 ice_destroy_lock(&es->prof_map_lock);
3500 ice_free_flow_profs(hw, i);
3501 ice_destroy_lock(&hw->fl_profs_locks[i]);
3503 hw->blk[i].is_list_init = false;
3505 ice_free_vsig_tbl(hw, (enum ice_block)i);
3506 ice_free(hw, hw->blk[i].xlt1.ptypes);
3507 ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
3508 ice_free(hw, hw->blk[i].xlt1.t);
3509 ice_free(hw, hw->blk[i].xlt2.t);
3510 ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
3511 ice_free(hw, hw->blk[i].xlt2.vsis);
3512 ice_free(hw, hw->blk[i].prof.t);
3513 ice_free(hw, hw->blk[i].prof_redir.t);
3514 ice_free(hw, hw->blk[i].es.t);
3515 ice_free(hw, hw->blk[i].es.ref_count);
3516 ice_free(hw, hw->blk[i].es.written);
3517 ice_free(hw, hw->blk[i].es.mask_ena);
3520 LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
3521 ice_rss_cfg, l_entry) {
3522 LIST_DEL(&r->l_entry);
3525 ice_destroy_lock(&hw->rss_locks);
3526 ice_shutdown_all_prof_masks(hw);
3527 ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
3531 * ice_init_flow_profs - init flow profile locks and list heads
3532 * @hw: pointer to the hardware structure
3533 * @blk_idx: HW block index
3535 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3537 ice_init_lock(&hw->fl_profs_locks[blk_idx]);
3538 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3542 * ice_init_hw_tbls - init hardware table memory
3543 * @hw: pointer to the hardware structure
3545 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3549 ice_init_lock(&hw->rss_locks);
3550 INIT_LIST_HEAD(&hw->rss_list_head);
3551 ice_init_all_prof_masks(hw);
3552 for (i = 0; i < ICE_BLK_COUNT; i++) {
3553 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3554 struct ice_prof_tcam *prof = &hw->blk[i].prof;
3555 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3556 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3557 struct ice_es *es = &hw->blk[i].es;
3560 if (hw->blk[i].is_list_init)
3563 ice_init_flow_profs(hw, i);
3564 ice_init_lock(&es->prof_map_lock);
3565 INIT_LIST_HEAD(&es->prof_map);
3566 hw->blk[i].is_list_init = true;
3568 hw->blk[i].overwrite = blk_sizes[i].overwrite;
3569 es->reverse = blk_sizes[i].reverse;
3571 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3572 xlt1->count = blk_sizes[i].xlt1;
3574 xlt1->ptypes = (struct ice_ptg_ptype *)
3575 ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
3580 xlt1->ptg_tbl = (struct ice_ptg_entry *)
3581 ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
3586 xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
3590 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3591 xlt2->count = blk_sizes[i].xlt2;
3593 xlt2->vsis = (struct ice_vsig_vsi *)
3594 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
3599 xlt2->vsig_tbl = (struct ice_vsig_entry *)
3600 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
3601 if (!xlt2->vsig_tbl)
3604 for (j = 0; j < xlt2->count; j++)
3605 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3607 xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
3611 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3612 prof->count = blk_sizes[i].prof_tcam;
3613 prof->max_prof_id = blk_sizes[i].prof_id;
3614 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3615 prof->t = (struct ice_prof_tcam_entry *)
3616 ice_calloc(hw, prof->count, sizeof(*prof->t));
3621 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3622 prof_redir->count = blk_sizes[i].prof_redir;
3623 prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
3624 sizeof(*prof_redir->t));
3629 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3630 es->count = blk_sizes[i].es;
3631 es->fvw = blk_sizes[i].fvw;
3632 es->t = (struct ice_fv_word *)
3633 ice_calloc(hw, (u32)(es->count * es->fvw),
3638 es->ref_count = (u16 *)
3639 ice_calloc(hw, es->count, sizeof(*es->ref_count));
3641 es->written = (u8 *)
3642 ice_calloc(hw, es->count, sizeof(*es->written));
3643 es->mask_ena = (u32 *)
3644 ice_calloc(hw, es->count, sizeof(*es->mask_ena));
3651 ice_free_hw_tbls(hw);
3652 return ICE_ERR_NO_MEMORY;
3656 * ice_prof_gen_key - generate profile ID key
3657 * @hw: pointer to the HW struct
3658 * @blk: the block in which to write the profile ID
3659 * @ptg: packet type group (PTG) portion of key
3660 * @vsig: VSIG portion of key
3661 * @cdid: cdid portion of key
3662 * @flags: flag portion of key
3663 * @vl_msk: valid mask
3664 * @dc_msk: don't care mask
3665 * @nm_msk: never match mask
3666 * @key: output of profile ID key
3668 static enum ice_status
3669 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3670 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3671 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3672 u8 key[ICE_TCAM_KEY_SZ])
3674 struct ice_prof_id_key inkey;
3677 inkey.xlt2_cdid = CPU_TO_LE16(vsig);
3678 inkey.flags = CPU_TO_LE16(flags);
3680 switch (hw->blk[blk].prof.cdid_bits) {
3684 #define ICE_CD_2_M 0xC000U
3685 #define ICE_CD_2_S 14
3686 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
3687 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
3690 #define ICE_CD_4_M 0xF000U
3691 #define ICE_CD_4_S 12
3692 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
3693 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
3696 #define ICE_CD_8_M 0xFF00U
3697 #define ICE_CD_8_S 8
3698 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
3699 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
3702 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3706 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3707 nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
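/* Key packing sketch (editorial, hypothetical cdid): with 4 CDID bits the
 * one-hot CDID occupies the top nibble of xlt2_cdid, so cdid = 1 produces
 *
 *	xlt2_cdid = (vsig & ~ICE_CD_4_M) | (BIT(1) << ICE_CD_4_S);
 *
 * leaving the low 12 bits of the word for the VSIG portion of the key.
 */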
3711 * ice_tcam_write_entry - write TCAM entry
3712 * @hw: pointer to the HW struct
3713 * @blk: the block in which to write the profile ID
3714 * @idx: the entry index to write to
3715 * @prof_id: profile ID
3716 * @ptg: packet type group (PTG) portion of key
3717 * @vsig: VSIG portion of key
3718 * @cdid: cdid portion of key
3719 * @flags: flag portion of key
3720 * @vl_msk: valid mask
3721 * @dc_msk: don't care mask
3722 * @nm_msk: never match mask
3724 static enum ice_status
3725 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
3726 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
3727 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3728 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
3729 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
3731 struct ice_prof_tcam_entry;
3732 enum ice_status status;
3734 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
3735 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
3737 hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
3738 hw->blk[blk].prof.t[idx].prof_id = prof_id;
3745 * ice_vsig_get_ref - returns number of VSIs belonging to a VSIG
3746 * @hw: pointer to the hardware structure
3748 * @vsig: VSIG to query
3749 * @refs: pointer to variable to receive the reference count
3751 static enum ice_status
3752 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
3754 u16 idx = vsig & ICE_VSIG_IDX_M;
3755 struct ice_vsig_vsi *ptr;
3758 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3759 return ICE_ERR_DOES_NOT_EXIST;
3761 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3764 ptr = ptr->next_vsi;
3771 * ice_get_ptg - get or allocate a ptg for a ptype
3772 * @hw: pointer to the hardware structure
3774 * @ptype: the ptype to retrieve the PTG for
3775 * @ptg: receives the PTG of the ptype
3776 * @add: receive boolean indicating whether PTG was added or not
3778 static enum ice_status
3779 ice_get_ptg(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg,
3782 enum ice_status status;
3784 *ptg = ICE_DEFAULT_PTG;
3787 status = ice_ptg_find_ptype(hw, blk, ptype, ptg);
3791 if (*ptg == ICE_DEFAULT_PTG) {
3792 /* need to allocate a PTG, and add ptype to it */
3793 *ptg = ice_ptg_alloc(hw, blk);
3794 if (*ptg == ICE_DEFAULT_PTG)
3795 return ICE_ERR_HW_TABLE;
3797 status = ice_ptg_add_mv_ptype(hw, blk, ptype, *ptg);
3799 return ICE_ERR_HW_TABLE;
3808 * ice_has_prof_vsig - check to see if VSIG has a specific profile
3809 * @hw: pointer to the hardware structure
3811 * @vsig: VSIG to check against
3812 * @hdl: profile handle
3815 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
3817 u16 idx = vsig & ICE_VSIG_IDX_M;
3818 struct ice_vsig_prof *ent;
3820 LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3821 ice_vsig_prof, list) {
3822 if (ent->profile_cookie == hdl)
3826 ice_debug(hw, ICE_DBG_INIT,
3827 "Characteristic list for VSI group %d not found.\n",
3833 * ice_prof_bld_es - build profile ID extraction sequence changes
3834 * @hw: pointer to the HW struct
3835 * @blk: hardware block
3836 * @bld: the update package buffer build to add to
3837 * @chgs: the list of changes to make in hardware
3839 static enum ice_status
3840 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
3841 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
3843 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
3844 struct ice_chs_chg *tmp;
3846 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
3847 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
3848 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
3849 struct ice_pkg_es *p;
3852 id = ice_sect_id(blk, ICE_VEC_TBL);
3853 p = (struct ice_pkg_es *)
3854 ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
3859 return ICE_ERR_MAX_LIMIT;
3861 p->count = CPU_TO_LE16(1);
3862 p->offset = CPU_TO_LE16(tmp->prof_id);
3864 ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
3865 ICE_NONDMA_TO_NONDMA);
3873 * ice_prof_bld_tcam - build profile ID TCAM changes
3874 * @hw: pointer to the HW struct
3875 * @blk: hardware block
3876 * @bld: the update package buffer build to add to
3877 * @chgs: the list of changes to make in hardware
3879 static enum ice_status
3880 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
3881 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
3883 struct ice_chs_chg *tmp;
3885 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
3886 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
3887 struct ice_prof_id_section *p;
3890 id = ice_sect_id(blk, ICE_PROF_TCAM);
3891 p = (struct ice_prof_id_section *)
3892 ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
3895 return ICE_ERR_MAX_LIMIT;
3897 p->count = CPU_TO_LE16(1);
3898 p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
3899 p->entry[0].prof_id = tmp->prof_id;
3901 ice_memcpy(p->entry[0].key,
3902 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
3903 sizeof(hw->blk[blk].prof.t->key),
3904 ICE_NONDMA_TO_NONDMA);
3912 * ice_prof_bld_xlt1 - build XLT1 changes
3913 * @blk: hardware block
3914 * @bld: the update package buffer build to add to
3915 * @chgs: the list of changes to make in hardware
3917 static enum ice_status
3918 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
3919 struct LIST_HEAD_TYPE *chgs)
3921 struct ice_chs_chg *tmp;
3923 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
3924 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
3925 struct ice_xlt1_section *p;
3928 id = ice_sect_id(blk, ICE_XLT1);
3929 p = (struct ice_xlt1_section *)
3930 ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
3933 return ICE_ERR_MAX_LIMIT;
3935 p->count = CPU_TO_LE16(1);
3936 p->offset = CPU_TO_LE16(tmp->ptype);
3937 p->value[0] = tmp->ptg;
3945 * ice_prof_bld_xlt2 - build XLT2 changes
3946 * @blk: hardware block
3947 * @bld: the update package buffer build to add to
3948 * @chgs: the list of changes to make in hardware
3950 static enum ice_status
3951 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
3952 struct LIST_HEAD_TYPE *chgs)
3954 struct ice_chs_chg *tmp;
3956 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
3959 if (tmp->type == ICE_VSIG_ADD)
3961 else if (tmp->type == ICE_VSI_MOVE)
3963 else if (tmp->type == ICE_VSIG_REM)
3967 struct ice_xlt2_section *p;
3970 id = ice_sect_id(blk, ICE_XLT2);
3971 p = (struct ice_xlt2_section *)
3972 ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
3975 return ICE_ERR_MAX_LIMIT;
3977 p->count = CPU_TO_LE16(1);
3978 p->offset = CPU_TO_LE16(tmp->vsi);
3979 p->value[0] = CPU_TO_LE16(tmp->vsig);
3987 * ice_upd_prof_hw - update hardware using the change list
3988 * @hw: pointer to the HW struct
3989 * @blk: hardware block
3990 * @chgs: the list of changes to make in hardware
3992 static enum ice_status
3993 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
3994 struct LIST_HEAD_TYPE *chgs)
3996 struct ice_buf_build *b;
3997 struct ice_chs_chg *tmp;
3998 enum ice_status status;
4006 /* count number of sections we need */
4007 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4008 switch (tmp->type) {
4009 case ICE_PTG_ES_ADD:
4027 sects = xlt1 + xlt2 + tcam + es;
4032 /* Build update package buffer */
4033 b = ice_pkg_buf_alloc(hw);
4035 return ICE_ERR_NO_MEMORY;
4037 status = ice_pkg_buf_reserve_section(b, sects);
4041 /* Preserve order of table update: ES, TCAM, PTG, VSIG */
4043 status = ice_prof_bld_es(hw, blk, b, chgs);
4049 status = ice_prof_bld_tcam(hw, blk, b, chgs);
4055 status = ice_prof_bld_xlt1(blk, b, chgs);
4061 status = ice_prof_bld_xlt2(blk, b, chgs);
4066 /* After package buffer build check if the section count in buffer is
4067 * non-zero and matches the number of sections detected for package
4068 * update.
4069 */
4070 pkg_sects = ice_pkg_buf_get_active_sections(b);
4071 if (!pkg_sects || pkg_sects != sects) {
4072 status = ICE_ERR_INVAL_SIZE;
4076 /* update package */
4077 status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4078 if (status == ICE_ERR_AQ_ERROR)
4079 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile.\n");
4082 ice_pkg_buf_free(hw, b);
4087 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
4088 * @hw: pointer to the HW struct
4089 * @prof_id: profile ID
4090 * @mask_sel: mask select
4092 * This function enables any of the masks selected by the mask select parameter
4093 * for the profile specified.
4095 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
4097 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
4099 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
4100 GLQF_FDMASK_SEL(prof_id), mask_sel);
4103 #define ICE_SRC_DST_MAX_COUNT 8
4105 struct ice_fd_src_dst_pair {
4111 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
4112 /* These are defined in pairs */
4113 { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
4114 { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
4116 { ICE_PROT_IPV4_IL, 2, 12 },
4117 { ICE_PROT_IPV4_IL, 2, 16 },
4119 { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
4120 { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
4122 { ICE_PROT_IPV6_IL, 8, 8 },
4123 { ICE_PROT_IPV6_IL, 8, 24 },
4125 { ICE_PROT_TCP_IL, 1, 0 },
4126 { ICE_PROT_TCP_IL, 1, 2 },
4128 { ICE_PROT_UDP_OF, 1, 0 },
4129 { ICE_PROT_UDP_OF, 1, 2 },
4131 { ICE_PROT_UDP_IL_OR_S, 1, 0 },
4132 { ICE_PROT_UDP_IL_OR_S, 1, 2 },
4134 { ICE_PROT_SCTP_IL, 1, 0 },
4135 { ICE_PROT_SCTP_IL, 1, 2 }
4138 #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
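/* Reading the table above (editorial note): entries are consumed in src/dst
 * pairs of {protocol, FV word count, header byte offset}. Rows 0/1, for
 * instance, are the outer IPv4 source (offset 12) and destination (offset 16)
 * addresses at two FV words (4 bytes) each, while the IPv6 rows span eight
 * FV words (16 bytes) per address and the L4 rows one word per port.
 */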
4141 * ice_update_fd_swap - set register appropriately for a FD FV extraction
4142 * @hw: pointer to the HW struct
4143 * @prof_id: profile ID
4144 * @es: extraction sequence (length of array is determined by the block)
4146 static enum ice_status
4147 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
4149 ice_declare_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4150 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
4151 #define ICE_FD_FV_NOT_FOUND (-2)
4152 s8 first_free = ICE_FD_FV_NOT_FOUND;
4153 u8 used[ICE_MAX_FV_WORDS] = { 0 };
4158 ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4160 ice_init_fd_mask_regs(hw);
4162 /* This code assumes that the Flow Director field vectors are assigned
4163 * from the end of the FV indexes working towards the zero index, that
4164 * only complete fields will be included and will be consecutive, and
4165 * that there are no gaps between valid indexes.
4168 /* Determine swap fields present */
4169 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
4170 /* Find the first free entry, assuming right to left population.
4171 * This is where we can start adding additional pairs if needed.
4173 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4177 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) {
4178 if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4179 es[i].off == ice_fd_pairs[j].off) {
4180 ice_set_bit(j, pair_list);
4186 orig_free = first_free;
4188 /* determine missing swap fields that need to be added */
4189 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4190 u8 bit1 = ice_is_bit_set(pair_list, i + 1);
4191 u8 bit0 = ice_is_bit_set(pair_list, i);
4196 /* add the appropriate 'paired' entry */
4202 /* check for room */
4203 if (first_free + 1 < ice_fd_pairs[index].count)
4204 return ICE_ERR_MAX_LIMIT;
4206 /* place in extraction sequence */
4207 for (k = 0; k < ice_fd_pairs[index].count; k++) {
4208 es[first_free - k].prot_id =
4209 ice_fd_pairs[index].prot_id;
4210 es[first_free - k].off =
4211 ice_fd_pairs[index].off + (k * 2);
4213 /* keep track of non-relevant fields */
4214 mask_sel |= 1 << (first_free - k);
4217 pair_start[index] = first_free;
4218 first_free -= ice_fd_pairs[index].count;
4222 /* fill in the swap array */
4223 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4225 u8 indexes_used = 1;
4227 /* assume flat at this index */
4228 #define ICE_SWAP_VALID 0x80
4229 used[si] = si | ICE_SWAP_VALID;
4231 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4236 /* check for a swap location */
4237 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) {
4238 if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4239 es[si].off == ice_fd_pairs[j].off) {
4242 /* determine the appropriate matching field */
4243 idx = j + ((j % 2) ? -1 : 1);
4245 indexes_used = ice_fd_pairs[idx].count;
4246 for (k = 0; k < indexes_used; k++) {
4247 used[si - k] = (pair_start[idx] - k) |
4258 /* for each set of 4 swap indexes, write the appropriate register */
4259 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4262 for (k = 0; k < 4; k++) {
4267 raw_entry |= used[idx] << (k * BITS_PER_BYTE);
4270 /* write the appropriate register set, based on HW block */
4271 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_entry);
4273 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %x\n",
4274 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_entry);
4277 /* update the masks for this profile to be sure we ignore fields that
4278 * are not relevant to our match criteria
4280 ice_update_fd_mask(hw, prof_id, mask_sel);
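/* Swap register sketch (editorial): each GLQF_FDSWAP register packs four
 * one-byte entries, each a source FV index ORed with ICE_SWAP_VALID, so for
 * j = 0 the loop above effectively computes:
 *
 *	raw_entry = used[3] << 24 | used[2] << 16 | used[1] << 8 | used[0];
 */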
4286 * ice_add_prof_with_mask - add profile
4287 * @hw: pointer to the HW struct
4288 * @blk: hardware block
4289 * @id: profile tracking ID
4290 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4291 * @es: extraction sequence (length of array is determined by the block)
4292 * @masks: masks for the extraction sequence
4294 * This function registers a profile, which matches a set of PTYPES with a
4295 * particular extraction sequence. While the hardware profile is allocated
4296 * it will not be written until the first call to ice_add_flow that specifies
4297 * the ID value used here.
4300 ice_add_prof_with_mask(struct ice_hw *hw, enum ice_block blk, u64 id,
4301 u8 ptypes[], struct ice_fv_word *es, u16 *masks)
4303 u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4304 struct ice_prof_map *prof;
4305 enum ice_status status;
4309 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4311 /* search for existing profile */
4312 status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
4314 /* allocate profile ID */
4315 status = ice_alloc_prof_id(hw, blk, &prof_id);
4317 goto err_ice_add_prof;
4318 if (blk == ICE_BLK_FD) {
4319 /* For Flow Director block, the extraction sequence may
4320 * need to be altered in the case where there are paired
4321 * fields that have no match. This is necessary because
4322 * for Flow Director, src and dest fields need to be paired
4323 * for filter programming and these values are swapped
4324 * during Tx.
4325 */
4326 status = ice_update_fd_swap(hw, prof_id, es);
4328 goto err_ice_add_prof;
4330 status = ice_update_prof_masking(hw, blk, prof_id, es, masks);
4332 goto err_ice_add_prof;
4334 /* and write new es */
4335 ice_write_es(hw, blk, prof_id, es);
4338 ice_prof_inc_ref(hw, blk, prof_id);
4340 /* add profile info */
4342 prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
4344 goto err_ice_add_prof;
4346 prof->profile_cookie = id;
4347 prof->prof_id = prof_id;
4348 prof->ptype_count = 0;
4351 /* build list of ptgs */
4352 while (bytes && prof->ptype_count < ICE_MAX_PTYPE_PER_PROFILE) {
4355 if (!ptypes[byte]) {
4360 /* Examine 8 bits per byte */
4361 for (bit = 0; bit < 8; bit++) {
4362 if (ptypes[byte] & BIT(bit)) {
4366 ptype = byte * BITS_PER_BYTE + bit;
4367 if (ptype < ICE_FLOW_PTYPE_MAX) {
4368 prof->ptype[prof->ptype_count] = ptype;
4370 if (++prof->ptype_count >=
4371 ICE_MAX_PTYPE_PER_PROFILE)
4375 /* nothing left in byte, then exit */
4376 m = ~((1 << (bit + 1)) - 1);
4377 if (!(ptypes[byte] & m))
4386 LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
4387 status = ICE_SUCCESS;
4390 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
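/* Caller-side sketch (editorial, hypothetical ptypes): the ptypes argument
 * is a bitmap of ICE_FLOW_PTYPE_MAX bits, so registering a profile for
 * ptypes 17 and 42 would look like:
 *
 *	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };
 *
 *	ptypes[17 / BITS_PER_BYTE] |= BIT(17 % BITS_PER_BYTE);
 *	ptypes[42 / BITS_PER_BYTE] |= BIT(42 % BITS_PER_BYTE);
 *	ice_add_prof_with_mask(hw, ICE_BLK_FD, id, ptypes, es, masks);
 */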
4395 * ice_add_prof - add profile
4396 * @hw: pointer to the HW struct
4397 * @blk: hardware block
4398 * @id: profile tracking ID
4399 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4400 * @es: extraction sequence (length of array is determined by the block)
4402 * This function registers a profile, which matches a set of PTYPES with a
4403 * particular extraction sequence. While the hardware profile is allocated
4404 * it will not be written until the first call to ice_add_flow that specifies
4405 * the ID value used here.
4408 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4409 struct ice_fv_word *es)
4411 u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4412 struct ice_prof_map *prof;
4413 enum ice_status status;
4417 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4419 /* search for existing profile */
4420 status = ice_find_prof_id(hw, blk, es, &prof_id);
4422 /* allocate profile ID */
4423 status = ice_alloc_prof_id(hw, blk, &prof_id);
4425 goto err_ice_add_prof;
4426 if (blk == ICE_BLK_FD) {
4427 /* For Flow Director block, the extraction sequence may
4428 * need to be altered in the case where there are paired
4429 * fields that have no match. This is necessary because
4430 * for Flow Director, src and dest fields need to be paired
4431 * for filter programming and these values are swapped
4432 * during Tx.
4433 */
4434 status = ice_update_fd_swap(hw, prof_id, es);
4436 goto err_ice_add_prof;
4439 /* and write new es */
4440 ice_write_es(hw, blk, prof_id, es);
4443 ice_prof_inc_ref(hw, blk, prof_id);
4445 /* add profile info */
4447 prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
4449 goto err_ice_add_prof;
4451 prof->profile_cookie = id;
4452 prof->prof_id = prof_id;
4453 prof->ptype_count = 0;
4456 /* build list of ptgs */
4457 while (bytes && prof->ptype_count < ICE_MAX_PTYPE_PER_PROFILE) {
4460 if (!ptypes[byte]) {
4465 /* Examine 8 bits per byte */
4466 for (bit = 0; bit < 8; bit++) {
4467 if (ptypes[byte] & BIT(bit)) {
4471 ptype = byte * BITS_PER_BYTE + bit;
4472 if (ptype < ICE_FLOW_PTYPE_MAX) {
4473 prof->ptype[prof->ptype_count] = ptype;
4475 if (++prof->ptype_count >=
4476 ICE_MAX_PTYPE_PER_PROFILE)
4480 /* nothing left in byte, then exit */
4481 m = ~((1 << (bit + 1)) - 1);
4482 if (!(ptypes[byte] & m))
4491 LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
4492 status = ICE_SUCCESS;
4495 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4500 * ice_search_prof_id_low - Search for a profile tracking ID low level
4501 * @hw: pointer to the HW struct
4502 * @blk: hardware block
4503 * @id: profile tracking ID
4505 * This will search for a profile tracking ID which was previously added. This
4506 * version assumes that the caller has already acquired the prof map lock.
4508 static struct ice_prof_map *
4509 ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
4511 struct ice_prof_map *entry = NULL;
4512 struct ice_prof_map *map;
4514 LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map,
4516 if (map->profile_cookie == id) {
4526 * ice_search_prof_id - Search for a profile tracking ID
4527 * @hw: pointer to the HW struct
4528 * @blk: hardware block
4529 * @id: profile tracking ID
4531 * This will search for a profile tracking ID which was previously added.
4533 struct ice_prof_map *
4534 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
4536 struct ice_prof_map *entry;
4538 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4539 entry = ice_search_prof_id_low(hw, blk, id);
4540 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4546 * ice_vsig_prof_id_count - count profiles in a VSIG
4547 * @hw: pointer to the HW struct
4548 * @blk: hardware block
4549 * @vsig: VSIG to remove the profile from
4552 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
4554 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
4555 struct ice_vsig_prof *p;
4557 LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4558 ice_vsig_prof, list) {
4566 * ice_rel_tcam_idx - release a TCAM index
4567 * @hw: pointer to the HW struct
4568 * @blk: hardware block
4569 * @idx: the index to release
4571 static enum ice_status
4572 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
4574 /* Masks to invoke a never match entry */
4575 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4576 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
4577 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4578 enum ice_status status;
4580 /* write the TCAM entry */
4581 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
4586 /* release the TCAM entry */
4587 status = ice_free_tcam_ent(hw, blk, idx);
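/* Conceptual model (sketch, not driver code) of how the three masks above
 * combine for one key byte: a don't-care (dc) bit ignores the packet bit,
 * while any never-match (nm) bit forces a miss regardless of the packet.
 * Writing nm 0x01 with dc 0xFE therefore turns the entry into one that can
 * never be hit, which is the intent of ice_rel_tcam_idx() before the index
 * is freed.
 */
static bool example_key_byte_matches(u8 pkt, u8 value, u8 dc, u8 nm)
{
	if (nm)		/* a never-match bit is present: always miss */
		return false;

	return ((pkt ^ value) & ~dc) == 0; /* compare only cared-for bits */
}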
4593 * ice_rem_prof_id - remove one profile from a VSIG
4594 * @hw: pointer to the HW struct
4595 * @blk: hardware block
4596 * @prof: pointer to profile structure to remove
4598 static enum ice_status
4599 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
4600 struct ice_vsig_prof *prof)
4602 enum ice_status status;
4605 for (i = 0; i < prof->tcam_count; i++) {
4606 prof->tcam[i].in_use = false;
4607 status = ice_rel_tcam_idx(hw, blk, prof->tcam[i].tcam_idx);
4609 return ICE_ERR_HW_TABLE;
4616 * ice_rem_vsig - remove VSIG
4617 * @hw: pointer to the HW struct
4618 * @blk: hardware block
4619 * @vsig: the VSIG to remove
4620 * @chg: the change list
4622 static enum ice_status
4623 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4624 struct LIST_HEAD_TYPE *chg)
4626 u16 idx = vsig & ICE_VSIG_IDX_M;
4627 struct ice_vsig_vsi *vsi_cur;
4628 struct ice_vsig_prof *d, *t;
4629 enum ice_status status;
4631 /* remove TCAM entries */
4632 LIST_FOR_EACH_ENTRY_SAFE(d, t,
4633 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4634 ice_vsig_prof, list) {
4635 status = ice_rem_prof_id(hw, blk, d);
4643 /* Move all VSIs associated with this VSIG to the default VSIG */
4644 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4645 /* If the VSIG has at least 1 VSI then iterate through the list
4646 * and remove the VSIs before deleting the group.
4650 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
4651 struct ice_chs_chg *p;
4653 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4655 return ICE_ERR_NO_MEMORY;
4657 p->type = ICE_VSIG_REM;
4658 p->orig_vsig = vsig;
4659 p->vsig = ICE_DEFAULT_VSIG;
4660 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
4662 LIST_ADD(&p->list_entry, chg);
4668 status = ice_vsig_free(hw, blk, vsig);
4674 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
4675 * @hw: pointer to the HW struct
4676 * @blk: hardware block
4677 * @vsig: VSIG to remove the profile from
4678 * @hdl: profile handle indicating which profile to remove
4679 * @chg: list to receive a record of changes
4681 static enum ice_status
4682 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4683 struct LIST_HEAD_TYPE *chg)
4685 u16 idx = vsig & ICE_VSIG_IDX_M;
4686 struct ice_vsig_prof *p, *t;
4687 enum ice_status status;
4689 LIST_FOR_EACH_ENTRY_SAFE(p, t,
4690 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4691 ice_vsig_prof, list) {
4692 if (p->profile_cookie == hdl) {
4693 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
4694 /* this is the last profile, remove the VSIG */
4695 return ice_rem_vsig(hw, blk, vsig, chg);
4697 status = ice_rem_prof_id(hw, blk, p);
4706 return ICE_ERR_DOES_NOT_EXIST;
4710 * ice_rem_flow_all - remove all flows with a particular profile
4711 * @hw: pointer to the HW struct
4712 * @blk: hardware block
4713 * @id: profile tracking ID
4715 static enum ice_status
4716 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
4718 struct ice_chs_chg *del, *tmp;
4719 struct LIST_HEAD_TYPE chg;
4720 enum ice_status status;
4723 INIT_LIST_HEAD(&chg);
4725 for (i = 1; i < ICE_MAX_VSIGS; i++) {
4726 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
4727 if (ice_has_prof_vsig(hw, blk, i, id)) {
4728 status = ice_rem_prof_id_vsig(hw, blk, i, id,
4731 goto err_ice_rem_flow_all;
4736 status = ice_upd_prof_hw(hw, blk, &chg);
4738 err_ice_rem_flow_all:
4739 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
4740 LIST_DEL(&del->list_entry);
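/* Sketch of the change-list lifecycle this file uses throughout: callers
 * build a local chg list, apply it with ice_upd_prof_hw(), and then free
 * every node unconditionally, whether or not the hardware update succeeded.
 * The population step is elided here; see ice_rem_prof_id_vsig() above for
 * a real producer of change records.
 */
static enum ice_status example_apply_and_free(struct ice_hw *hw,
					      enum ice_block blk)
{
	struct ice_chs_chg *del, *tmp;
	struct LIST_HEAD_TYPE chg;
	enum ice_status status;

	INIT_LIST_HEAD(&chg);

	/* ... populate chg, e.g. via ice_move_vsi() or ice_rem_vsig() ... */

	status = ice_upd_prof_hw(hw, blk, &chg);

	/* the list nodes always belong to this function */
	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	return status;
}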
4748 * ice_rem_prof - remove profile
4749 * @hw: pointer to the HW struct
4750 * @blk: hardware block
4751 * @id: profile tracking ID
4753 * This will remove the profile specified by the ID parameter, which was
4754 * previously created through ice_add_prof. If any existing entries
4755 * are associated with this profile, they will be removed as well.
4757 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
4759 struct ice_prof_map *pmap;
4760 enum ice_status status;
4762 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4764 pmap = ice_search_prof_id_low(hw, blk, id);
4766 status = ICE_ERR_DOES_NOT_EXIST;
4767 goto err_ice_rem_prof;
4770 /* remove all flows with this profile */
4771 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
4773 goto err_ice_rem_prof;
4775 /* dereference profile, and possibly remove */
4776 ice_prof_dec_ref(hw, blk, pmap->prof_id);
4778 LIST_DEL(&pmap->list);
4781 status = ICE_SUCCESS;
4784 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4789 * ice_get_prof_ptgs - get ptgs for profile
4790 * @hw: pointer to the HW struct
4791 * @blk: hardware block
4792 * @hdl: profile handle
4795 static enum ice_status
4796 ice_get_prof_ptgs(struct ice_hw *hw, enum ice_block blk, u64 hdl,
4797 struct LIST_HEAD_TYPE *chg)
4799 struct ice_prof_map *map;
4800 struct ice_chs_chg *p;
4803 /* Get the details on the profile specified by the handle ID */
4804 map = ice_search_prof_id(hw, blk, hdl);
4806 return ICE_ERR_DOES_NOT_EXIST;
4808 for (i = 0; i < map->ptype_count; i++) {
4809 enum ice_status status;
4813 status = ice_get_ptg(hw, blk, map->ptype[i], &ptg, &add);
4815 goto err_ice_get_prof_ptgs;
4817 if (add || !hw->blk[blk].es.written[map->prof_id]) {
4818 /* add PTG to change list */
4819 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4821 goto err_ice_get_prof_ptgs;
4823 p->type = ICE_PTG_ES_ADD;
4824 p->ptype = map->ptype[i];
4828 p->add_prof = !hw->blk[blk].es.written[map->prof_id];
4829 p->prof_id = map->prof_id;
4831 hw->blk[blk].es.written[map->prof_id] = true;
4833 LIST_ADD(&p->list_entry, chg);
4839 err_ice_get_prof_ptgs:
4840 /* let caller clean up the change list */
4841 return ICE_ERR_NO_MEMORY;
4845 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
4846 * @hw: pointer to the HW struct
4847 * @blk: hardware block
4848 * @vsig: VSIG from which to copy the list
4851 * This routine makes a copy of the list of profiles in the specified VSIG.
4853 static enum ice_status
4854 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4855 struct LIST_HEAD_TYPE *lst)
4857 struct ice_vsig_prof *ent1, *ent2;
4858 u16 idx = vsig & ICE_VSIG_IDX_M;
4860 LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4861 ice_vsig_prof, list) {
4862 struct ice_vsig_prof *p;
4864 /* copy to the input list */
4865 p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
4867 goto err_ice_get_profs_vsig;
4869 ice_memcpy(p, ent1, sizeof(*p), ICE_NONDMA_TO_NONDMA);
4871 LIST_ADD_TAIL(&p->list, lst);
4876 err_ice_get_profs_vsig:
4877 LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
4878 LIST_DEL(&ent1->list);
4882 return ICE_ERR_NO_MEMORY;
4886 * ice_add_prof_to_lst - add profile entry to a list
4887 * @hw: pointer to the HW struct
4888 * @blk: hardware block
4889 * @lst: the list to be added to
4890 * @hdl: profile handle of entry to add
4892 static enum ice_status
4893 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
4894 struct LIST_HEAD_TYPE *lst, u64 hdl)
4896 struct ice_vsig_prof *p;
4897 struct ice_prof_map *map;
4900 map = ice_search_prof_id(hw, blk, hdl);
4902 return ICE_ERR_DOES_NOT_EXIST;
4904 p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
4906 return ICE_ERR_NO_MEMORY;
4908 p->profile_cookie = map->profile_cookie;
4909 p->prof_id = map->prof_id;
4910 p->tcam_count = map->ptype_count;
4912 for (i = 0; i < map->ptype_count; i++) {
4915 p->tcam[i].prof_id = map->prof_id;
4916 p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
4918 if (ice_ptg_find_ptype(hw, blk, map->ptype[i], &ptg))
4921 p->tcam[i].ptg = ptg;
4924 LIST_ADD(&p->list, lst);
4930 * ice_move_vsi - move VSI to another VSIG
4931 * @hw: pointer to the HW struct
4932 * @blk: hardware block
4933 * @vsi: the VSI to move
4934 * @vsig: the VSIG to move the VSI to
4935 * @chg: the change list
4937 static enum ice_status
4938 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
4939 struct LIST_HEAD_TYPE *chg)
4941 enum ice_status status;
4942 struct ice_chs_chg *p;
4945 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4947 return ICE_ERR_NO_MEMORY;
4949 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
4951 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
4958 p->type = ICE_VSI_MOVE;
4960 p->orig_vsig = orig_vsig;
4963 LIST_ADD(&p->list_entry, chg);
4969 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
4970 * @hw: pointer to the HW struct
4971 * @blk: hardware block
4972 * @enable: true to enable, false to disable
4973 * @vsig: the VSIG of the TCAM entry
4974 * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
4975 * @chg: the change list
4977 * This function appends an enable or disable TCAM change entry to the change list
4979 static enum ice_status
4980 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
4981 u16 vsig, struct ice_tcam_inf *tcam,
4982 struct LIST_HEAD_TYPE *chg)
4984 enum ice_status status;
4985 struct ice_chs_chg *p;
4987 /* Default: enable means change the low flag bit to don't care */
4988 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4989 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
4990 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4992 /* If disabled, change the low flag bit to never match */
4998 /* add TCAM to change list */
4999 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5001 return ICE_ERR_NO_MEMORY;
5003 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
5004 tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
5007 goto err_ice_prof_tcam_ena_dis;
5009 tcam->in_use = enable;
5011 p->type = ICE_TCAM_ADD;
5012 p->add_tcam_idx = true;
5013 p->prof_id = tcam->prof_id;
5016 p->tcam_idx = tcam->tcam_idx;
5019 LIST_ADD(&p->list_entry, chg);
5023 err_ice_prof_tcam_ena_dis:
5029 * ice_adj_prof_priorities - adjust profile based on priorities
5030 * @hw: pointer to the HW struct
5031 * @blk: hardware block
5032 * @vsig: the VSIG for which to adjust profile priorities
5033 * @chg: the change list
5035 static enum ice_status
5036 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5037 struct LIST_HEAD_TYPE *chg)
5039 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
5040 struct ice_vsig_prof *t;
5041 enum ice_status status;
5044 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
5045 idx = vsig & ICE_VSIG_IDX_M;
5047 /* Priority is based on the order in which the profiles are added. The
5048 * most recently added profile has the highest priority and the oldest
5049 * added profile has the lowest priority. Since the profile property list
5050 * for a VSIG is sorted from newest to oldest, this code traverses the list
5051 * in order and enables the first occurrence of each PTG that it finds (if
5052 * not already enabled); it also disables any duplicate PTGs that it finds
5053 * in the older profiles (that are currently enabled).
5056 LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5057 ice_vsig_prof, list) {
5060 for (i = 0; i < t->tcam_count; i++) {
5061 /* Scan the priorities from newest to oldest.
5062 * Make sure that the newest profiles take priority.
5064 if (ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
5065 t->tcam[i].in_use) {
5066 /* need to mark this PTG as never match, as it
5067 * was already in use and therefore duplicate
5068 * (and lower priority)
5070 status = ice_prof_tcam_ena_dis(hw, blk, false,
5076 } else if (!ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
5077 !t->tcam[i].in_use) {
5078 /* need to enable this PTG, as it is not in use
5079 * and not enabled (highest priority)
5081 status = ice_prof_tcam_ena_dis(hw, blk, true,
5089 /* keep track of used ptgs */
5090 ice_set_bit(t->tcam[i].ptg, ptgs_used);
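/* Worked example of the priority rule above, reduced to plain types
 * (illustrative only): profiles are scanned newest first, the first
 * profile to claim a PTG keeps it enabled, and every older claim on the
 * same PTG is disabled. With PTGs {3, 7, 3} the result is
 * enable = {true, true, false}.
 */
static void example_newest_profile_wins(const u8 *ptg_of_prof, bool *enable,
					int count)
{
	unsigned long ptgs_used = 0;	/* assumes PTG values below 64 */
	int i;

	for (i = 0; i < count; i++) {	/* index 0 is the newest profile */
		unsigned long bit = 1UL << ptg_of_prof[i];

		enable[i] = !(ptgs_used & bit);	/* first claim wins */
		ptgs_used |= bit;
	}
}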
5098 * ice_add_prof_id_vsig - add profile to VSIG
5099 * @hw: pointer to the HW struct
5100 * @blk: hardware block
5101 * @vsig: the VSIG to which this profile is to be added
5102 * @hdl: the profile handle indicating the profile to add
5103 * @chg: the change list
5105 static enum ice_status
5106 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5107 struct LIST_HEAD_TYPE *chg)
5109 /* Masks that ignore flags */
5110 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5111 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5112 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5113 struct ice_prof_map *map;
5114 struct ice_vsig_prof *t;
5115 struct ice_chs_chg *p;
5118 /* Get the details on the profile specified by the handle ID */
5119 map = ice_search_prof_id(hw, blk, hdl);
5121 return ICE_ERR_DOES_NOT_EXIST;
5123 /* Error, if this VSIG already has this profile */
5124 if (ice_has_prof_vsig(hw, blk, vsig, hdl))
5125 return ICE_ERR_ALREADY_EXISTS;
5127 /* new VSIG profile structure */
5128 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5130 goto err_ice_add_prof_id_vsig;
5132 t->profile_cookie = map->profile_cookie;
5133 t->prof_id = map->prof_id;
5134 t->tcam_count = map->ptype_count;
5136 /* create TCAM entries */
5137 for (i = 0; i < map->ptype_count; i++) {
5138 enum ice_status status;
5143 /* If properly sequenced, we should never have to allocate new PTGs here */
5146 status = ice_get_ptg(hw, blk, map->ptype[i], &ptg, &add);
5148 goto err_ice_add_prof_id_vsig;
5150 /* add TCAM to change list */
5151 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5153 goto err_ice_add_prof_id_vsig;
5155 /* allocate the TCAM entry index */
5156 status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
5159 goto err_ice_add_prof_id_vsig;
5162 t->tcam[i].ptg = ptg;
5163 t->tcam[i].prof_id = map->prof_id;
5164 t->tcam[i].tcam_idx = tcam_idx;
5165 t->tcam[i].in_use = true;
5167 p->type = ICE_TCAM_ADD;
5168 p->add_tcam_idx = true;
5169 p->prof_id = t->tcam[i].prof_id;
5170 p->ptg = t->tcam[i].ptg;
5172 p->tcam_idx = t->tcam[i].tcam_idx;
5174 /* write the TCAM entry */
5175 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
5177 t->tcam[i].ptg, vsig, 0, 0,
5178 vl_msk, dc_msk, nm_msk);
5180 goto err_ice_add_prof_id_vsig;
5183 LIST_ADD(&p->list_entry, chg);
5186 /* add profile to VSIG */
5188 &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
5192 err_ice_add_prof_id_vsig:
5193 /* let caller clean up the change list */
5195 return ICE_ERR_NO_MEMORY;
5199 * ice_create_prof_id_vsig - add a new VSIG with a single profile
5200 * @hw: pointer to the HW struct
5201 * @blk: hardware block
5202 * @vsi: the initial VSI that will be in VSIG
5203 * @hdl: the profile handle of the profile that will be added to the VSIG
5204 * @chg: the change list
5206 static enum ice_status
5207 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
5208 struct LIST_HEAD_TYPE *chg)
5210 enum ice_status status;
5211 struct ice_chs_chg *p;
5214 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5216 return ICE_ERR_NO_MEMORY;
5218 new_vsig = ice_vsig_alloc(hw, blk);
5220 status = ICE_ERR_HW_TABLE;
5221 goto err_ice_create_prof_id_vsig;
5224 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
5226 goto err_ice_create_prof_id_vsig;
5228 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
5230 goto err_ice_create_prof_id_vsig;
5232 p->type = ICE_VSIG_ADD;
5234 p->orig_vsig = ICE_DEFAULT_VSIG;
5237 LIST_ADD(&p->list_entry, chg);
5241 err_ice_create_prof_id_vsig:
5242 /* let caller clean up the change list */
5248 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
5249 * @hw: pointer to the HW struct
5250 * @blk: hardware block
5251 * @vsi: the initial VSI that will be in VSIG
5252 * @lst: the list of profiles that will be added to the VSIG
5253 * @chg: the change list
5255 static enum ice_status
5256 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
5257 struct LIST_HEAD_TYPE *lst, struct LIST_HEAD_TYPE *chg)
5259 struct ice_vsig_prof *t;
5260 enum ice_status status;
5263 vsig = ice_vsig_alloc(hw, blk);
5265 return ICE_ERR_HW_TABLE;
5267 status = ice_move_vsi(hw, blk, vsi, vsig, chg);
5271 LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
5272 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
5282 * ice_find_prof_vsig - find a VSIG with a specific profile handle
5283 * @hw: pointer to the HW struct
5284 * @blk: hardware block
5285 * @hdl: the profile handle of the profile to search for
5286 * @vsig: returns the VSIG with the matching profile
5289 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
5291 struct ice_vsig_prof *t;
5292 struct LIST_HEAD_TYPE lst;
5293 enum ice_status status;
5295 INIT_LIST_HEAD(&lst);
5297 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5301 t->profile_cookie = hdl;
5302 LIST_ADD(&t->list, &lst);
5304 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
5309 return status == ICE_SUCCESS;
5313 * ice_add_vsi_flow - add VSI flow
5314 * @hw: pointer to the HW struct
5315 * @blk: hardware block
5317 * @vsig: target VSIG to include the input VSI
5319 * Calling this function will add the VSI to a given VSIG and
5320 * update the HW tables accordingly. This call can be used to
5321 * add multiple VSIs to a VSIG if we know beforehand that those
5322 * VSIs have the same characteristics as the VSIG. This saves time
5323 * by avoiding the generation of a new VSIG and TCAM entries, and the
5324 * subsequent rollback, when a matching VSIG already exists.
5327 ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
5329 struct ice_chs_chg *tmp, *del;
5330 struct LIST_HEAD_TYPE chg;
5331 enum ice_status status;
5333 /* if the target VSIG is the default, the move is invalid */
5334 if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
5335 return ICE_ERR_PARAM;
5337 INIT_LIST_HEAD(&chg);
5339 /* move VSI to the VSIG that matches */
5340 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5341 /* update hardware if success */
5343 status = ice_upd_prof_hw(hw, blk, &chg);
5345 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5346 LIST_DEL(&del->list_entry);
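/* Sketch of the shortcut described above (illustrative; the lookup-based
 * approach is an assumption, not a documented recipe): when several VSIs
 * are known in advance to share characteristics, add the first one through
 * the full ice_add_prof_id_flow() path, look up the VSIG it landed in, and
 * attach the rest directly.
 */
static enum ice_status example_share_vsig(struct ice_hw *hw,
					  enum ice_block blk, u16 first_vsi,
					  u16 second_vsi, u64 hdl)
{
	enum ice_status status;
	u16 vsig;

	status = ice_add_prof_id_flow(hw, blk, first_vsi, hdl);
	if (status)
		return status;

	/* find the VSIG the first VSI was placed in */
	status = ice_vsig_find_vsi(hw, blk, first_vsi, &vsig);
	if (status)
		return status;

	/* the second VSI is known to have identical characteristics */
	return ice_add_vsi_flow(hw, blk, second_vsi, vsig);
}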
5354 * ice_add_prof_id_flow - add profile flow
5355 * @hw: pointer to the HW struct
5356 * @blk: hardware block
5357 * @vsi: the VSI to enable with the profile specified by ID
5358 * @hdl: profile handle
5360 * Calling this function will update the hardware tables to enable the
5361 * profile indicated by the ID parameter for the VSI specified by the
5362 * vsi parameter. Once successfully called, the flow will be enabled.
5365 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5367 struct ice_vsig_prof *tmp1, *del1;
5368 struct LIST_HEAD_TYPE union_lst;
5369 struct ice_chs_chg *tmp, *del;
5370 struct LIST_HEAD_TYPE chrs;
5371 struct LIST_HEAD_TYPE chg;
5372 enum ice_status status;
5373 u16 vsig, or_vsig = 0;
5375 INIT_LIST_HEAD(&union_lst);
5376 INIT_LIST_HEAD(&chrs);
5377 INIT_LIST_HEAD(&chg);
5379 status = ice_get_prof_ptgs(hw, blk, hdl, &chg);
5383 /* determine if VSI is already part of a VSIG */
5384 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5385 if (!status && vsig) {
5392 /* make sure that there is no overlap/conflict between the new
5393 * characteristics and the existing ones; we don't support that scenario. */
5396 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
5397 status = ICE_ERR_ALREADY_EXISTS;
5398 goto err_ice_add_prof_id_flow;
5401 /* last VSI in the VSIG? */
5402 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5404 goto err_ice_add_prof_id_flow;
5405 only_vsi = (ref == 1);
5407 /* create a union of the current profiles and the one being added */
5410 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
5412 goto err_ice_add_prof_id_flow;
5414 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
5416 goto err_ice_add_prof_id_flow;
5418 /* search for an existing VSIG with an exact characteristic match */
5419 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
5421 /* move VSI to the VSIG that matches */
5422 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5424 goto err_ice_add_prof_id_flow;
5426 /* VSI has been moved out of or_vsig. If the or_vsig had
5427 * only that VSI it is now empty and can be removed.
5430 status = ice_rem_vsig(hw, blk, or_vsig, &chg);
5432 goto err_ice_add_prof_id_flow;
5434 } else if (only_vsi) {
5435 /* If the original VSIG only contains one VSI, then it
5436 * will be the requesting VSI. In this case the VSI is
5437 * not sharing entries and we can simply add the new
5438 * profile to the VSIG.
5440 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
5442 goto err_ice_add_prof_id_flow;
5444 /* Adjust priorities */
5445 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5447 goto err_ice_add_prof_id_flow;
5449 /* No match, so we need a new VSIG */
5450 status = ice_create_vsig_from_lst(hw, blk, vsi,
5453 goto err_ice_add_prof_id_flow;
5455 /* Adjust priorities */
5456 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5458 goto err_ice_add_prof_id_flow;
5461 /* need to find or add a VSIG */
5462 /* search for an existing VSIG with an exact characteristic match */
5463 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
5464 /* found an exact match */
5465 /* add or move VSI to the VSIG that matches */
5466 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5468 goto err_ice_add_prof_id_flow;
5470 /* we did not find an exact match */
5471 /* we need to add a VSIG */
5472 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
5475 goto err_ice_add_prof_id_flow;
5479 /* update hardware */
5481 status = ice_upd_prof_hw(hw, blk, &chg);
5483 err_ice_add_prof_id_flow:
5484 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5485 LIST_DEL(&del->list_entry);
5489 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
5490 LIST_DEL(&del1->list);
5494 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &chrs, ice_vsig_prof, list) {
5495 LIST_DEL(&del1->list);
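/* Sketch of the expected call sequence for the exported entry points in
 * this file (assumes a profile was previously registered with
 * ice_add_prof() under the tracking cookie 'hdl'): enable the flow on a
 * VSI, later disable it, then drop the profile once nothing references it.
 */
static enum ice_status example_flow_lifecycle(struct ice_hw *hw, u16 vsi,
					      u64 hdl)
{
	enum ice_status status;

	/* bind the profile's TCAM entries to the VSI's VSIG */
	status = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, hdl);
	if (status)
		return status;

	/* ... matching packets now hit the profile ... */

	/* unbind the VSI; the VSIG is cleaned up if it becomes empty */
	status = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, hdl);
	if (status)
		return status;

	/* finally remove the profile and any remaining flows using it */
	return ice_rem_prof(hw, ICE_BLK_RSS, hdl);
}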
5503 * ice_rem_prof_from_list - remove a profile from list
5504 * @hw: pointer to the HW struct
5505 * @lst: list to remove the profile from
5506 * @hdl: the profile handle indicating the profile to remove
5508 static enum ice_status
5509 ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
5511 struct ice_vsig_prof *ent, *tmp;
5513 LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) {
5514 if (ent->profile_cookie == hdl) {
5515 LIST_DEL(&ent->list);
5521 return ICE_ERR_DOES_NOT_EXIST;
5525 * ice_rem_prof_id_flow - remove flow
5526 * @hw: pointer to the HW struct
5527 * @blk: hardware block
5528 * @vsi: the VSI from which to remove the profile specified by ID
5529 * @hdl: profile tracking handle
5531 * Calling this function will update the hardware tables to remove the
5532 * profile indicated by the ID parameter for the VSI specified by the
5533 * vsi parameter. Once successfully called, the flow will be disabled.
5536 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5538 struct ice_vsig_prof *tmp1, *del1;
5539 struct LIST_HEAD_TYPE chg, copy;
5540 struct ice_chs_chg *tmp, *del;
5541 enum ice_status status;
5544 INIT_LIST_HEAD(&copy);
5545 INIT_LIST_HEAD(&chg);
5547 /* determine if VSI is already part of a VSIG */
5548 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5549 if (!status && vsig) {
5555 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
5556 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5558 goto err_ice_rem_prof_id_flow;
5559 only_vsi = (ref == 1);
5562 /* If the original VSIG only contains one reference,
5563 * which will be the requesting VSI, then the VSI is not
5564 * sharing entries and we can simply remove the specific
5565 * characteristics from the VSIG.
5569 /* If there are no profiles left for this VSIG,
5570 * then simply remove the VSIG.
5572 status = ice_rem_vsig(hw, blk, vsig, &chg);
5574 goto err_ice_rem_prof_id_flow;
5576 status = ice_rem_prof_id_vsig(hw, blk, vsig,
5579 goto err_ice_rem_prof_id_flow;
5581 /* Adjust priorities */
5582 status = ice_adj_prof_priorities(hw, blk, vsig,
5585 goto err_ice_rem_prof_id_flow;
5589 /* Make a copy of the VSIG's list of Profiles */
5590 status = ice_get_profs_vsig(hw, blk, vsig, &copy);
5592 goto err_ice_rem_prof_id_flow;
5594 /* Remove specified profile entry from the list */
5595 status = ice_rem_prof_from_list(hw, &copy, hdl);
5597 goto err_ice_rem_prof_id_flow;
5599 if (LIST_EMPTY(&copy)) {
5600 status = ice_move_vsi(hw, blk, vsi,
5601 ICE_DEFAULT_VSIG, &chg);
5603 goto err_ice_rem_prof_id_flow;
5605 } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
5607 /* found an exact match */
5608 /* add or move VSI to the VSIG that matches */
5609 /* Search for a VSIG with a matching profile list. */
5613 /* Found match, move VSI to the matching VSIG */
5614 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5616 goto err_ice_rem_prof_id_flow;
5618 /* since no existing VSIG supports this
5619 * characteristic pattern, we need to create a
5620 * new VSIG and TCAM entries
5622 status = ice_create_vsig_from_lst(hw, blk, vsi,
5625 goto err_ice_rem_prof_id_flow;
5627 /* Adjust priorities */
5628 status = ice_adj_prof_priorities(hw, blk, vsig,
5631 goto err_ice_rem_prof_id_flow;
5635 status = ICE_ERR_DOES_NOT_EXIST;
5638 /* update hardware tables */
5640 status = ice_upd_prof_hw(hw, blk, &chg);
5642 err_ice_rem_prof_id_flow:
5643 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5644 LIST_DEL(&del->list_entry);
5648 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
5649 LIST_DEL(&del1->list);