/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2019
 */

#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_protocol_type.h"

/* To support tunneling entries by PF, the package will append the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN, "TNL_VXLAN_PF" },
	{ TNL_GENEVE, "TNL_GENEVE_PF" },

static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	ICE_SID_XLT_KEY_BUILDER_SW,
	ICE_SID_PROFID_TCAM_SW,
	ICE_SID_PROFID_REDIR_SW,
	ICE_SID_CDID_KEY_BUILDER_SW,
	ICE_SID_XLT_KEY_BUILDER_ACL,
	ICE_SID_PROFID_TCAM_ACL,
	ICE_SID_PROFID_REDIR_ACL,
	ICE_SID_CDID_KEY_BUILDER_ACL,
	ICE_SID_CDID_REDIR_ACL
	ICE_SID_XLT_KEY_BUILDER_FD,
	ICE_SID_PROFID_TCAM_FD,
	ICE_SID_PROFID_REDIR_FD,
	ICE_SID_CDID_KEY_BUILDER_FD,
	ICE_SID_XLT_KEY_BUILDER_RSS,
	ICE_SID_PROFID_TCAM_RSS,
	ICE_SID_PROFID_REDIR_RSS,
	ICE_SID_CDID_KEY_BUILDER_RSS,
	ICE_SID_CDID_REDIR_RSS
	ICE_SID_XLT_KEY_BUILDER_PE,
	ICE_SID_PROFID_TCAM_PE,
	ICE_SID_PROFID_REDIR_PE,
	ICE_SID_CDID_KEY_BUILDER_PE,

/**
 * ice_sect_id - returns section ID
 *
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
	return ice_sect_lkup[blk][sect];

/**
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
	struct ice_buf_hdr *hdr;

	hdr = (struct ice_buf_hdr *)buf->buf;

	section_count = LE16_TO_CPU(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)

	data_end = LE16_TO_CPU(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)

/**
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
	struct ice_nvm_table *nvms;

	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 LE32_TO_CPU(ice_seg->device_table_count));

	return (_FORCE_ struct ice_buf_table *)
		(nvms->vers + LE32_TO_CPU(nvms->table_count));

/**
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL, which continues the enumeration. When the function
 * returns a NULL pointer, the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
	state->buf_table = ice_find_buf_table(ice_seg);
	if (!state->buf_table)

	return ice_pkg_val_buf(state->buf_table->buf_array);

	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +

/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
	if (!ice_seg && !state->buf)

	if (!ice_seg && state->buf)
		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))

	state->buf = ice_pkg_enum_buf(ice_seg, state);

	/* start of new buffer, reset section index */

/**
 * ice_pkg_enum_section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL, which continues the
 * enumeration. When the function returns a NULL pointer, the end of the
 * matching sections has been reached.
 */
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
	state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       CPU_TO_LE32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))

	/* validate section */
	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)

	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)

	state->sect_type =
		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
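
/* Illustrative sketch (not part of the upstream driver) of the calling
 * pattern described above: the first call passes a non-NULL segment to start
 * the scan; each later call passes NULL to continue it. This assumes the
 * enumerator returns a pointer to the matched section, as its NULL-return
 * convention implies.
 */
static u32 example_count_boost_sections(struct ice_seg *seg)
{
	struct ice_pkg_enum state;
	u32 count = 0;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (ice_pkg_enum_section(seg, &state, ICE_SID_RXPARSER_BOOST_TCAM))
		do {
			count++;	/* state.sect points at the section */
		} while (ice_pkg_enum_section(NULL, &state,
					      ICE_SID_RXPARSER_BOOST_TCAM));

	return count;
}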
/**
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries into the section type
 *
 * This function will enumerate all the entries in a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL, which continues the
 * enumeration. When the function returns a NULL pointer, the end of the
 * entries has been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section
 * handler function must return the appropriate offset + index to give the
 * absolute offset for each entry. For example, if the base for a section's
 * header indicates a base offset of 10, and the index for the entry is 2,
 * then the section handler function should set the offset to 10 + 2 = 12.
 */
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		   u32 sect_type, u32 *offset,
		   void *(*handler)(u32 sect_type, void *section,
				    u32 index, u32 *offset))

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))

		state->entry_idx = 0;
		state->handler = handler;

	entry = state->handler(state->sect_type, state->sect, state->entry_idx,

		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);

/**
 * ice_boost_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
	struct ice_boost_tcam_section *boost;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)

	if (index > ICE_MAX_BST_TCAMS_IN_BUF)

	boost = (struct ice_boost_tcam_section *)section;
	if (index >= LE16_TO_CPU(boost->count))

	return boost->tcam + index;

/**
 * ice_find_boost_entry
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 */
static enum ice_status
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
		     struct ice_boost_tcam_entry **entry)
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

		return ICE_ERR_PARAM;

		tcam = (struct ice_boost_tcam_entry *)
			ice_pkg_enum_entry(ice_seg, &state,
					   ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					   ice_boost_tcam_handler);
		if (tcam && LE16_TO_CPU(tcam->addr) == addr) {

/**
 * ice_label_enum_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */
ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
	struct ice_label_section *labels;

	if (index > ICE_MAX_LABELS_IN_BUF)

	labels = (struct ice_label_section *)section;
	if (index >= LE16_TO_CPU(labels->count))

	return labels->label + index;

/**
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
 * the end of the list has been reached.
 */
ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))

	label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
						       ice_label_enum_handler);

	*value = LE16_TO_CPU(label->value);

/**
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and save off relevant information
 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
 */
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
	struct ice_pkg_enum state;

	ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,

	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))

			/* Make sure this label matches our PF. Note that the PF
			 * character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].in_use = false;
				hw->tnl.tbl[hw->tnl.count].marked = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;

		label_name = ice_enum_labels(NULL, 0, &state, &val);

	/* Cache the appropriate boost TCAM entry pointers */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry)
			hw->tnl.tbl[i].valid = true;

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1

/**
 * ice_gen_key_word - generate 16-bits of a key/mask word
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to an array of where the resulting key portion
 * @key_inv: pointer to an array of where the resulting key invert portion
 *
 * This function generates 16-bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16-bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 *     '0' = b01, always match a 0 bit
 *     '1' = b10, always match a 1 bit
 *     '?' = b11, don't care bit (always matches)
 *     '~' = b00, never match bit
 *
 *     dont_care:  b0 0 1 1 0 0
 *     never_mtch: b0 0 0 0 1 1
 *     ------------------------------
 * Result: key:    b01 10 11 11 00 00
 */
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
	u8 in_key = *key, in_key_inv = *key_inv;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
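
/* Worked example (illustrative only, not upstream code): encoding the
 * pattern from the comment above. val = 0b010100 with don't-care on bits 2-3,
 * never-match on bits 0-1, and the six low bits valid. Walking the encoding
 * table bit by bit gives key = 0x2C and key_inv = 0x1C.
 */
static void example_gen_key_word(void)
{
	u8 key = 0, key_inv = 0;

	if (!ice_gen_key_word(0x14, 0x3F, 0x0C, 0x03, &key, &key_inv)) {
		/* here key == 0x2C and key_inv == 0x1C */
	}
}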
/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' number of bits set in
 * an array. Returns true if the number of bits set is <= max, otherwise
 * returns false.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */

		/* We know there is at least one set bit in this byte because of
		 * the above check; if we already have found 'max' number of
		 * bits set, then we can return failure now.
		 */

		/* count the bits in this byte, checking threshold */
		for (j = 0; j < BITS_PER_BYTE; j++) {
			count += (mask[i] & (0x1 << j)) ? 1 : 0;
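
/* Illustrative sketch: this is the check ice_set_key() applies below to the
 * never-match mask. One set bit in the array passes a max of 1; two do not.
 */
static bool example_bits_max_set(void)
{
	const u8 one_bit = 0x10;	/* one bit set */
	const u8 two_bits = 0x30;	/* two bits set */

	return ice_bits_max_set(&one_bit, 1, 1) &&
	       !ice_bits_max_set(&two_bits, 1, 1);
}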
/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	/* size must be a multiple of 2 bytes. */

	half_size = size / 2;

	if (off + len > half_size)

	/* Make sure at most one bit is set in the never match mask. Having more
	 * than one never match mask bit set will cause HW to consume excessive
	 * power otherwise; this is a power management efficiency check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
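
/* Illustrative sketch: updating two bytes of a 20-byte key (10 bytes of key
 * followed by 10 bytes of key invert) with an exact-match value. Passing NULL
 * for upd, dc and nm selects the defaults listed above. The buffer size and
 * offset are placeholders, and this assumes ice_set_key() returns an
 * ice_status as its parameter checks imply.
 */
static enum ice_status example_set_key(u8 *key_buf)
{
	u16 port = 4789;	/* example value; byte order left aside here */

	return ice_set_key(key_buf, 20, (u8 *)&port, NULL, NULL, NULL, 4,
			   sizeof(port));
}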
/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * ICE_SUCCESS        - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *                      package or has found that no update was necessary; in
 *                      this case, the caller can just skip performing any
 *                      update of the package.
 */
static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	if (status == ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_PKG,
			  "Global config lock: No work to do\n");

/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
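
/* Illustrative sketch of the write-access pattern described above: a
 * downloader must treat ICE_ERR_AQ_NO_WORK as "another PF already loaded the
 * package", not as a failure.
 */
static enum ice_status example_try_write_pkg(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status == ICE_ERR_AQ_NO_WORK)
		return ICE_SUCCESS;	/* package already present */
	if (status)
		return status;

	/* ... perform the actual download here ... */

	ice_release_global_cfg_lock(hw);
	return ICE_SUCCESS;
}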
/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);

/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 */
void ice_release_change_lock(struct ice_hw *hw)
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);

/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
			*error_offset = LE32_TO_CPU(resp->error_offset);
			*error_info = LE32_TO_CPU(resp->error_info);

/**
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
			*error_offset = LE32_TO_CPU(resp->error_offset);
			*error_info = LE32_TO_CPU(resp->error_info);

/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
		  pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));

		if (LE32_TO_CPU(seg->seg_type) == seg_type)
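
/* Illustrative sketch: locating the ice configuration segment inside a
 * package header, as ice_init_pkg() does further below.
 */
static struct ice_seg *example_find_ice_seg(struct ice_hw *hw,
					    struct ice_pkg_hdr *pkg_hdr)
{
	return (struct ice_seg *)
		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
}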
/**
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
	enum ice_status status;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
					   last, &offset, &info, NULL);

			ice_debug(hw, ICE_DBG_PKG,
				  "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);

	ice_release_change_lock(hw);

/**
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
 * found indicates that the rest of the buffers are all metadata buffers.
 */
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
	enum ice_status status;
	struct ice_buf_hdr *bh;

		return ICE_ERR_PARAM;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)

	/* reset pkg_dwnld_status in case this function is called in the
	 * reset/rebuild flow
	 */
	hw->pkg_dwnld_status = ICE_AQ_RC_OK;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);

		if (status == ICE_ERR_AQ_NO_WORK)
			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
		else
			hw->pkg_dwnld_status = hw->adminq.sq_last_status;

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

			/* check next buffer for metadata flag */
			bh = (struct ice_buf_hdr *)(bufs + i + 1);

			/* A set metadata flag in the next buffer will signal
			 * that the current buffer will be the last buffer
			 * downloaded
			 */
			if (LE16_TO_CPU(bh->section_count))
				if (LE32_TO_CPU(bh->section_entry[0].type) &

		bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
			ice_debug(hw, ICE_DBG_PKG,
				  "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);

	ice_release_global_cfg_lock(hw);

/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);

/**
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
	struct ice_buf_table *ice_buf_tbl;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
		  ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  LE32_TO_CPU(ice_seg->hdr.seg_type),
		  LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  LE32_TO_CPU(ice_buf_tbl->buf_count));

	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				  LE32_TO_CPU(ice_buf_tbl->buf_count));

/**
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
	struct ice_global_metadata_seg *meta_seg;
	struct ice_generic_seg_hdr *seg_hdr;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
		return ICE_ERR_PARAM;

	meta_seg = (struct ice_global_metadata_seg *)
		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
		hw->pkg_ver = meta_seg->pkg_ver;
		ice_memcpy(hw->pkg_name, meta_seg->pkg_name,
			   sizeof(hw->pkg_name), ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
			  meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
			  meta_seg->pkg_name);
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find metadata segment in driver package\n");

	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
		hw->ice_pkg_ver = seg_hdr->seg_ver;
		ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
			   sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
			  seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find ice segment in driver package\n");

/**
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
	pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
		goto init_pkg_free_alloc;

	for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			ice_memcpy(hw->active_pkg_name,
				   pkg_info->pkg_info[i].name,
				   sizeof(hw->active_pkg_name),
				   ICE_NONDMA_TO_NONDMA);
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;

		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);

init_pkg_free_alloc:
	ice_free(hw, pkg_info);

/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
	if (len < sizeof(*pkg))
		return ICE_ERR_BUF_TOO_SHORT;

	if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)

	/* pkg must have at least one segment */
	seg_count = LE32_TO_CPU(pkg->seg_count);

	/* make sure segment array fits in package length */
	if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
		return ICE_ERR_BUF_TOO_SHORT;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_ERR_BUF_TOO_SHORT;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + LE32_TO_CPU(seg->seg_size))
			return ICE_ERR_BUF_TOO_SHORT;

/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the package segment pointer in the proper manner, depending on
 * whether the segment was allocated or just the passed in pointer was stored.
 */
void ice_free_seg(struct ice_hw *hw)
		ice_free(hw, hw->pkg_copy);
		hw->pkg_copy = NULL;

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX	0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);

/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible
 * with the driver. To be compatible, the major and minor components of the
 * package version must match our ICE_PKG_SUPP_VER_MAJ and
 * ICE_PKG_SUPP_VER_MNR definitions.
 */
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
		return ICE_ERR_NOT_SUPPORTED;

/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
	struct ice_pkg_hdr *pkg;
	enum ice_status status;
	struct ice_seg *seg;

		return ICE_ERR_PARAM;

	pkg = (struct ice_pkg_hdr *)buf;
	status = ice_verify_pkg(pkg, len);
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",

	/* initialize package info */
	status = ice_init_pkg_info(hw, pkg);

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	status = ice_chk_pkg_version(&hw->pkg_ver);

	/* find segment in given package */
	seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	status = ice_download_pkg(hw, seg);
	if (status == ICE_ERR_AQ_NO_WORK) {
		ice_debug(hw, ICE_DBG_INIT,
			  "package previously loaded - no work.\n");
		status = ICE_SUCCESS;

	/* Get information on the package currently loaded in HW, then make sure
	 * the driver is compatible with this version.
	 */
	status = ice_get_pkg_info(hw);
		status = ice_chk_pkg_version(&hw->active_pkg_ver);

		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",

/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
 *
 * If the package buffer resides in the data segment and can be modified, the
 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * However, if the package buffer needs to be copied first, such as when being
 * read from a file, the caller should use ice_copy_and_init_pkg().
 *
 * This function will first copy the package buffer, before calling
 * ice_init_pkg(). The caller is free to immediately destroy the original
 * package buffer, as the new copy will be managed by this function and
 * related routines.
 */
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
	enum ice_status status;

		return ICE_ERR_PARAM;

	buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);

	status = ice_init_pkg(hw, buf_copy, len);
		/* Free the copy, since we failed to initialize the package */
		ice_free(hw, buf_copy);
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
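
/* Illustrative sketch of the call site described above: a package read from
 * a file (fw_buf/fw_size are placeholders for whatever the OS layer provides)
 * is handed to ice_copy_and_init_pkg(), after which the original buffer may
 * be freed immediately.
 */
static enum ice_status
example_load_pkg_from_file(struct ice_hw *hw, const u8 *fw_buf, u32 fw_size)
{
	return ice_copy_and_init_pkg(hw, fw_buf, fw_size);
}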
/**
 * @hw: pointer to the HW structure
 *
 * Allocates a package buffer and returns a pointer to the buffer header.
 * Note: all package contents must be in Little Endian form.
 */
static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
	struct ice_buf_build *bld;
	struct ice_buf_hdr *buf;

	bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));

	buf = (struct ice_buf_hdr *)bld;
	buf->data_end = CPU_TO_LE16(sizeof(*buf) -
				    sizeof(buf->section_entry[0]));

/**
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the field vector entry to be returned
 * @offset: ptr to variable that receives the offset in the field vector table
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * This function treats the given section as of type ice_sw_fv_section and
 * enumerates offset field. "offset" is an index into the field vector
 * table.
 */
ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
	struct ice_sw_fv_section *fv_section =
		(struct ice_sw_fv_section *)section;

	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
	if (index >= LE16_TO_CPU(fv_section->count))

	/* "index" passed in to this function is relative to a given
	 * 4k block. To get to the true index into the field vector
	 * table need to add the relative index to the base_offset
	 * field of this section
	 */
	*offset = LE16_TO_CPU(fv_section->base_offset) + index;
	return fv_section->fv + index;

/**
 * ice_get_sw_prof_type - determine switch profile type
 * @hw: pointer to the HW structure
 * @fv: pointer to the switch field vector
 */
static enum ice_prof_type
ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
		    fv->ew[i].off == ICE_VNI_OFFSET)
			return ICE_PROF_TUN_UDP;

		/* GRE tunnel will have GRE protocol */
		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
			return ICE_PROF_TUN_GRE;

		/* PPPOE tunnel will have PPPOE protocol */
		if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
			return ICE_PROF_TUN_PPPOE;

	return ICE_PROF_NON_TUN;

/**
 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
 * @hw: pointer to hardware structure
 * @type: type of profiles requested
 * @bm: pointer to memory for returning the bitmap of field vectors
 */
ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (type == ICE_PROF_ALL) {
		for (i = 0; i < ICE_MAX_NUM_PROFILES; i++)

	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);

		enum ice_prof_type prof_type;

		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &offset, ice_sw_fv_handler);

		/* Determine field vector type */
		prof_type = ice_get_sw_prof_type(hw, fv);

		if (type & prof_type)
			ice_set_bit((u16)offset, bm);

/**
 * ice_get_sw_fv_list
 * @hw: pointer to the HW structure
 * @prot_ids: field vector to search for with a given protocol ID
 * @ids_cnt: lookup/protocol count
 * @bm: bitmap of field vectors to consider
 * @fv_list: Head of a list
 *
 * Finds all the field vector entries from switch block that contain
 * a given protocol ID and returns a list of structures of type
 * "ice_sw_fv_list_entry". Every structure in the list has a field vector
 * definition and profile ID information.
 * NOTE: The caller of the function is responsible for freeing the memory
 * allocated for every list entry.
 */
ice_get_sw_fv_list(struct ice_hw *hw, u16 *prot_ids, u8 ids_cnt,
		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
	struct ice_sw_fv_list_entry *fvl;
	struct ice_sw_fv_list_entry *tmp;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ids_cnt || !hw->seg)
		return ICE_ERR_PARAM;

		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &offset, ice_sw_fv_handler);

		/* If field vector is not in the bitmap list, then skip this
		 * profile.
		 */
		if (!ice_is_bit_set(bm, (u16)offset))

		for (i = 0; i < ids_cnt; i++) {
			/* This code assumes that if a switch field vector line
			 * has a matching protocol, then this line will contain
			 * the entries necessary to represent every field in
			 * that protocol header.
			 */
			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv->ew[j].prot_id == prot_ids[i])
			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
			if (i + 1 == ids_cnt) {
				fvl = (struct ice_sw_fv_list_entry *)
					ice_malloc(hw, sizeof(*fvl));

				fvl->profile_id = offset;
				LIST_ADD(&fvl->list_entry, fv_list);

	if (LIST_EMPTY(fv_list))

	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
		LIST_DEL(&fvl->list_entry);

	return ICE_ERR_NO_MEMORY;

/**
 * ice_init_prof_result_bm - Initialize the profile result index bitmap
 * @hw: pointer to hardware structure
 */
void ice_init_prof_result_bm(struct ice_hw *hw)
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &off, ice_sw_fv_handler);

		ice_zero_bitmap(hw->switch_info->prof_res_bm[off],

		/* Determine empty field vector indices; these can be
		 * used for recipe results. Skip index 0, since it is
		 * always used for Switch ID.
		 */
		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
				hw->switch_info->prof_res_bm[off]);

/**
 * @hw: pointer to the HW structure
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Frees a package buffer.
 */
static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)

/**
 * ice_pkg_buf_reserve_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as they are made before calling
 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() is called
 * once, the number of sections that can be allocated cannot be increased;
 * not using all reserved sections is fine, but this will result in some
 * wasted space in the buffer.
 * Note: all package contents must be in Little Endian form.
 */
static enum ice_status
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
	struct ice_buf_hdr *buf;

		return ICE_ERR_PARAM;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* already an active section, can't increase table size */
	section_count = LE16_TO_CPU(buf->section_count);
	if (section_count > 0)

	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
	bld->reserved_section_table_entries += count;

	data_end = LE16_TO_CPU(buf->data_end) +
		(count * sizeof(buf->section_entry[0]));
	buf->data_end = CPU_TO_LE16(data_end);

/**
 * ice_pkg_buf_alloc_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 *
 * Reserves memory in the buffer for a section's content and updates the
 * buffer's status accordingly. This routine returns a pointer to the first
 * byte of the section start within the buffer, which is used to fill in the
 * section contents.
 * Note: all package contents must be in Little Endian form.
 */
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
	struct ice_buf_hdr *buf;

	if (!bld || !type || !size)

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = LE16_TO_CPU(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ICE_ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)

	/* check for more available section table entries */
	sect_count = LE16_TO_CPU(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
		buf->section_entry[sect_count].size = CPU_TO_LE16(size);
		buf->section_entry[sect_count].type = CPU_TO_LE32(type);

		buf->data_end = CPU_TO_LE16(data_end);

		buf->section_count = CPU_TO_LE16(sect_count + 1);

	/* no free section table entries */

/**
 * ice_pkg_buf_get_active_sections
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Returns the number of active sections. Before using the package buffer
 * in an update package command, the caller should make sure that there is at
 * least one active section - otherwise, the buffer is not legal and should
 * not be used.
 * Note: all package contents must be in Little Endian form.
 */
static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
	struct ice_buf_hdr *buf;

	buf = (struct ice_buf_hdr *)&bld->buf;
	return LE16_TO_CPU(buf->section_count);

/**
 * ice_pkg_buf
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Return a pointer to the buffer's header.
 */
static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
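
/* Illustrative sketch of the build workflow described above: reserve the
 * section table entries first, carve out each section's content, then hand
 * the buffer to ice_update_pkg(). The section type and count here are
 * placeholders, and this assumes ice_update_pkg() returns an ice_status as
 * its error handling implies.
 */
static enum ice_status example_build_and_update(struct ice_hw *hw)
{
	enum ice_status status = ICE_ERR_NO_MEMORY;
	struct ice_buf_build *bld;
	void *sect;

	bld = ice_pkg_buf_alloc(hw);
	if (!bld)
		return ICE_ERR_NO_MEMORY;

	if (ice_pkg_buf_reserve_section(bld, 1))
		goto out;

	sect = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					 sizeof(struct ice_boost_tcam_section));
	if (!sect)
		goto out;

	/* ... fill in the section contents here ... */

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
out:
	ice_pkg_buf_free(hw, bld);
	return status;
}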
/**
 * ice_tunnel_port_in_use
 * @hw: pointer to the HW structure
 * @port: port to search for
 * @index: optionally returns index
 *
 * Returns whether a port is already in use as a tunnel, and optionally its
 * index.
 */
bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {

/**
 * ice_tunnel_get_type
 * @hw: pointer to the HW structure
 * @port: port to search for
 * @type: returns tunnel type
 *
 * For a given port number, will return the type of tunnel.
 */
ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
			*type = hw->tnl.tbl[i].type;

/**
 * ice_find_free_tunnel_entry
 * @hw: pointer to the HW structure
 * @type: tunnel type
 * @index: optionally returns index
 *
 * Returns whether there is a free tunnel entry, and optionally its index.
 */
ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
		    hw->tnl.tbl[i].type == type) {

/**
 * ice_get_open_tunnel_port - retrieve an open tunnel port
 * @hw: pointer to the HW structure
 * @type: tunnel type (TNL_ALL will return any open port)
 * @port: returns open port
 */
ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
		    (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
			*port = hw->tnl.tbl[i].port;

/**
 * @hw: pointer to the HW structure
 * @type: type of tunnel
 * @port: port to use for VXLAN tunnel
 */
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;

	if (ice_tunnel_port_in_use(hw, port, NULL))
		return ICE_ERR_ALREADY_EXISTS;

	if (!ice_find_free_tunnel_entry(hw, type, &index))
		return ICE_ERR_OUT_OF_RANGE;

	bld = ice_pkg_buf_alloc(hw);
		return ICE_ERR_NO_MEMORY;

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_create_tunnel_err;

	sect_rx = (struct ice_boost_tcam_section *)
		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
		goto ice_create_tunnel_err;
	sect_rx->count = CPU_TO_LE16(1);

	sect_tx = (struct ice_boost_tcam_section *)
		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
		goto ice_create_tunnel_err;
	sect_tx->count = CPU_TO_LE16(1);

	/* copy original boost entry to update package buffer */
	ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
		   sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);

	/* over-write the never-match dest port key bits with the encoded port
	 * bits
	 */
	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
		    (u8 *)&port, NULL, NULL, NULL,
		    offsetof(struct ice_boost_key_value, hv_dst_port_key),
		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));

	/* exact copy of entry to Tx section entry */
	ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
		   ICE_NONDMA_TO_NONDMA);

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
		hw->tnl.tbl[index].port = port;
		hw->tnl.tbl[index].in_use = true;

ice_create_tunnel_err:
	ice_pkg_buf_free(hw, bld);

/**
 * ice_destroy_tunnel
 * @hw: pointer to the HW structure
 * @port: port of tunnel to destroy (ignored if the all parameter is true)
 * @all: flag that states to destroy all tunnels
 *
 * Destroys a tunnel or all tunnels by creating an update package buffer
 * targeting the specific updates requested and then performing an update
 * package.
 */
enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;

	/* determine count */
	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
		    (all || hw->tnl.tbl[i].port == port))

		return ICE_ERR_PARAM;

	/* size of section - there is at least one entry */
	size = (count - 1) * sizeof(*sect_rx->tcam) + sizeof(*sect_rx);

	bld = ice_pkg_buf_alloc(hw);
		return ICE_ERR_NO_MEMORY;

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_destroy_tunnel_err;

	sect_rx = (struct ice_boost_tcam_section *)
		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
		goto ice_destroy_tunnel_err;
	sect_rx->count = CPU_TO_LE16(1);

	sect_tx = (struct ice_boost_tcam_section *)
		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
		goto ice_destroy_tunnel_err;
	sect_tx->count = CPU_TO_LE16(1);

	/* copy original boost entry to update package buffer, one copy to Rx
	 * section, another copy to the Tx section
	 */
	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
		    (all || hw->tnl.tbl[i].port == port)) {
			ice_memcpy(sect_rx->tcam + i,
				   hw->tnl.tbl[i].boost_entry,
				   sizeof(*sect_rx->tcam),
				   ICE_NONDMA_TO_NONDMA);
			ice_memcpy(sect_tx->tcam + i,
				   hw->tnl.tbl[i].boost_entry,
				   sizeof(*sect_tx->tcam),
				   ICE_NONDMA_TO_NONDMA);
			hw->tnl.tbl[i].marked = true;

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
		for (i = 0; i < hw->tnl.count &&
		     i < ICE_TUNNEL_MAX_ENTRIES; i++)
			if (hw->tnl.tbl[i].marked) {
				hw->tnl.tbl[i].port = 0;
				hw->tnl.tbl[i].in_use = false;
				hw->tnl.tbl[i].marked = false;

ice_destroy_tunnel_err:
	ice_pkg_buf_free(hw, bld);

/**
 * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
 * @hw: pointer to the hardware structure
 * @blk: hardware block
 * @fv_idx: field vector word index
 * @prot: variable to receive the protocol ID
 * @off: variable to receive the protocol offset
 */
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u8 fv_idx,
	struct ice_fv_word *fv_ext;

	if (prof >= hw->blk[blk].es.count)
		return ICE_ERR_PARAM;

	if (fv_idx >= hw->blk[blk].es.fvw)
		return ICE_ERR_PARAM;

	fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);

	*prot = fv_ext[fv_idx].prot_id;
	*off = fv_ext[fv_idx].off;
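
/* Illustrative sketch: reading back the protocol ID / offset pair of field
 * vector word 0 for a switch-block profile. The profile index is a
 * placeholder, and the u8 prot / u16 off out-parameters are assumed from the
 * assignments above.
 */
static void example_find_prot_off(struct ice_hw *hw, u8 *prot, u16 *off)
{
	if (ice_find_prot_off(hw, ICE_BLK_SW, 0, 0, prot, off))
		*prot = ICE_PROT_INVALID;
}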
2102 /* PTG Management */
2105 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
2106 * @hw: pointer to the hardware structure
2108 * @ptype: the ptype to search for
2109 * @ptg: pointer to variable that receives the PTG
2111 * This function will search the PTGs for a particular ptype, returning the
2112 * PTG ID that contains it through the ptg parameter, with the value of
2113 * ICE_DEFAULT_PTG (0) meaning it is part the default PTG.
2115 static enum ice_status
2116 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2118 if (ptype >= ICE_XLT1_CNT || !ptg)
2119 return ICE_ERR_PARAM;
2121 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2126 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
2127 * @hw: pointer to the hardware structure
2129 * @ptg: the ptg to allocate
2131 * This function allocates a given packet type group ID specified by the ptg
2135 void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2137 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2141 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
2142 * @hw: pointer to the hardware structure
2144 * @ptype: the ptype to remove
2145 * @ptg: the ptg to remove the ptype from
2147 * This function will remove the ptype from the specific ptg, and move it to
2148 * the default PTG (ICE_DEFAULT_PTG).
2150 static enum ice_status
2151 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2153 struct ice_ptg_ptype **ch;
2154 struct ice_ptg_ptype *p;
2156 if (ptype > ICE_XLT1_CNT - 1)
2157 return ICE_ERR_PARAM;
2159 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2160 return ICE_ERR_DOES_NOT_EXIST;
2162 /* Should not happen if .in_use is set, bad config */
2163 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2166 /* find the ptype within this PTG, and bypass the link over it */
2167 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2168 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2170 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2171 *ch = p->next_ptype;
2175 ch = &p->next_ptype;
2179 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2180 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2186 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
2187 * @hw: pointer to the hardware structure
2189 * @ptype: the ptype to add or move
2190 * @ptg: the ptg to add or move the ptype to
2192 * This function will either add or move a ptype to a particular PTG depending
2193 * on if the ptype is already part of another group. Note that using a
2194 * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
2197 static enum ice_status
2198 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2200 enum ice_status status;
2203 if (ptype > ICE_XLT1_CNT - 1)
2204 return ICE_ERR_PARAM;
2206 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2207 return ICE_ERR_DOES_NOT_EXIST;
2209 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2213 /* Is ptype already in the correct PTG? */
2214 if (original_ptg == ptg)
2217 /* Remove from original PTG and move back to the default PTG */
2218 if (original_ptg != ICE_DEFAULT_PTG)
2219 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2221 /* Moving to default PTG? Then we're done with this request */
2222 if (ptg == ICE_DEFAULT_PTG)
2225 /* Add ptype to PTG at beginning of list */
2226 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2227 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2228 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2229 &hw->blk[blk].xlt1.ptypes[ptype];
2231 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2232 hw->blk[blk].xlt1.t[ptype] = ptg;
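/* Usage sketch, not driver code (block and values here are hypothetical):
 * to classify ptype 17 into PTG 5 for the RSS block, mark the group in
 * use and then add or move the ptype into it:
 *
 *	ice_ptg_alloc_val(hw, ICE_BLK_RSS, 5);
 *	status = ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, 17, 5);
 *
 * Passing ICE_DEFAULT_PTG (0) as the destination instead returns the
 * ptype to the default group.
 */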
2237 /* Block / table size info */
2238 struct ice_blk_size_details {
2239 u16 xlt1; /* # XLT1 entries */
2240 u16 xlt2; /* # XLT2 entries */
2241 u16 prof_tcam; /* # profile ID TCAM entries */
2242 u16 prof_id; /* # profile IDs */
2243 u8 prof_cdid_bits; /* # cdid one-hot bits used in key */
2244 u16 prof_redir; /* # profile redirection entries */
2245 u16 es; /* # extraction sequence entries */
2246 u16 fvw; /* # field vector words */
2247 u8 overwrite; /* overwrite existing entries allowed */
2248 u8 reverse; /* reverse FV order */
2251 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2254 * XLT1 - Number of entries in XLT1 table
2255 * XLT2 - Number of entries in XLT2 table
2256 * TCAM - Number of entries in the Profile ID TCAM table
2257 * CDID - Control Domain ID of the hardware block
2258 * PRED - Number of entries in the Profile Redirection Table
2259 * FV - Number of entries in the Field Vector
2260 * FVW - Width (in WORDs) of the Field Vector
2261 * OVR - Overwrite existing table entries
2262 * REV - Reverse FV
2264 /* XLT1, XLT2, TCAM, PID, CDID, PRED, FV, FVW */
2265 /* Overwrite, Reverse FV */
2266 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
2268 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
2270 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2272 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2274 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
2279 ICE_SID_XLT1_OFF = 0,
2282 ICE_SID_PR_REDIR_OFF,
2287 /* Characteristic handling */
2290 * ice_match_prop_lst - determine if properties of two lists match
2291 * @list1: first properties list
2292 * @list2: second properties list
2294 * Count, cookies and the order must match in order to be considered equivalent.
2297 ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
2299 struct ice_vsig_prof *tmp1;
2300 struct ice_vsig_prof *tmp2;
2304 /* compare counts */
2305 LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) {
2308 LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) {
2311 if (!count || count != chk_count)
2314 tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
2315 tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
2317 /* profile cookies must compare, and in the exact same order to take
2318 * into account priority
2321 if (tmp2->profile_cookie != tmp1->profile_cookie)
2324 tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
2325 tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
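/* Example of the order sensitivity above: two lists holding the same
 * cookies {A, B} and {B, A} do NOT match, because list position encodes
 * profile priority.
 */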
2331 /* VSIG Management */
2334 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2335 * @hw: pointer to the hardware structure
2337 * @vsi: VSI of interest
2338 * @vsig: pointer to receive the VSI group
2340 * This function will lookup the VSI entry in the XLT2 list and return
2341 * the VSI group it's associated with.
2344 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2346 if (!vsig || vsi >= ICE_MAX_VSI)
2347 return ICE_ERR_PARAM;
2349 /* As long as there's a default or valid VSIG associated with the input
2350 * VSI, the function returns success. Any handling of VSIG will be
2351 * done by the following add, update or remove functions.
2353 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2359 * ice_vsig_alloc_val - allocate a new VSIG by value
2360 * @hw: pointer to the hardware structure
2362 * @vsig: the vsig to allocate
2364 * This function will allocate a given VSIG specified by the vsig parameter.
2366 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2368 u16 idx = vsig & ICE_VSIG_IDX_M;
2370 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2371 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2372 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2375 return ICE_VSIG_VALUE(idx, hw->pf_id);
2379 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2380 * @hw: pointer to the hardware structure
2383 * This function will iterate through the VSIG list and mark the first
2384 * unused entry for the new VSIG entry as used and return that value.
2386 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2390 for (i = 1; i < ICE_MAX_VSIGS; i++)
2391 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2392 return ice_vsig_alloc_val(hw, blk, i);
2394 return ICE_DEFAULT_VSIG;
2398 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2399 * @hw: pointer to the hardware structure
2401 * @chs: characteristic list
2402 * @vsig: returns the VSIG with the matching profiles, if found
2404 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2405 * a group have the same characteristic set. To check if there exists a VSIG
2406 * which has the same characteristics as the input characteristics, this
2407 * function will iterate through the XLT2 list and return the VSIG that has a
2408 * matching configuration. In order to make sure that priorities are accounted
2409 * for, the list must match exactly, including the order in which the
2410 * characteristics are listed.
2412 static enum ice_status
2413 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2414 struct LIST_HEAD_TYPE *chs, u16 *vsig)
2416 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2419 for (i = 0; i < xlt2->count; i++) {
2420 if (xlt2->vsig_tbl[i].in_use &&
2421 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2422 *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2427 return ICE_ERR_DOES_NOT_EXIST;
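/* Sketch of the intended lookup-before-allocate pattern (hypothetical
 * caller; "chs" is assumed to be a populated characteristic list):
 *
 *	u16 vsig;
 *
 *	if (ice_find_dup_props_vsig(hw, blk, chs, &vsig))
 *		vsig = ice_vsig_alloc(hw, blk);
 *
 * Reusing a matching VSIG keeps all VSIs with identical, identically
 * ordered profile lists in a single group.
 */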
2431 * ice_vsig_free - free VSI group
2432 * @hw: pointer to the hardware structure
2434 * @vsig: VSIG to remove
2436 * The function will remove all VSIs associated with the input VSIG and move
2437 * them to the DEFAULT_VSIG, then mark the VSIG as available.
2439 static enum ice_status
2440 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2442 struct ice_vsig_prof *dtmp, *del;
2443 struct ice_vsig_vsi *vsi_cur;
2446 idx = vsig & ICE_VSIG_IDX_M;
2447 if (idx >= ICE_MAX_VSIGS)
2448 return ICE_ERR_PARAM;
2450 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2451 return ICE_ERR_DOES_NOT_EXIST;
2453 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2455 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2456 /* If the VSIG has at least 1 VSI then iterate through the
2457 * list and remove the VSIs before deleting the group.
2460 /* remove all vsis associated with this VSIG XLT2 entry */
2462 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2464 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2465 vsi_cur->changed = 1;
2466 vsi_cur->next_vsi = NULL;
2470 /* NULL terminate head of VSI list */
2471 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2474 /* free characteristic list */
2475 LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
2476 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2477 ice_vsig_prof, list) {
2478 LIST_DEL(&del->list);
2482 /* if VSIG characteristic list was cleared for reset
2483 * re-initialize the list head
2485 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2491 * ice_vsig_remove_vsi - remove VSI from VSIG
2492 * @hw: pointer to the hardware structure
2494 * @vsi: VSI to remove
2495 * @vsig: VSI group to remove from
2497 * The function will remove the input VSI from its VSI group and move it
2498 * to the DEFAULT_VSIG.
2500 static enum ice_status
2501 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2503 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2506 idx = vsig & ICE_VSIG_IDX_M;
2508 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2509 return ICE_ERR_PARAM;
2511 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2512 return ICE_ERR_DOES_NOT_EXIST;
2514 /* entry already in default VSIG, don't have to remove */
2515 if (idx == ICE_DEFAULT_VSIG)
2518 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2522 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2523 vsi_cur = (*vsi_head);
2525 /* iterate the VSI list, skip over the entry to be removed */
2527 if (vsi_tgt == vsi_cur) {
2528 (*vsi_head) = vsi_cur->next_vsi;
2531 vsi_head = &vsi_cur->next_vsi;
2532 vsi_cur = vsi_cur->next_vsi;
2535 /* verify if VSI was removed from group list */
2537 return ICE_ERR_DOES_NOT_EXIST;
2539 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2540 vsi_cur->changed = 1;
2541 vsi_cur->next_vsi = NULL;
2547 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
2548 * @hw: pointer to the hardware structure
2551 * @vsig: destination VSI group
2553 * This function will move or add the input VSI to the target VSIG.
2554 * The function will find the original VSIG the VSI belongs to and
2555 * move the entry to the DEFAULT_VSIG, update the original VSIG and
2556 * then move the entry to the new VSIG.
2558 static enum ice_status
2559 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2561 struct ice_vsig_vsi *tmp;
2562 enum ice_status status;
2565 idx = vsig & ICE_VSIG_IDX_M;
2567 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2568 return ICE_ERR_PARAM;
2570 /* if VSIG not in use and VSIG is not default type this VSIG
2571 * doesn't exist.
2572 */
2573 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2574 vsig != ICE_DEFAULT_VSIG)
2575 return ICE_ERR_DOES_NOT_EXIST;
2577 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2581 /* no update required if vsigs match */
2582 if (orig_vsig == vsig)
2585 if (orig_vsig != ICE_DEFAULT_VSIG) {
2586 /* remove entry from orig_vsig and add to default VSIG */
2587 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2592 if (idx == ICE_DEFAULT_VSIG)
2595 /* Create VSI entry and add VSIG and prop_mask values */
2596 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2597 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2599 /* Add new entry to the head of the VSIG list */
2600 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2601 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2602 &hw->blk[blk].xlt2.vsis[vsi];
2603 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2604 hw->blk[blk].xlt2.t[vsi] = vsig;
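/* Usage sketch (hypothetical values): move VSI 3 of the switch block
 * into VSIG 0x10; the helper pulls it out of its current group first,
 * then links it at the head of the destination list:
 *
 *	status = ice_vsig_add_mv_vsi(hw, ICE_BLK_SW, 3, 0x10);
 *
 * A destination of ICE_DEFAULT_VSIG simply parks the VSI in the default
 * group.
 */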
2610 * ice_prof_has_mask_idx - determine if profile index masking is identical
2611 * @hw: pointer to the hardware structure
2613 * @prof: profile to check
2614 * @idx: profile index to check
2615 * @masks: masks to match
2618 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
2621 bool expect_no_mask = false;
2626 /* If mask is 0x0000 or 0xffff, then there is no masking */
2627 if (mask == 0 || mask == 0xffff)
2628 expect_no_mask = true;
2630 /* Scan the enabled masks on this profile, for the specified idx */
2631 for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
2632 hw->blk[blk].masks.count; i++)
2633 if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
2634 if (hw->blk[blk].masks.masks[i].in_use &&
2635 hw->blk[blk].masks.masks[i].idx == idx) {
2637 if (hw->blk[blk].masks.masks[i].mask == mask)
2642 if (expect_no_mask) {
2654 * ice_prof_has_mask - determine if profile masking is identical
2655 * @hw: pointer to the hardware structure
2657 * @prof: profile to check
2658 * @masks: masks to match
2661 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
2665 /* es->mask_ena[prof] will have the mask */
2666 for (i = 0; i < hw->blk[blk].es.fvw; i++)
2667 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
2674 * ice_find_prof_id_with_mask - find profile ID for a given field vector
2675 * @hw: pointer to the hardware structure
2677 * @fv: field vector to search for
2678 * @masks: masks for fv
2679 * @prof_id: receives the profile ID
2681 static enum ice_status
2682 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
2683 struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
2685 struct ice_es *es = &hw->blk[blk].es;
2688 for (i = 0; i < es->count; i++) {
2689 u16 off = i * es->fvw;
2691 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2694 /* check if masks settings are the same for this profile */
2695 if (!ice_prof_has_mask(hw, blk, i, masks))
2702 return ICE_ERR_DOES_NOT_EXIST;
2706 * ice_find_prof_id - find profile ID for a given field vector
2707 * @hw: pointer to the hardware structure
2709 * @fv: field vector to search for
2710 * @prof_id: receives the profile ID
2712 static enum ice_status
2713 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
2714 struct ice_fv_word *fv, u8 *prof_id)
2716 struct ice_es *es = &hw->blk[blk].es;
2719 for (i = 0; i < es->count; i++) {
2722 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2729 return ICE_ERR_DOES_NOT_EXIST;
2733 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
2734 * @blk: the block type
2735 * @rsrc_type: pointer to variable to receive the resource type
2737 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2741 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID;
2744 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID;
2747 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
2750 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
2753 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
2762 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
2763 * @blk: the block type
2764 * @rsrc_type: pointer to variable to receive the resource type
2766 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2770 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM;
2773 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM;
2776 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
2779 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
2782 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
2791 * ice_alloc_tcam_ent - allocate hardware TCAM entry
2792 * @hw: pointer to the HW struct
2793 * @blk: the block to allocate the TCAM for
2794 * @tcam_idx: pointer to variable to receive the TCAM entry
2796 * This function allocates a new entry in a Profile ID TCAM for a specific
2797 * block.
2799 static enum ice_status
2800 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
2804 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2805 return ICE_ERR_PARAM;
2807 return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
2811 * ice_free_tcam_ent - free hardware TCAM entry
2812 * @hw: pointer to the HW struct
2813 * @blk: the block from which to free the TCAM entry
2814 * @tcam_idx: the TCAM entry to free
2816 * This function frees an entry in a Profile ID TCAM for a specific block.
2818 static enum ice_status
2819 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
2823 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2824 return ICE_ERR_PARAM;
2826 return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
2830 * ice_alloc_prof_id - allocate profile ID
2831 * @hw: pointer to the HW struct
2832 * @blk: the block to allocate the profile ID for
2833 * @prof_id: pointer to variable to receive the profile ID
2835 * This function allocates a new profile ID, which also corresponds to a Field
2836 * Vector (Extraction Sequence) entry.
2838 static enum ice_status
2839 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
2841 enum ice_status status;
2845 if (!ice_prof_id_rsrc_type(blk, &res_type))
2846 return ICE_ERR_PARAM;
2848 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
2850 *prof_id = (u8)get_prof;
2856 * ice_free_prof_id - free profile ID
2857 * @hw: pointer to the HW struct
2858 * @blk: the block from which to free the profile ID
2859 * @prof_id: the profile ID to free
2861 * This function frees a profile ID, which also corresponds to a Field Vector.
2863 static enum ice_status
2864 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2866 u16 tmp_prof_id = (u16)prof_id;
2869 if (!ice_prof_id_rsrc_type(blk, &res_type))
2870 return ICE_ERR_PARAM;
2872 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
2876 * ice_prof_inc_ref - increment reference count for profile
2877 * @hw: pointer to the HW struct
2878 * @blk: the block from which to free the profile ID
2879 * @prof_id: the profile ID for which to increment the reference count
2881 static enum ice_status
2882 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2884 if (prof_id >= hw->blk[blk].es.count)
2885 return ICE_ERR_PARAM;
2887 hw->blk[blk].es.ref_count[prof_id]++;
2893 * ice_write_prof_mask_reg - write profile mask register
2894 * @hw: pointer to the HW struct
2895 * @blk: hardware block
2896 * @mask_idx: mask index
2897 * @idx: index of the FV which will use the mask
2898 * @mask: the 16-bit mask
2901 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
2909 offset = GLQF_HMASK(mask_idx);
2910 val = (idx << GLQF_HMASK_MSK_INDEX_S) &
2911 GLQF_HMASK_MSK_INDEX_M;
2912 val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
2915 offset = GLQF_FDMASK(mask_idx);
2916 val = (idx << GLQF_FDMASK_MSK_INDEX_S) &
2917 GLQF_FDMASK_MSK_INDEX_M;
2918 val |= (mask << GLQF_FDMASK_MASK_S) &
2922 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2927 wr32(hw, offset, val);
2928 ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
2929 blk, idx, offset, val);
2933 * ice_write_prof_mask_enable_res - write profile mask enable register
2934 * @hw: pointer to the HW struct
2935 * @blk: hardware block
2936 * @prof_id: profile ID
2937 * @enable_mask: enable mask
2940 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
2941 u16 prof_id, u32 enable_mask)
2947 offset = GLQF_HMASK_SEL(prof_id);
2950 offset = GLQF_FDMASK_SEL(prof_id);
2953 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2958 wr32(hw, offset, enable_mask);
2959 ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
2960 blk, prof_id, offset, enable_mask);
2964 * ice_init_prof_masks - initialize profile masks
2965 * @hw: pointer to the HW struct
2966 * @blk: hardware block
2968 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
2973 ice_init_lock(&hw->blk[blk].masks.lock);
2975 per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
2977 hw->blk[blk].masks.count = per_pf;
2978 hw->blk[blk].masks.first = hw->pf_id * per_pf;
2980 ice_memset(hw->blk[blk].masks.masks, 0,
2981 sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM);
2983 for (i = hw->blk[blk].masks.first;
2984 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
2985 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
2989 * ice_init_all_prof_masks - initialize all profile masks
2990 * @hw: pointer to the HW struct
2992 void ice_init_all_prof_masks(struct ice_hw *hw)
2994 ice_init_prof_masks(hw, ICE_BLK_RSS);
2995 ice_init_prof_masks(hw, ICE_BLK_FD);
2999 * ice_alloc_prof_mask - allocate profile mask
3000 * @hw: pointer to the HW struct
3001 * @blk: hardware block
3002 * @idx: index of FV which will use the mask
3003 * @mask: the 16-bit mask
3004 * @mask_idx: variable to receive the mask index
3006 static enum ice_status
3007 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
3010 bool found_unused = false, found_copy = false;
3011 enum ice_status status = ICE_ERR_MAX_LIMIT;
3012 u16 unused_idx = 0, copy_idx = 0;
3015 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3016 return ICE_ERR_PARAM;
3018 ice_acquire_lock(&hw->blk[blk].masks.lock);
3020 for (i = hw->blk[blk].masks.first;
3021 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3022 if (hw->blk[blk].masks.masks[i].in_use) {
3023 /* if mask is in use and it exactly duplicates the
3024 * desired mask and index, then it can be reused
3026 if (hw->blk[blk].masks.masks[i].mask == mask &&
3027 hw->blk[blk].masks.masks[i].idx == idx) {
3033 /* save off unused index, but keep searching in case
3034 * there is an exact match later on
3036 if (!found_unused) {
3037 found_unused = true;
3044 else if (found_unused)
3047 goto err_ice_alloc_prof_mask;
3049 /* update mask for a new entry */
3051 hw->blk[blk].masks.masks[i].in_use = true;
3052 hw->blk[blk].masks.masks[i].mask = mask;
3053 hw->blk[blk].masks.masks[i].idx = idx;
3054 hw->blk[blk].masks.masks[i].ref = 0;
3055 ice_write_prof_mask_reg(hw, blk, i, idx, mask);
3058 hw->blk[blk].masks.masks[i].ref++;
3060 status = ICE_SUCCESS;
3062 err_ice_alloc_prof_mask:
3063 ice_release_lock(&hw->blk[blk].masks.lock);
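/* Allocation sketch (hypothetical values; "ena_mask" is assumed to be
 * the caller's accumulator of enabled mask slots): mask FV word 2 down
 * to its low byte and remember the slot for a later free:
 *
 *	u16 mask_idx;
 *
 *	if (!ice_alloc_prof_mask(hw, ICE_BLK_FD, 2, 0x00FF, &mask_idx))
 *		ena_mask |= BIT(mask_idx);
 *
 * An exact duplicate (same idx and mask) bumps the reference count on
 * the existing slot rather than consuming a new one.
 */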
3069 * ice_free_prof_mask - free profile mask
3070 * @hw: pointer to the HW struct
3071 * @blk: hardware block
3072 * @mask_idx: index of mask
3074 static enum ice_status
3075 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
3077 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3078 return ICE_ERR_PARAM;
3080 if (!(mask_idx >= hw->blk[blk].masks.first &&
3081 mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
3082 return ICE_ERR_DOES_NOT_EXIST;
3084 ice_acquire_lock(&hw->blk[blk].masks.lock);
3086 if (!hw->blk[blk].masks.masks[mask_idx].in_use)
3087 goto exit_ice_free_prof_mask;
3089 if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
3090 hw->blk[blk].masks.masks[mask_idx].ref--;
3091 goto exit_ice_free_prof_mask;
3095 hw->blk[blk].masks.masks[mask_idx].in_use = false;
3096 hw->blk[blk].masks.masks[mask_idx].mask = 0;
3097 hw->blk[blk].masks.masks[mask_idx].idx = 0;
3099 /* update mask as unused entry */
3100 ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk, mask_idx);
3101 ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
3103 exit_ice_free_prof_mask:
3104 ice_release_lock(&hw->blk[blk].masks.lock);
3110 * ice_free_prof_masks - free all profile masks for a profile
3111 * @hw: pointer to the HW struct
3112 * @blk: hardware block
3113 * @prof_id: profile ID
3115 static enum ice_status
3116 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
3121 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3122 return ICE_ERR_PARAM;
3124 mask_bm = hw->blk[blk].es.mask_ena[prof_id];
3125 for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
3126 if (mask_bm & BIT(i))
3127 ice_free_prof_mask(hw, blk, i);
3133 * ice_shutdown_prof_masks - clear profile masks and release the masking lock
3134 * @hw: pointer to the HW struct
3135 * @blk: hardware block
3137 * This should be called before unloading the driver
3139 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
3143 ice_acquire_lock(&hw->blk[blk].masks.lock);
3145 for (i = hw->blk[blk].masks.first;
3146 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
3147 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3149 hw->blk[blk].masks.masks[i].in_use = false;
3150 hw->blk[blk].masks.masks[i].idx = 0;
3151 hw->blk[blk].masks.masks[i].mask = 0;
3154 ice_release_lock(&hw->blk[blk].masks.lock);
3155 ice_destroy_lock(&hw->blk[blk].masks.lock);
3159 * ice_shutdown_all_prof_masks - releases all locks for masking
3160 * @hw: pointer to the HW struct
3163 * This should be called before unloading the driver
3165 void ice_shutdown_all_prof_masks(struct ice_hw *hw)
3167 ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
3168 ice_shutdown_prof_masks(hw, ICE_BLK_FD);
3172 * ice_update_prof_masking - set registers according to masking
3173 * @hw: pointer to the HW struct
3174 * @blk: hardware block
3175 * @prof_id: profile ID
3176 * @es: field vector (extraction sequence) for the profile
3177 * @masks: masks to apply per field vector word
3179 static enum ice_status
3180 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
3181 struct ice_fv_word *es, u16 *masks)
3188 /* Only support FD and RSS masking, otherwise nothing to be done */
3189 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3192 for (i = 0; i < hw->blk[blk].es.fvw; i++)
3193 if (masks[i] && masks[i] != 0xFFFF) {
3194 if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
3195 ena_mask |= BIT(idx);
3197 /* not enough bitmaps */
3204 /* free any bitmaps we have allocated */
3205 for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
3206 if (ena_mask & BIT(i))
3207 ice_free_prof_mask(hw, blk, i);
3209 return ICE_ERR_OUT_OF_RANGE;
3212 /* enable the masks for this profile */
3213 ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
3215 /* store enabled masks with profile so that they can be freed later */
3216 hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
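/* Worked example (hypothetical three-word masks array): with
 *
 *	u16 masks[3] = { 0x0000, 0x00FF, 0xFFFF };
 *
 * only index 1 consumes a mask slot; 0x0000 and 0xFFFF both mean "no
 * masking" and are skipped, so ena_mask ends up with exactly one bit
 * set.
 */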
3222 * ice_write_es - write an extraction sequence to hardware
3223 * @hw: pointer to the HW struct
3224 * @blk: the block in which to write the extraction sequence
3225 * @prof_id: the profile ID to write
3226 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
3229 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3230 struct ice_fv_word *fv)
3234 off = prof_id * hw->blk[blk].es.fvw;
3236 ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
3237 sizeof(*fv), ICE_NONDMA_MEM);
3238 hw->blk[blk].es.written[prof_id] = false;
3240 ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
3241 sizeof(*fv), ICE_NONDMA_TO_NONDMA);
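/* Usage sketch ("fv" is assumed to point at es.fvw valid words):
 *
 *	ice_write_es(hw, ICE_BLK_RSS, prof_id, fv);
 *
 * and, when the profile is retired, the same call with a NULL fv zeroes
 * the entry again:
 *
 *	ice_write_es(hw, ICE_BLK_RSS, prof_id, NULL);
 */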
3246 * ice_prof_dec_ref - decrement reference count for profile
3247 * @hw: pointer to the HW struct
3248 * @blk: the block from which to free the profile ID
3249 * @prof_id: the profile ID for which to decrement the reference count
3251 static enum ice_status
3252 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3254 if (prof_id >= hw->blk[blk].es.count)
3255 return ICE_ERR_PARAM;
3257 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3258 if (!--hw->blk[blk].es.ref_count[prof_id]) {
3259 ice_write_es(hw, blk, prof_id, NULL);
3260 ice_free_prof_masks(hw, blk, prof_id);
3261 return ice_free_prof_id(hw, blk, prof_id);
3268 /* Block / table section IDs */
3269 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3273 ICE_SID_PROFID_TCAM_SW,
3274 ICE_SID_PROFID_REDIR_SW,
3281 ICE_SID_PROFID_TCAM_ACL,
3282 ICE_SID_PROFID_REDIR_ACL,
3289 ICE_SID_PROFID_TCAM_FD,
3290 ICE_SID_PROFID_REDIR_FD,
3297 ICE_SID_PROFID_TCAM_RSS,
3298 ICE_SID_PROFID_REDIR_RSS,
3305 ICE_SID_PROFID_TCAM_PE,
3306 ICE_SID_PROFID_REDIR_PE,
3312 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3313 * @hw: pointer to the hardware structure
3314 * @blk: the HW block to initialize
3317 void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3321 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3324 ptg = hw->blk[blk].xlt1.t[pt];
3325 if (ptg != ICE_DEFAULT_PTG) {
3326 ice_ptg_alloc_val(hw, blk, ptg);
3327 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3333 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3334 * @hw: pointer to the hardware structure
3335 * @blk: the HW block to initialize
3337 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3341 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3344 vsig = hw->blk[blk].xlt2.t[vsi];
3346 ice_vsig_alloc_val(hw, blk, vsig);
3347 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3348 /* no changes at this time, since this has been
3349 * initialized from the original package
3351 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3357 * ice_init_sw_db - init software database from HW tables
3358 * @hw: pointer to the hardware structure
3360 static void ice_init_sw_db(struct ice_hw *hw)
3364 for (i = 0; i < ICE_BLK_COUNT; i++) {
3365 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3366 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3371 * ice_fill_tbl - Reads content of a single table type into database
3372 * @hw: pointer to the hardware structure
3373 * @block_id: Block ID of the table to copy
3374 * @sid: Section ID of the table to copy
3376 * Will attempt to read the entire content of a given table of a single block
3377 * into the driver database. We assume that the buffer will always
3378 * be as large or larger than the data contained in the package. If
3379 * this condition is not met, there is most likely an error in the package
3380 * file.
3382 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3384 u32 dst_len, sect_len, offset = 0;
3385 struct ice_prof_redir_section *pr;
3386 struct ice_prof_id_section *pid;
3387 struct ice_xlt1_section *xlt1;
3388 struct ice_xlt2_section *xlt2;
3389 struct ice_sw_fv_section *es;
3390 struct ice_pkg_enum state;
3394 /* if the HW segment pointer is null then the first iteration of
3395 * ice_pkg_enum_section() will fail. In this case the HW tables will
3396 * not be filled and the function simply returns.
3399 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3403 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
3405 sect = ice_pkg_enum_section(hw->seg, &state, sid);
3409 case ICE_SID_XLT1_SW:
3410 case ICE_SID_XLT1_FD:
3411 case ICE_SID_XLT1_RSS:
3412 case ICE_SID_XLT1_ACL:
3413 case ICE_SID_XLT1_PE:
3414 xlt1 = (struct ice_xlt1_section *)sect;
3416 sect_len = LE16_TO_CPU(xlt1->count) *
3417 sizeof(*hw->blk[block_id].xlt1.t);
3418 dst = hw->blk[block_id].xlt1.t;
3419 dst_len = hw->blk[block_id].xlt1.count *
3420 sizeof(*hw->blk[block_id].xlt1.t);
3422 case ICE_SID_XLT2_SW:
3423 case ICE_SID_XLT2_FD:
3424 case ICE_SID_XLT2_RSS:
3425 case ICE_SID_XLT2_ACL:
3426 case ICE_SID_XLT2_PE:
3427 xlt2 = (struct ice_xlt2_section *)sect;
3428 src = (_FORCE_ u8 *)xlt2->value;
3429 sect_len = LE16_TO_CPU(xlt2->count) *
3430 sizeof(*hw->blk[block_id].xlt2.t);
3431 dst = (u8 *)hw->blk[block_id].xlt2.t;
3432 dst_len = hw->blk[block_id].xlt2.count *
3433 sizeof(*hw->blk[block_id].xlt2.t);
3435 case ICE_SID_PROFID_TCAM_SW:
3436 case ICE_SID_PROFID_TCAM_FD:
3437 case ICE_SID_PROFID_TCAM_RSS:
3438 case ICE_SID_PROFID_TCAM_ACL:
3439 case ICE_SID_PROFID_TCAM_PE:
3440 pid = (struct ice_prof_id_section *)sect;
3441 src = (u8 *)pid->entry;
3442 sect_len = LE16_TO_CPU(pid->count) *
3443 sizeof(*hw->blk[block_id].prof.t);
3444 dst = (u8 *)hw->blk[block_id].prof.t;
3445 dst_len = hw->blk[block_id].prof.count *
3446 sizeof(*hw->blk[block_id].prof.t);
3448 case ICE_SID_PROFID_REDIR_SW:
3449 case ICE_SID_PROFID_REDIR_FD:
3450 case ICE_SID_PROFID_REDIR_RSS:
3451 case ICE_SID_PROFID_REDIR_ACL:
3452 case ICE_SID_PROFID_REDIR_PE:
3453 pr = (struct ice_prof_redir_section *)sect;
3454 src = pr->redir_value;
3455 sect_len = LE16_TO_CPU(pr->count) *
3456 sizeof(*hw->blk[block_id].prof_redir.t);
3457 dst = hw->blk[block_id].prof_redir.t;
3458 dst_len = hw->blk[block_id].prof_redir.count *
3459 sizeof(*hw->blk[block_id].prof_redir.t);
3461 case ICE_SID_FLD_VEC_SW:
3462 case ICE_SID_FLD_VEC_FD:
3463 case ICE_SID_FLD_VEC_RSS:
3464 case ICE_SID_FLD_VEC_ACL:
3465 case ICE_SID_FLD_VEC_PE:
3466 es = (struct ice_sw_fv_section *)sect;
3468 sect_len = (u32)(LE16_TO_CPU(es->count) *
3469 hw->blk[block_id].es.fvw) *
3470 sizeof(*hw->blk[block_id].es.t);
3471 dst = (u8 *)hw->blk[block_id].es.t;
3472 dst_len = (u32)(hw->blk[block_id].es.count *
3473 hw->blk[block_id].es.fvw) *
3474 sizeof(*hw->blk[block_id].es.t);
3480 /* if the section offset exceeds destination length, terminate
3481 * table fill
3483 if (offset > dst_len)
3486 /* if the sum of section size and offset exceeds the destination size
3487 * then we are out of bounds of the HW table size for that PF.
3488 * Change the section length to fill only the remaining table space
3491 if ((offset + sect_len) > dst_len)
3492 sect_len = dst_len - offset;
3494 ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
3496 sect = ice_pkg_enum_section(NULL, &state, sid);
3501 * ice_fill_blk_tbls - Read package context for tables
3502 * @hw: pointer to the hardware structure
3504 * Reads the current package contents and populates the driver
3505 * database with the data iteratively for all advanced feature
3506 * blocks. Assume that the HW tables have been allocated.
3508 void ice_fill_blk_tbls(struct ice_hw *hw)
3512 for (i = 0; i < ICE_BLK_COUNT; i++) {
3513 enum ice_block blk_id = (enum ice_block)i;
3515 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3516 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3517 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3518 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3519 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
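/* Typical bring-up ordering, sketched (the package download itself is
 * handled elsewhere in the driver):
 *
 *	status = ice_init_hw_tbls(hw);
 *	...download and verify the DDP package...
 *	ice_fill_blk_tbls(hw);
 *
 * Calling ice_fill_blk_tbls() before a package is loaded is harmless:
 * with hw->seg NULL, ice_fill_tbl() only logs and leaves the tables
 * empty.
 */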
3526 * ice_free_prof_map - free profile map
3527 * @hw: pointer to the hardware structure
3528 * @blk_idx: HW block index
3530 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3532 struct ice_es *es = &hw->blk[blk_idx].es;
3533 struct ice_prof_map *del, *tmp;
3535 ice_acquire_lock(&es->prof_map_lock);
3536 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
3537 ice_prof_map, list) {
3538 LIST_DEL(&del->list);
3541 INIT_LIST_HEAD(&es->prof_map);
3542 ice_release_lock(&es->prof_map_lock);
3546 * ice_free_flow_profs - free flow profile entries
3547 * @hw: pointer to the hardware structure
3548 * @blk_idx: HW block index
3550 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3552 struct ice_flow_prof *p, *tmp;
3554 ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
3555 LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
3556 ice_flow_prof, l_entry) {
3557 struct ice_flow_entry *e, *t;
3559 LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
3560 ice_flow_entry, l_entry)
3561 ice_flow_rem_entry(hw, ICE_FLOW_ENTRY_HNDL(e));
3563 LIST_DEL(&p->l_entry);
3565 ice_free(hw, p->acts);
3568 ice_release_lock(&hw->fl_profs_locks[blk_idx]);
3570 /* if driver is in reset and tables are being cleared
3571 * re-initialize the flow profile list heads
3573 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3577 * ice_free_vsig_tbl - free complete VSIG table entries
3578 * @hw: pointer to the hardware structure
3579 * @blk: the HW block on which to free the VSIG table entries
3581 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3585 if (!hw->blk[blk].xlt2.vsig_tbl)
3588 for (i = 1; i < ICE_MAX_VSIGS; i++)
3589 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3590 ice_vsig_free(hw, blk, i);
3594 * ice_free_hw_tbls - free hardware table memory
3595 * @hw: pointer to the hardware structure
3597 void ice_free_hw_tbls(struct ice_hw *hw)
3599 struct ice_rss_cfg *r, *rt;
3602 for (i = 0; i < ICE_BLK_COUNT; i++) {
3603 if (hw->blk[i].is_list_init) {
3604 struct ice_es *es = &hw->blk[i].es;
3606 ice_free_prof_map(hw, i);
3607 ice_destroy_lock(&es->prof_map_lock);
3608 ice_free_flow_profs(hw, i);
3609 ice_destroy_lock(&hw->fl_profs_locks[i]);
3611 hw->blk[i].is_list_init = false;
3613 ice_free_vsig_tbl(hw, (enum ice_block)i);
3614 ice_free(hw, hw->blk[i].xlt1.ptypes);
3615 ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
3616 ice_free(hw, hw->blk[i].xlt1.t);
3617 ice_free(hw, hw->blk[i].xlt2.t);
3618 ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
3619 ice_free(hw, hw->blk[i].xlt2.vsis);
3620 ice_free(hw, hw->blk[i].prof.t);
3621 ice_free(hw, hw->blk[i].prof_redir.t);
3622 ice_free(hw, hw->blk[i].es.t);
3623 ice_free(hw, hw->blk[i].es.ref_count);
3624 ice_free(hw, hw->blk[i].es.written);
3625 ice_free(hw, hw->blk[i].es.mask_ena);
3628 LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
3629 ice_rss_cfg, l_entry) {
3630 LIST_DEL(&r->l_entry);
3633 ice_destroy_lock(&hw->rss_locks);
3634 ice_shutdown_all_prof_masks(hw);
3635 ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
3639 * ice_init_flow_profs - init flow profile locks and list heads
3640 * @hw: pointer to the hardware structure
3641 * @blk_idx: HW block index
3643 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3645 ice_init_lock(&hw->fl_profs_locks[blk_idx]);
3646 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3650 * ice_init_hw_tbls - init hardware table memory
3651 * @hw: pointer to the hardware structure
3653 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3657 ice_init_lock(&hw->rss_locks);
3658 INIT_LIST_HEAD(&hw->rss_list_head);
3659 ice_init_all_prof_masks(hw);
3660 for (i = 0; i < ICE_BLK_COUNT; i++) {
3661 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3662 struct ice_prof_tcam *prof = &hw->blk[i].prof;
3663 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3664 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3665 struct ice_es *es = &hw->blk[i].es;
3668 if (hw->blk[i].is_list_init)
3671 ice_init_flow_profs(hw, i);
3672 ice_init_lock(&es->prof_map_lock);
3673 INIT_LIST_HEAD(&es->prof_map);
3674 hw->blk[i].is_list_init = true;
3676 hw->blk[i].overwrite = blk_sizes[i].overwrite;
3677 es->reverse = blk_sizes[i].reverse;
3679 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3680 xlt1->count = blk_sizes[i].xlt1;
3682 xlt1->ptypes = (struct ice_ptg_ptype *)
3683 ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
3688 xlt1->ptg_tbl = (struct ice_ptg_entry *)
3689 ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
3694 xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
3698 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3699 xlt2->count = blk_sizes[i].xlt2;
3701 xlt2->vsis = (struct ice_vsig_vsi *)
3702 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
3707 xlt2->vsig_tbl = (struct ice_vsig_entry *)
3708 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
3709 if (!xlt2->vsig_tbl)
3712 for (j = 0; j < xlt2->count; j++)
3713 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3715 xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
3719 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3720 prof->count = blk_sizes[i].prof_tcam;
3721 prof->max_prof_id = blk_sizes[i].prof_id;
3722 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3723 prof->t = (struct ice_prof_tcam_entry *)
3724 ice_calloc(hw, prof->count, sizeof(*prof->t));
3729 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3730 prof_redir->count = blk_sizes[i].prof_redir;
3731 prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
3732 sizeof(*prof_redir->t));
3737 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3738 es->count = blk_sizes[i].es;
3739 es->fvw = blk_sizes[i].fvw;
3740 es->t = (struct ice_fv_word *)
3741 ice_calloc(hw, (u32)(es->count * es->fvw),
3746 es->ref_count = (u16 *)
3747 ice_calloc(hw, es->count, sizeof(*es->ref_count));
3749 es->written = (u8 *)
3750 ice_calloc(hw, es->count, sizeof(*es->written));
3751 es->mask_ena = (u32 *)
3752 ice_calloc(hw, es->count, sizeof(*es->mask_ena));
3759 ice_free_hw_tbls(hw);
3760 return ICE_ERR_NO_MEMORY;
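/* Because the error path above funnels through ice_free_hw_tbls(),
 * partially allocated tables are already released and a caller needs
 * only a single status check:
 *
 *	if (ice_init_hw_tbls(hw))
 *		return ICE_ERR_NO_MEMORY;
 */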
3764 * ice_prof_gen_key - generate profile ID key
3765 * @hw: pointer to the HW struct
3766 * @blk: the block in which to write the profile ID
3767 * @ptg: packet type group (PTG) portion of key
3768 * @vsig: VSIG portion of key
3769 * @cdid: cdid portion of key
3770 * @flags: flag portion of key
3771 * @vl_msk: valid mask
3772 * @dc_msk: don't care mask
3773 * @nm_msk: never match mask
3774 * @key: output of profile ID key
3776 static enum ice_status
3777 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3778 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3779 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3780 u8 key[ICE_TCAM_KEY_SZ])
3782 struct ice_prof_id_key inkey;
3785 inkey.xlt2_cdid = CPU_TO_LE16(vsig);
3786 inkey.flags = CPU_TO_LE16(flags);
3788 switch (hw->blk[blk].prof.cdid_bits) {
3792 #define ICE_CD_2_M 0xC000U
3793 #define ICE_CD_2_S 14
3794 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
3795 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
3798 #define ICE_CD_4_M 0xF000U
3799 #define ICE_CD_4_S 12
3800 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
3801 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
3804 #define ICE_CD_8_M 0xFF00U
3805 #define ICE_CD_8_S 8
3806 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
3807 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
3810 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3814 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3815 nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
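/* Worked example of the CDID placement above (hypothetical values):
 * with 2 cdid_bits and cdid = 1, the update amounts to
 *
 *	inkey.xlt2_cdid &= ~CPU_TO_LE16(0xC000);
 *	inkey.xlt2_cdid |= CPU_TO_LE16(BIT(1) << 14);
 *
 * i.e. the one-hot CDID value 0x8000 occupies the top two bits while
 * the VSIG keeps the low 14 bits of the combined field.
 */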
3819 * ice_tcam_write_entry - write TCAM entry
3820 * @hw: pointer to the HW struct
3821 * @blk: the block in which to write the profile ID
3822 * @idx: the entry index to write to
3823 * @prof_id: profile ID
3824 * @ptg: packet type group (PTG) portion of key
3825 * @vsig: VSIG portion of key
3826 * @cdid: cdid portion of key
3827 * @flags: flag portion of key
3828 * @vl_msk: valid mask
3829 * @dc_msk: don't care mask
3830 * @nm_msk: never match mask
3832 static enum ice_status
3833 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
3834 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
3835 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3836 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
3837 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
3840 enum ice_status status;
3842 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
3843 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
3845 hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
3846 hw->blk[blk].prof.t[idx].prof_id = prof_id;
3853 * ice_vsig_get_ref - returns the number of VSIs that belong to a VSIG
3854 * @hw: pointer to the hardware structure
3856 * @vsig: VSIG to query
3857 * @refs: pointer to variable to receive the reference count
3859 static enum ice_status
3860 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
3862 u16 idx = vsig & ICE_VSIG_IDX_M;
3863 struct ice_vsig_vsi *ptr;
3866 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3867 return ICE_ERR_DOES_NOT_EXIST;
3869 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3872 ptr = ptr->next_vsi;
3879 * ice_has_prof_vsig - check to see if VSIG has a specific profile
3880 * @hw: pointer to the hardware structure
3882 * @vsig: VSIG to check against
3883 * @hdl: profile handle
3886 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
3888 u16 idx = vsig & ICE_VSIG_IDX_M;
3889 struct ice_vsig_prof *ent;
3891 LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3892 ice_vsig_prof, list) {
3893 if (ent->profile_cookie == hdl)
3897 ice_debug(hw, ICE_DBG_INIT,
3898 "Characteristic list for VSI group %d not found.\n",
3904 * ice_prof_bld_es - build profile ID extraction sequence changes
3905 * @hw: pointer to the HW struct
3906 * @blk: hardware block
3907 * @bld: the update package buffer build to add to
3908 * @chgs: the list of changes to make in hardware
3910 static enum ice_status
3911 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
3912 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
3914 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
3915 struct ice_chs_chg *tmp;
3917 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
3918 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
3919 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
3920 struct ice_pkg_es *p;
3923 id = ice_sect_id(blk, ICE_VEC_TBL);
3924 p = (struct ice_pkg_es *)
3925 ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
3930 return ICE_ERR_MAX_LIMIT;
3932 p->count = CPU_TO_LE16(1);
3933 p->offset = CPU_TO_LE16(tmp->prof_id);
3935 ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
3936 ICE_NONDMA_TO_NONDMA);
3944 * ice_prof_bld_tcam - build profile ID TCAM changes
3945 * @hw: pointer to the HW struct
3946 * @blk: hardware block
3947 * @bld: the update package buffer build to add to
3948 * @chgs: the list of changes to make in hardware
3950 static enum ice_status
3951 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
3952 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
3954 struct ice_chs_chg *tmp;
3956 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
3957 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
3958 struct ice_prof_id_section *p;
3961 id = ice_sect_id(blk, ICE_PROF_TCAM);
3962 p = (struct ice_prof_id_section *)
3963 ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
3966 return ICE_ERR_MAX_LIMIT;
3968 p->count = CPU_TO_LE16(1);
3969 p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
3970 p->entry[0].prof_id = tmp->prof_id;
3972 ice_memcpy(p->entry[0].key,
3973 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
3974 sizeof(hw->blk[blk].prof.t->key),
3975 ICE_NONDMA_TO_NONDMA);
3983 * ice_prof_bld_xlt1 - build XLT1 changes
3984 * @blk: hardware block
3985 * @bld: the update package buffer build to add to
3986 * @chgs: the list of changes to make in hardware
3988 static enum ice_status
3989 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
3990 struct LIST_HEAD_TYPE *chgs)
3992 struct ice_chs_chg *tmp;
3994 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
3995 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
3996 struct ice_xlt1_section *p;
3999 id = ice_sect_id(blk, ICE_XLT1);
4000 p = (struct ice_xlt1_section *)
4001 ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
4004 return ICE_ERR_MAX_LIMIT;
4006 p->count = CPU_TO_LE16(1);
4007 p->offset = CPU_TO_LE16(tmp->ptype);
4008 p->value[0] = tmp->ptg;
4016 * ice_prof_bld_xlt2 - build XLT2 changes
4017 * @blk: hardware block
4018 * @bld: the update package buffer build to add to
4019 * @chgs: the list of changes to make in hardware
4021 static enum ice_status
4022 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
4023 struct LIST_HEAD_TYPE *chgs)
4025 struct ice_chs_chg *tmp;
4027 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4030 if (tmp->type == ICE_VSIG_ADD)
4032 else if (tmp->type == ICE_VSI_MOVE)
4034 else if (tmp->type == ICE_VSIG_REM)
4038 struct ice_xlt2_section *p;
4041 id = ice_sect_id(blk, ICE_XLT2);
4042 p = (struct ice_xlt2_section *)
4043 ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
4046 return ICE_ERR_MAX_LIMIT;
4048 p->count = CPU_TO_LE16(1);
4049 p->offset = CPU_TO_LE16(tmp->vsi);
4050 p->value[0] = CPU_TO_LE16(tmp->vsig);
4058 * ice_upd_prof_hw - update hardware using the change list
4059 * @hw: pointer to the HW struct
4060 * @blk: hardware block
4061 * @chgs: the list of changes to make in hardware
4063 static enum ice_status
4064 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
4065 struct LIST_HEAD_TYPE *chgs)
4067 struct ice_buf_build *b;
4068 struct ice_chs_chg *tmp;
4069 enum ice_status status;
4077 /* count number of sections we need */
4078 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4079 switch (tmp->type) {
4080 case ICE_PTG_ES_ADD:
4098 sects = xlt1 + xlt2 + tcam + es;
4103 /* Build update package buffer */
4104 b = ice_pkg_buf_alloc(hw);
4106 return ICE_ERR_NO_MEMORY;
4108 status = ice_pkg_buf_reserve_section(b, sects);
4112 /* Preserve order of table update: ES, TCAM, PTG, VSIG */
4114 status = ice_prof_bld_es(hw, blk, b, chgs);
4120 status = ice_prof_bld_tcam(hw, blk, b, chgs);
4126 status = ice_prof_bld_xlt1(blk, b, chgs);
4132 status = ice_prof_bld_xlt2(blk, b, chgs);
4137 /* After package buffer build check if the section count in buffer is
4138 * non-zero and matches the number of sections detected for package
4139 * update.
4141 pkg_sects = ice_pkg_buf_get_active_sections(b);
4142 if (!pkg_sects || pkg_sects != sects) {
4143 status = ICE_ERR_INVAL_SIZE;
4147 /* update package */
4148 status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4149 if (status == ICE_ERR_AQ_ERROR)
4150 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile.\n");
4153 ice_pkg_buf_free(hw, b);
4158 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
4159 * @hw: pointer to the HW struct
4160 * @prof_id: profile ID
4161 * @mask_sel: mask select
4163 * This function enables any of the masks selected by the mask select parameter
4164 * for the profile specified.
4166 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
4168 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
4170 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
4171 GLQF_FDMASK_SEL(prof_id), mask_sel);
4174 #define ICE_SRC_DST_MAX_COUNT 8
4176 struct ice_fd_src_dst_pair {
4182 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
4183 /* These are defined in pairs */
4184 { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
4185 { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
4187 { ICE_PROT_IPV4_IL, 2, 12 },
4188 { ICE_PROT_IPV4_IL, 2, 16 },
4190 { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
4191 { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
4193 { ICE_PROT_IPV6_IL, 8, 8 },
4194 { ICE_PROT_IPV6_IL, 8, 24 },
4196 { ICE_PROT_TCP_IL, 1, 0 },
4197 { ICE_PROT_TCP_IL, 1, 2 },
4199 { ICE_PROT_UDP_OF, 1, 0 },
4200 { ICE_PROT_UDP_OF, 1, 2 },
4202 { ICE_PROT_UDP_IL_OR_S, 1, 0 },
4203 { ICE_PROT_UDP_IL_OR_S, 1, 2 },
4205 { ICE_PROT_SCTP_IL, 1, 0 },
4206 { ICE_PROT_SCTP_IL, 1, 2 }
4209 #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
4212 * ice_update_fd_swap - set register appropriately for a FD FV extraction
4213 * @hw: pointer to the HW struct
4214 * @prof_id: profile ID
4215 * @es: extraction sequence (length of array is determined by the block)
4217 static enum ice_status
4218 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
4220 ice_declare_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4221 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
4222 #define ICE_FD_FV_NOT_FOUND (-2)
4223 s8 first_free = ICE_FD_FV_NOT_FOUND;
4224 u8 used[ICE_MAX_FV_WORDS] = { 0 };
4229 ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4231 /* This code assumes that the Flow Director field vectors are assigned
4232 * from the end of the FV indexes working towards the zero index, that
4233 * only complete fields will be included and will be consecutive, and
4234 * that there are no gaps between valid indexes.
4237 /* Determine swap fields present */
4238 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
4239 /* Find the first free entry, assuming right to left population.
4240 * This is where we can start adding additional pairs if needed.
4242 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4246 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) {
4247 if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4248 es[i].off == ice_fd_pairs[j].off) {
4249 ice_set_bit(j, pair_list);
4255 orig_free = first_free;
4257 /* determine missing swap fields that need to be added */
4258 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4259 u8 bit1 = ice_is_bit_set(pair_list, i + 1);
4260 u8 bit0 = ice_is_bit_set(pair_list, i);
4265 /* add the appropriate 'paired' entry */
4271 /* check for room */
4272 if (first_free + 1 < (s8)ice_fd_pairs[index].count)
4273 return ICE_ERR_MAX_LIMIT;
4275 /* place in extraction sequence */
4276 for (k = 0; k < ice_fd_pairs[index].count; k++) {
4277 es[first_free - k].prot_id =
4278 ice_fd_pairs[index].prot_id;
4279 es[first_free - k].off =
4280 ice_fd_pairs[index].off + (k * 2);
4283 return ICE_ERR_OUT_OF_RANGE;
4285 /* keep track of non-relevant fields */
4286 mask_sel |= BIT(first_free - k);
4289 pair_start[index] = first_free;
4290 first_free -= ice_fd_pairs[index].count;
4294 /* fill in the swap array */
4295 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4297 u8 indexes_used = 1;
4299 /* assume flat at this index */
4300 #define ICE_SWAP_VALID 0x80
4301 used[si] = si | ICE_SWAP_VALID;
4303 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4308 /* check for a swap location */
4309 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) {
4310 if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4311 es[si].off == ice_fd_pairs[j].off) {
4314 /* determine the appropriate matching field */
4315 idx = j + ((j % 2) ? -1 : 1);
4317 indexes_used = ice_fd_pairs[idx].count;
4318 for (k = 0; k < indexes_used; k++) {
4319 used[si - k] = (pair_start[idx] - k) |
4330 /* for each set of 4 swap and 4 inset indexes, write the appropriate
4331 * registers
4333 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4337 for (k = 0; k < 4; k++) {
4341 if (used[idx] && !(mask_sel & BIT(idx))) {
4342 raw_swap |= used[idx] << (k * BITS_PER_BYTE);
4343 #define ICE_INSET_DFLT 0x9f
4344 raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
4348 /* write the appropriate swap register set */
4349 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
4351 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4352 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
4354 /* write the appropriate inset register set */
4355 wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
4357 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4358 prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
4361 /* initially clear the mask select for this profile */
4362 ice_update_fd_mask(hw, prof_id, 0);
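/* Worked example of the register packing above (hypothetical "used"
 * values): for word j = 1 with used[4..7] = 0x84, 0x85, 0x86, 0x87 and
 * no mask_sel bits set,
 *
 *	raw_swap = 0x87868584;
 *	raw_in   = 0x9f9f9f9f;
 *
 * each byte of GLQF_FDSWAP names a source FV index with ICE_SWAP_VALID
 * (0x80) set, and each corresponding byte of GLQF_FDINSET carries the
 * default inset value ICE_INSET_DFLT.
 */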
4368 * ice_add_prof_with_mask - add profile
4369 * @hw: pointer to the HW struct
4370 * @blk: hardware block
4371 * @id: profile tracking ID
4372 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4373 * @es: extraction sequence (length of array is determined by the block)
4374 * @masks: masks for the extraction sequence (length of array is determined by the block)
4376 * This function registers a profile, which matches a set of PTYPES with a
4377 * particular extraction sequence. While the hardware profile is allocated
4378 * it will not be written until the first call to ice_add_flow that specifies
4379 * the ID value used here.
4382 ice_add_prof_with_mask(struct ice_hw *hw, enum ice_block blk, u64 id,
4383 u8 ptypes[], struct ice_fv_word *es, u16 *masks)
4385 u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4386 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
4387 struct ice_prof_map *prof;
4388 enum ice_status status;
4392 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
4394 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4396 /* search for existing profile */
4397 status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
4399 /* allocate profile ID */
4400 status = ice_alloc_prof_id(hw, blk, &prof_id);
4402 goto err_ice_add_prof;
4403 if (blk == ICE_BLK_FD) {
4404 /* For Flow Director block, the extraction sequence may
4405 * need to be altered in the case where there are paired
4406 * fields that have no match. This is necessary because
4407 * for Flow Director, src and dest fields need to be paired
4408 * for filter programming and these values are swapped
4409 * during Tx.
4411 status = ice_update_fd_swap(hw, prof_id, es);
4413 goto err_ice_add_prof;
4415 status = ice_update_prof_masking(hw, blk, prof_id, es, masks);
4417 goto err_ice_add_prof;
4419 /* and write new es */
4420 ice_write_es(hw, blk, prof_id, es);
4423 ice_prof_inc_ref(hw, blk, prof_id);
4425 /* add profile info */
4427 prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
4429 goto err_ice_add_prof;
4431 prof->profile_cookie = id;
4432 prof->prof_id = prof_id;
4436 /* build list of ptgs */
4437 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4440 if (!ptypes[byte]) {
4445 /* Examine 8 bits per byte */
4446 for (bit = 0; bit < 8; bit++) {
4447 if (ptypes[byte] & BIT(bit)) {
4452 ptype = byte * BITS_PER_BYTE + bit;
4454 /* The package should place all ptypes in a
4455 * non-zero PTG, so the following call should
4456 * never fail.
4458 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4461 /* If PTG is already added, skip and continue */
4462 if (ice_is_bit_set(ptgs_used, ptg))
4465 ice_set_bit(ptg, ptgs_used);
4466 prof->ptg[prof->ptg_cnt] = ptg;
4468 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4471 /* nothing left in byte, then exit */
4472 m = ~((1 << (bit + 1)) - 1);
4473 if (!(ptypes[byte] & m))
4482 LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
4483 status = ICE_SUCCESS;
4486 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4491 * ice_add_prof - add profile
4492 * @hw: pointer to the HW struct
4493 * @blk: hardware block
4494 * @id: profile tracking ID
4495 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4496 * @es: extraction sequence (length of array is determined by the block)
4498 * This function registers a profile, which matches a set of PTGs with a
4499 * particular extraction sequence. While the hardware profile is allocated
4500 * it will not be written until the first call to ice_add_flow that specifies
4501 * the ID value used here.
4504 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4505 struct ice_fv_word *es)
4507 u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4508 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
4509 struct ice_prof_map *prof;
4510 enum ice_status status;
4514 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
4516 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4518 /* search for existing profile */
4519 status = ice_find_prof_id(hw, blk, es, &prof_id);
4521 /* allocate profile ID */
4522 status = ice_alloc_prof_id(hw, blk, &prof_id);
4524 goto err_ice_add_prof;
4525 if (blk == ICE_BLK_FD) {
4526 /* For Flow Director block, the extraction sequence may
4527 * need to be altered in the case where there are paired
4528 * fields that have no match. This is necessary because
4529 * for Flow Director, src and dest fields need to be paired
4530 * for filter programming and these values are swapped
4531 * during Tx.
4533 status = ice_update_fd_swap(hw, prof_id, es);
4535 goto err_ice_add_prof;
4538 /* and write new es */
4539 ice_write_es(hw, blk, prof_id, es);
4542 ice_prof_inc_ref(hw, blk, prof_id);
4544 /* add profile info */
4546 prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
4548 goto err_ice_add_prof;
4550 prof->profile_cookie = id;
4551 prof->prof_id = prof_id;
4555 /* build list of ptgs */
4556 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4559 if (!ptypes[byte]) {
4564 /* Examine 8 bits per byte */
4565 for (bit = 0; bit < 8; bit++) {
4566 if (ptypes[byte] & BIT(bit)) {
4571 ptype = byte * BITS_PER_BYTE + bit;
4573 /* The package should place all ptypes in a
4574 * non-zero PTG, so the following call should
4575 * never fail.
4577 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4580 /* If PTG is already added, skip and continue */
4581 if (ice_is_bit_set(ptgs_used, ptg))
4584 ice_set_bit(ptg, ptgs_used);
4585 prof->ptg[prof->ptg_cnt] = ptg;
4587 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4590 /* nothing left in byte, then exit */
4591 m = ~((1 << (bit + 1)) - 1);
4592 if (!(ptypes[byte] & m))
4601 LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
4602 status = ICE_SUCCESS;
4605 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
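/* Illustrative usage sketch (not part of this driver): a caller might
 * register a profile roughly as follows. The block, tracking ID, ptype bit
 * and (zeroed) extraction sequence are hypothetical placeholders; a real
 * caller derives them from its flow configuration.
 *
 *	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };
 *	struct ice_fv_word es[ICE_MAX_FV_WORDS] = { 0 };
 *	enum ice_status status;
 *
 *	ptypes[0] |= BIT(3);	(hypothetical: mark ptype 3 as matching)
 *	status = ice_add_prof(hw, ICE_BLK_RSS, 0x1234ULL, ptypes, es);
 */
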
/**
 * ice_search_prof_id_low - Search for a profile tracking ID, low level
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * This will search for a profile tracking ID which was previously added. This
 * version assumes that the caller has already acquired the prof map lock.
 */
static struct ice_prof_map *
ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_prof_map *entry = NULL;
	struct ice_prof_map *map;

	LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map,
			    list) {
		if (map->profile_cookie == id) {
			entry = map;
			break;
		}
	}

	return entry;
}

/**
 * ice_search_prof_id - Search for a profile tracking ID
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * This will search for a profile tracking ID which was previously added.
 */
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_prof_map *entry;

	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
	entry = ice_search_prof_id_low(hw, blk, id);
	ice_release_lock(&hw->blk[blk].es.prof_map_lock);

	return entry;
}

/**
 * ice_vsig_prof_id_count - count profiles in a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG to count the profiles of
 */
static u16
ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
	struct ice_vsig_prof *p;

	LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    ice_vsig_prof, list) {
		count++;
	}

	return count;
}

/**
 * ice_rel_tcam_idx - release a TCAM index
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @idx: the index to release
 */
static enum ice_status
ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
{
	/* Masks to invoke a never match entry */
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
	enum ice_status status;

	/* write the TCAM entry */
	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
				      dc_msk, nm_msk);
	if (status)
		return status;

	/* release the TCAM entry */
	status = ice_free_tcam_ent(hw, blk, idx);

	return status;
}

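/* Note on the masks above (a reading of the key layout from this code, not
 * from a datasheet): vl_msk selects which key bits carry a value, dc_msk
 * marks don't-care bits, and nm_msk marks never-match bits. Clearing bit 0
 * in dc_msk (0xFE) while setting it in nm_msk (0x01) makes that key bit
 * unsatisfiable, so the entry still occupies its TCAM slot but can no
 * longer hit; the quiesced index is then handed back to the allocator via
 * ice_free_tcam_ent().
 */
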
/**
 * ice_rem_prof_id - remove one profile from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof: pointer to profile structure to remove
 */
static enum ice_status
ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
		struct ice_vsig_prof *prof)
{
	enum ice_status status;
	u16 i;

	for (i = 0; i < prof->tcam_count; i++) {
		if (prof->tcam[i].in_use) {
			prof->tcam[i].in_use = false;
			status = ice_rel_tcam_idx(hw, blk,
						  prof->tcam[i].tcam_idx);
			if (status)
				return ICE_ERR_HW_TABLE;
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_rem_vsig - remove VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to remove
 * @chg: the change list
 */
static enum ice_status
ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
	     struct LIST_HEAD_TYPE *chg)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_vsi *vsi_cur;
	struct ice_vsig_prof *d, *t;
	enum ice_status status;

	/* remove TCAM entries */
	LIST_FOR_EACH_ENTRY_SAFE(d, t,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 ice_vsig_prof, list) {
		status = ice_rem_prof_id(hw, blk, d);
		if (status)
			return status;

		LIST_DEL(&d->list);
		ice_free(hw, d);
	}

	/* Move all VSIS associated with this VSIG to the default VSIG */
	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	/* If the VSIG has at least 1 VSI then iterate through the list
	 * and remove the VSIs before deleting the group.
	 */
	if (vsi_cur) {
		/* remove all VSIs associated with this VSIG XLT2 entry */
		do {
			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
			struct ice_chs_chg *p;

			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
			if (!p)
				return ICE_ERR_NO_MEMORY;

			p->type = ICE_VSIG_REM;
			p->orig_vsig = vsig;
			p->vsig = ICE_DEFAULT_VSIG;
			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;

			LIST_ADD(&p->list_entry, chg);

			vsi_cur = tmp;
		} while (vsi_cur);
	}

	status = ice_vsig_free(hw, blk, vsig);

	return status;
}

/**
 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG to remove the profile from
 * @hdl: profile handle indicating which profile to remove
 * @chg: list to receive a record of changes
 */
static enum ice_status
ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
		     struct LIST_HEAD_TYPE *chg)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_prof *p, *t;
	enum ice_status status;

	LIST_FOR_EACH_ENTRY_SAFE(p, t,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 ice_vsig_prof, list) {
		if (p->profile_cookie == hdl) {
			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
				/* this is the last profile, remove the VSIG */
				return ice_rem_vsig(hw, blk, vsig, chg);

			status = ice_rem_prof_id(hw, blk, p);
			if (!status) {
				LIST_DEL(&p->list);
				ice_free(hw, p);
			}
			return status;
		}
	}

	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_rem_flow_all - remove all flows with a particular profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 */
static enum ice_status
ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_chs_chg *del, *tmp;
	struct LIST_HEAD_TYPE chg;
	enum ice_status status;
	u16 i;

	INIT_LIST_HEAD(&chg);

	for (i = 1; i < ICE_MAX_VSIGS; i++) {
		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
			if (ice_has_prof_vsig(hw, blk, i, id)) {
				status = ice_rem_prof_id_vsig(hw, blk, i, id,
							      &chg);
				if (status)
					goto err_ice_rem_flow_all;
			}
		}
	}

	status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_flow_all:
	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	return status;
}

/**
 * ice_rem_prof - remove profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * This will remove the profile specified by the ID parameter, which was
 * previously created through ice_add_prof. If any existing entries
 * are associated with this profile, they will be removed as well.
 */
enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_prof_map *pmap;
	enum ice_status status;

	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);

	pmap = ice_search_prof_id_low(hw, blk, id);
	if (!pmap) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_rem_prof;
	}

	/* remove all flows with this profile */
	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
	if (status)
		goto err_ice_rem_prof;

	/* dereference profile, and possibly remove */
	ice_prof_dec_ref(hw, blk, pmap->prof_id);

	LIST_DEL(&pmap->list);
	ice_free(hw, pmap);

	status = ICE_SUCCESS;

err_ice_rem_prof:
	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
	return status;
}

/**
 * ice_get_prof - get profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: profile handle
 * @chg: change list
 */
static enum ice_status
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
	     struct LIST_HEAD_TYPE *chg)
{
	struct ice_prof_map *map;
	struct ice_chs_chg *p;
	u16 i;

	/* Get the details on the profile specified by the handle ID */
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map)
		return ICE_ERR_DOES_NOT_EXIST;

	for (i = 0; i < map->ptg_cnt; i++) {
		if (!hw->blk[blk].es.written[map->prof_id]) {
			/* add ES to change list */
			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
			if (!p)
				goto err_ice_get_prof;

			p->type = ICE_PTG_ES_ADD;
			p->ptype = 0;
			p->ptg = map->ptg[i];
			p->add_ptg = 0;

			p->add_prof = 1;
			p->prof_id = map->prof_id;

			hw->blk[blk].es.written[map->prof_id] = true;

			LIST_ADD(&p->list_entry, chg);
		}
	}

	return ICE_SUCCESS;

err_ice_get_prof:
	/* let caller clean up the change list */
	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG from which to copy the list
 * @lst: output list
 *
 * This routine makes a copy of the list of profiles in the specified VSIG.
 */
static enum ice_status
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
		   struct LIST_HEAD_TYPE *lst)
{
	struct ice_vsig_prof *ent1, *ent2;
	u16 idx = vsig & ICE_VSIG_IDX_M;

	LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    ice_vsig_prof, list) {
		struct ice_vsig_prof *p;

		/* copy to the input list */
		p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
						       ICE_NONDMA_TO_NONDMA);
		if (!p)
			goto err_ice_get_profs_vsig;

		LIST_ADD_TAIL(&p->list, lst);
	}

	return ICE_SUCCESS;

err_ice_get_profs_vsig:
	LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
		LIST_DEL(&ent1->list);
		ice_free(hw, ent1);
	}

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_add_prof_to_lst - add profile entry to a list
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @lst: the list to be added to
 * @hdl: profile handle of entry to add
 */
static enum ice_status
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
		    struct LIST_HEAD_TYPE *lst, u64 hdl)
{
	struct ice_vsig_prof *p;
	struct ice_prof_map *map;
	u16 i;

	map = ice_search_prof_id(hw, blk, hdl);
	if (!map)
		return ICE_ERR_DOES_NOT_EXIST;

	p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
	if (!p)
		return ICE_ERR_NO_MEMORY;

	p->profile_cookie = map->profile_cookie;
	p->prof_id = map->prof_id;
	p->tcam_count = map->ptg_cnt;

	for (i = 0; i < map->ptg_cnt; i++) {
		p->tcam[i].prof_id = map->prof_id;
		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
		p->tcam[i].ptg = map->ptg[i];
	}

	LIST_ADD(&p->list, lst);

	return ICE_SUCCESS;
}

/**
 * ice_move_vsi - move VSI to another VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to move
 * @vsig: the VSIG to move the VSI to
 * @chg: the change list
 */
static enum ice_status
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
	     struct LIST_HEAD_TYPE *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;
	u16 orig_vsig;

	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
	if (!p)
		return ICE_ERR_NO_MEMORY;

	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
	if (!status)
		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);

	if (status) {
		ice_free(hw, p);
		return status;
	}

	p->type = ICE_VSI_MOVE;
	p->vsi = vsi;
	p->orig_vsig = orig_vsig;
	p->vsig = vsig;

	LIST_ADD(&p->list_entry, chg);

	return ICE_SUCCESS;
}

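/* The helpers in this part of the file follow a common change-list protocol:
 * the caller owns a LIST_HEAD_TYPE of struct ice_chs_chg records, helpers
 * such as ice_move_vsi() append one record per table mutation,
 * ice_upd_prof_hw() later flushes the accumulated changes to hardware, and
 * the caller frees every record whether or not the update succeeded. A
 * minimal sketch of that pattern (mirroring ice_add_vsi_flow() below):
 *
 *	struct LIST_HEAD_TYPE chg;
 *	struct ice_chs_chg *del, *tmp;
 *	enum ice_status status;
 *
 *	INIT_LIST_HEAD(&chg);
 *	status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
 *	if (!status)
 *		status = ice_upd_prof_hw(hw, blk, &chg);
 *	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
 *		LIST_DEL(&del->list_entry);
 *		ice_free(hw, del);
 *	}
 */
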
/**
 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @enable: true to enable, false to disable
 * @vsig: the VSIG of the TCAM entry
 * @tcam: pointer to the TCAM info structure of the TCAM to disable
 * @chg: the change list
 *
 * This function appends an enable or disable TCAM entry in the change log.
 */
static enum ice_status
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
		      u16 vsig, struct ice_tcam_inf *tcam,
		      struct LIST_HEAD_TYPE *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;

	/* Default: enable means change the low flag bit to don't care */
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };

	/* if disabling, free the TCAM */
	if (!enable) {
		status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx);
		tcam->tcam_idx = 0;
		tcam->in_use = 0;
		return status;
	}

	/* for re-enabling, reallocate a TCAM */
	status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
	if (status)
		return status;

	/* add TCAM to change list */
	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
	if (!p)
		return ICE_ERR_NO_MEMORY;

	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
				      tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
				      nm_msk);
	if (status)
		goto err_ice_prof_tcam_ena_dis;

	tcam->in_use = 1;

	p->type = ICE_TCAM_ADD;
	p->add_tcam_idx = true;
	p->prof_id = tcam->prof_id;
	p->ptg = tcam->ptg;
	p->vsig = 0;
	p->tcam_idx = tcam->tcam_idx;

	/* log change */
	LIST_ADD(&p->list_entry, chg);

	return ICE_SUCCESS;

err_ice_prof_tcam_ena_dis:
	ice_free(hw, p);
	return status;
}

/**
 * ice_adj_prof_priorities - adjust profile based on priorities
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG for which to adjust profile priorities
 * @chg: the change list
 */
static enum ice_status
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
			struct LIST_HEAD_TYPE *chg)
{
	ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
	struct ice_vsig_prof *t;
	enum ice_status status;
	u16 idx;

	ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
	idx = vsig & ICE_VSIG_IDX_M;

	/* Priority is based on the order in which the profiles are added. The
	 * newest added profile has highest priority and the oldest added
	 * profile has the lowest priority. Since the profile property list for
	 * a VSIG is sorted from newest to oldest, this code traverses the list
	 * in order and enables the first of each PTG that it finds (that is
	 * not already enabled); it also disables any duplicate PTGs that it
	 * finds in the older profiles (that are currently enabled).
	 */

	LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    ice_vsig_prof, list) {
		u16 i;

		for (i = 0; i < t->tcam_count; i++) {
			/* Scan the priorities from newest to oldest.
			 * Make sure that the newest profiles take priority.
			 */
			if (ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
			    t->tcam[i].in_use) {
				/* need to mark this PTG as never match, as it
				 * was already in use and therefore duplicate
				 * (and lower priority)
				 */
				status = ice_prof_tcam_ena_dis(hw, blk, false,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			} else if (!ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
				   !t->tcam[i].in_use) {
				/* need to enable this PTG, as it is not in use
				 * and not enabled (highest priority)
				 */
				status = ice_prof_tcam_ena_dis(hw, blk, true,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			}

			/* keep track of used ptgs */
			ice_set_bit(t->tcam[i].ptg, ptgs_used);
		}
	}

	return ICE_SUCCESS;
}

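/* Worked example of the priority rule above (illustrative): suppose a VSIG
 * holds profile B (older) and profile A (newer), both carrying a TCAM entry
 * for PTG 5. Walking the property list from newest to oldest, A's entry is
 * seen first, PTG 5 is recorded in ptgs_used, and A's entry stays (or is
 * re-enabled). When B's entry for PTG 5 is reached, the bit is already set
 * and the entry is still in_use, so its TCAM index is freed and it is marked
 * not in use; packets in PTG 5 can then only hit the newer profile.
 */
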
/**
 * ice_add_prof_id_vsig - add profile to VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to which this profile is to be added
 * @hdl: the profile handle indicating the profile to add
 * @chg: the change list
 */
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
		     struct LIST_HEAD_TYPE *chg)
{
	/* Masks that ignore flags */
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct ice_prof_map *map;
	struct ice_vsig_prof *t;
	struct ice_chs_chg *p;
	u16 i;

	/* Get the details on the profile specified by the handle ID */
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Error, if this VSIG already has this profile */
	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
		return ICE_ERR_ALREADY_EXISTS;

	/* new VSIG profile structure */
	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
	if (!t)
		goto err_ice_add_prof_id_vsig;

	t->profile_cookie = map->profile_cookie;
	t->prof_id = map->prof_id;
	t->tcam_count = map->ptg_cnt;

	/* create TCAM entries */
	for (i = 0; i < map->ptg_cnt; i++) {
		enum ice_status status;
		u16 tcam_idx;

		/* add TCAM to change list */
		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
		if (!p)
			goto err_ice_add_prof_id_vsig;

		/* allocate the TCAM entry index */
		status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
		if (status) {
			ice_free(hw, p);
			goto err_ice_add_prof_id_vsig;
		}

		t->tcam[i].ptg = map->ptg[i];
		t->tcam[i].prof_id = map->prof_id;
		t->tcam[i].tcam_idx = tcam_idx;
		t->tcam[i].in_use = true;

		p->type = ICE_TCAM_ADD;
		p->add_tcam_idx = true;
		p->prof_id = t->tcam[i].prof_id;
		p->ptg = t->tcam[i].ptg;
		p->vsig = vsig;
		p->tcam_idx = t->tcam[i].tcam_idx;

		/* write the TCAM entry */
		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
					      t->tcam[i].prof_id,
					      t->tcam[i].ptg, vsig, 0, 0,
					      vl_msk, dc_msk, nm_msk);
		if (status)
			goto err_ice_add_prof_id_vsig;

		/* log change */
		LIST_ADD(&p->list_entry, chg);
	}

	/* add profile to VSIG */
	LIST_ADD(&t->list,
		 &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);

	return ICE_SUCCESS;

err_ice_add_prof_id_vsig:
	/* let caller clean up the change list */
	return ICE_ERR_NO_MEMORY;
}

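/* Note: a profile that maps N PTGs consumes N TCAM entries in every VSIG it
 * is added to, one per (prof_id, PTG, VSIG) combination, and each entry is
 * logged as an ICE_TCAM_ADD change so that ice_upd_prof_hw() can program it.
 */
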
/**
 * ice_create_prof_id_vsig - add a new VSIG with a single profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @hdl: the profile handle of the profile that will be added to the VSIG
 * @chg: the change list
 */
static enum ice_status
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
			struct LIST_HEAD_TYPE *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;
	u16 new_vsig;

	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
	if (!p)
		return ICE_ERR_NO_MEMORY;

	new_vsig = ice_vsig_alloc(hw, blk);
	if (!new_vsig) {
		status = ICE_ERR_HW_TABLE;
		goto err_ice_create_prof_id_vsig;
	}

	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	p->type = ICE_VSIG_ADD;
	p->vsi = vsi;
	p->orig_vsig = ICE_DEFAULT_VSIG;
	p->vsig = new_vsig;

	LIST_ADD(&p->list_entry, chg);

	return ICE_SUCCESS;

err_ice_create_prof_id_vsig:
	/* let caller clean up the change list */
	ice_free(hw, p);
	return status;
}

/**
 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @lst: the list of profiles that will be added to the VSIG
 * @chg: the change list
 */
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
			 struct LIST_HEAD_TYPE *lst, struct LIST_HEAD_TYPE *chg)
{
	struct ice_vsig_prof *t;
	enum ice_status status;
	u16 vsig;

	vsig = ice_vsig_alloc(hw, blk);
	if (!vsig)
		return ICE_ERR_HW_TABLE;

	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
	if (status)
		return status;

	LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
					      chg);
		if (status)
			return status;
	}

	return ICE_SUCCESS;
}

/**
 * ice_find_prof_vsig - find a VSIG with a specific profile handle
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: the profile handle of the profile to search for
 * @vsig: returns the VSIG with the matching profile
 */
static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
	struct ice_vsig_prof *t;
	struct LIST_HEAD_TYPE lst;
	enum ice_status status;

	INIT_LIST_HEAD(&lst);

	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
	if (!t)
		return false;

	t->profile_cookie = hdl;
	LIST_ADD(&t->list, &lst);

	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);

	LIST_DEL(&t->list);
	ice_free(hw, t);

	return status == ICE_SUCCESS;
}

/**
 * ice_add_vsi_flow - add VSI flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: input VSI
 * @vsig: target VSIG to include the input VSI
 *
 * Calling this function will add the VSI to a given VSIG and
 * update the HW tables accordingly. This call can be used to
 * add multiple VSIs to a VSIG if we know beforehand that those
 * VSIs have the same characteristics as the VSIG. This avoids
 * generating a new VSIG and TCAM entries only to roll them back
 * once a matching VSIG is found.
 */
enum ice_status
ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_chs_chg *tmp, *del;
	struct LIST_HEAD_TYPE chg;
	enum ice_status status;

	/* if target VSIG is default the move is invalid */
	if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
		return ICE_ERR_PARAM;

	INIT_LIST_HEAD(&chg);

	/* move VSI to the VSIG that matches */
	status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
	/* update hardware if success */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	return status;
}

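/* Illustrative sketch: once one VSI has been bound to a VSIG (for example
 * via ice_add_prof_id_flow()), further VSIs known to share the same
 * characteristics can be attached directly. first_vsi and second_vsi are
 * hypothetical VSI numbers.
 *
 *	u16 vsig;
 *	enum ice_status status;
 *
 *	if (!ice_vsig_find_vsi(hw, ICE_BLK_RSS, first_vsi, &vsig) && vsig)
 *		status = ice_add_vsi_flow(hw, ICE_BLK_RSS, second_vsi, vsig);
 */
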
/**
 * ice_add_prof_id_flow - add profile flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to enable with the profile specified by ID
 * @hdl: profile handle
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the VSIs specified in the VSI
 * array. Once successfully called, the flow will be enabled.
 */
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct LIST_HEAD_TYPE union_lst;
	struct ice_chs_chg *tmp, *del;
	struct LIST_HEAD_TYPE chrs;
	struct LIST_HEAD_TYPE chg;
	enum ice_status status;
	u16 vsig, or_vsig = 0;

	INIT_LIST_HEAD(&union_lst);
	INIT_LIST_HEAD(&chrs);
	INIT_LIST_HEAD(&chg);

	/* Get profile */
	status = ice_get_prof(hw, blk, hdl, &chg);
	if (status)
		return status;

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool only_vsi;
		u16 ref;

		/* found in VSIG */
		or_vsig = vsig;

		/* make sure that there is no overlap/conflict between the new
		 * characteristics and the existing ones; we don't support that
		 * scenario
		 */
		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto err_ice_add_prof_id_flow;
		}

		/* last VSI in the VSIG? */
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_add_prof_id_flow;
		only_vsi = (ref == 1);

		/* create a union of the current profiles and the one being
		 * added
		 */
		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
		if (status)
			goto err_ice_add_prof_id_flow;

		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
		if (status)
			goto err_ice_add_prof_id_flow;

		/* search for an existing VSIG with an exact charc match */
		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
		if (!status) {
			/* move VSI to the VSIG that matches */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* VSI has been moved out of or_vsig. If the or_vsig
			 * had only that VSI it is now empty and can be
			 * removed.
			 */
			if (only_vsi) {
				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
				if (status)
					goto err_ice_add_prof_id_flow;
			}
		} else if (only_vsi) {
			/* If the original VSIG only contains one VSI, then it
			 * will be the requesting VSI. In this case the VSI is
			 * not sharing entries and we can simply add the new
			 * profile to the VSIG.
			 */
			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* No match, so we need a new VSIG */
			status = ice_create_vsig_from_lst(hw, blk, vsi,
							  &union_lst, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	} else {
		/* need to find or add a VSIG */
		/* search for an existing VSIG with an exact charc match */
		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
			/* found an exact match */
			/* add or move VSI to the VSIG that matches */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* we did not find an exact match */
			/* we need to add a VSIG */
			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
							 &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	}

	/* update hardware */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_add_prof_id_flow:
	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
		LIST_DEL(&del1->list);
		ice_free(hw, del1);
	}

	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &chrs, ice_vsig_prof, list) {
		LIST_DEL(&del1->list);
		ice_free(hw, del1);
	}

	return status;
}

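/* Illustrative sketch: enabling a previously registered profile on a VSI.
 * The tracking ID 0x1234 and vsi_num are hypothetical and must match a
 * profile created earlier with ice_add_prof() on the same block.
 *
 *	enum ice_status status;
 *
 *	status = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi_num, 0x1234ULL);
 */
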
/**
 * ice_rem_prof_from_list - remove a profile from a list
 * @hw: pointer to the HW struct
 * @lst: list to remove the profile from
 * @hdl: the profile handle indicating the profile to remove
 */
static enum ice_status
ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
{
	struct ice_vsig_prof *ent, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) {
		if (ent->profile_cookie == hdl) {
			LIST_DEL(&ent->list);
			ice_free(hw, ent);
			return ICE_SUCCESS;
		}
	}

	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_rem_prof_id_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by ID
 * @hdl: profile tracking handle
 *
 * Calling this function will update the hardware tables to remove the
 * profile indicated by the ID parameter for the VSIs specified in the VSI
 * array. Once successfully called, the flow will be disabled.
 */
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct LIST_HEAD_TYPE chg, copy;
	struct ice_chs_chg *tmp, *del;
	enum ice_status status;
	u16 vsig;

	INIT_LIST_HEAD(&copy);
	INIT_LIST_HEAD(&chg);

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool last_profile;
		bool only_vsi;
		u16 ref;

		/* found in VSIG */
		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_rem_prof_id_flow;
		only_vsi = (ref == 1);

		if (only_vsi) {
			/* If the original VSIG only contains one reference,
			 * which will be the requesting VSI, then the VSI is not
			 * sharing entries and we can simply remove the specific
			 * characteristics from the VSIG.
			 */

			if (last_profile) {
				/* If there are no profiles left for this VSIG,
				 * then simply remove the VSIG.
				 */
				status = ice_rem_vsig(hw, blk, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				status = ice_rem_prof_id_vsig(hw, blk, vsig,
							      hdl, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}
		} else {
			/* Make a copy of the VSIG's list of Profiles */
			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
			if (status)
				goto err_ice_rem_prof_id_flow;

			/* Remove specified profile entry from the list */
			status = ice_rem_prof_from_list(hw, &copy, hdl);
			if (status)
				goto err_ice_rem_prof_id_flow;

			if (LIST_EMPTY(&copy)) {
				status = ice_move_vsi(hw, blk, vsi,
						      ICE_DEFAULT_VSIG, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
							    &vsig)) {
				/* found a VSIG with a matching profile list */
				/* move VSI to the matching VSIG */
				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				/* since no existing VSIG supports this
				 * characteristic pattern, we need to create a
				 * new VSIG and TCAM entries
				 */
				status = ice_create_vsig_from_lst(hw, blk, vsi,
								  &copy, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}
		}
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	/* update hardware tables */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_prof_id_flow:
	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
		LIST_DEL(&del1->list);
		ice_free(hw, del1);
	}

	return status;
}

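/* Illustrative sketch (hypothetical IDs): disabling the flow for one VSI and
 * then dropping the profile entirely once no VSI references it.
 *
 *	enum ice_status status;
 *
 *	status = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi_num, 0x1234ULL);
 *	if (!status)
 *		status = ice_rem_prof(hw, ICE_BLK_RSS, 0x1234ULL);
 */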