1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_common.h"
6 #include "ice_flex_pipe.h"
7 #include "ice_protocol_type.h"
10 /* To support tunneling entries by PF, the package will append the PF number to
11 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
13 static const struct ice_tunnel_type_scan tnls[] = {
14 { TNL_VXLAN, "TNL_VXLAN_PF" },
15 { TNL_GENEVE, "TNL_GENEVE_PF" },
19 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
23 ICE_SID_XLT_KEY_BUILDER_SW,
26 ICE_SID_PROFID_TCAM_SW,
27 ICE_SID_PROFID_REDIR_SW,
29 ICE_SID_CDID_KEY_BUILDER_SW,
36 ICE_SID_XLT_KEY_BUILDER_ACL,
39 ICE_SID_PROFID_TCAM_ACL,
40 ICE_SID_PROFID_REDIR_ACL,
42 ICE_SID_CDID_KEY_BUILDER_ACL,
43 ICE_SID_CDID_REDIR_ACL
49 ICE_SID_XLT_KEY_BUILDER_FD,
52 ICE_SID_PROFID_TCAM_FD,
53 ICE_SID_PROFID_REDIR_FD,
55 ICE_SID_CDID_KEY_BUILDER_FD,
62 ICE_SID_XLT_KEY_BUILDER_RSS,
65 ICE_SID_PROFID_TCAM_RSS,
66 ICE_SID_PROFID_REDIR_RSS,
68 ICE_SID_CDID_KEY_BUILDER_RSS,
69 ICE_SID_CDID_REDIR_RSS
75 ICE_SID_XLT_KEY_BUILDER_PE,
78 ICE_SID_PROFID_TCAM_PE,
79 ICE_SID_PROFID_REDIR_PE,
81 ICE_SID_CDID_KEY_BUILDER_PE,
87 * ice_sect_id - returns section ID
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */
94 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
96 return ice_sect_lkup[blk][sect];
101 * @buf: pointer to the ice buffer
103 * This helper function validates a buffer's header.
105 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
107 struct ice_buf_hdr *hdr;
111 hdr = (struct ice_buf_hdr *)buf->buf;
113 section_count = LE16_TO_CPU(hdr->section_count);
114 if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
117 data_end = LE16_TO_CPU(hdr->data_end);
118 if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
126 * @ice_seg: pointer to the ice segment
128 * Returns the address of the buffer table within the ice segment.
130 static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
132 struct ice_nvm_table *nvms;
134 nvms = (struct ice_nvm_table *)
135 (ice_seg->device_table +
136 LE32_TO_CPU(ice_seg->device_table_count));
138 return (_FORCE_ struct ice_buf_table *)
139 (nvms->vers + LE32_TO_CPU(nvms->table_count));
144 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
145 * @state: pointer to the enum state
147 * This function will enumerate all the buffers in the ice segment. The first
148 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
149 * ice_seg is set to NULL which continues the enumeration. When the function
150 * returns a NULL pointer, then the end of the buffers has been reached, or an
151 * unexpected value has been detected (for example an invalid section count or
152 * an invalid buffer end value).
154 static struct ice_buf_hdr *
155 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
158 state->buf_table = ice_find_buf_table(ice_seg);
159 if (!state->buf_table)
163 return ice_pkg_val_buf(state->buf_table->buf_array);
166 if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
167 return ice_pkg_val_buf(state->buf_table->buf_array +
174 * ice_pkg_advance_sect
175 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
176 * @state: pointer to the enum state
178 * This helper function will advance the section within the ice segment,
179 * also advancing the buffer if needed.
182 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
184 if (!ice_seg && !state->buf)
187 if (!ice_seg && state->buf)
188 if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
191 state->buf = ice_pkg_enum_buf(ice_seg, state);
195 /* start of new buffer, reset section index */
201 * ice_pkg_enum_section
202 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
203 * @state: pointer to the enum state
204 * @sect_type: section type to enumerate
206 * This function will enumerate all the sections of a particular type in the
207 * ice segment. The first call is made with the ice_seg parameter non-NULL;
208 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
209 * When the function returns a NULL pointer, then the end of the matching
210 * sections has been reached.
213 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
219 state->type = sect_type;
221 if (!ice_pkg_advance_sect(ice_seg, state))
224 /* scan for next matching section */
225 while (state->buf->section_entry[state->sect_idx].type !=
226 CPU_TO_LE32(state->type))
227 if (!ice_pkg_advance_sect(NULL, state))
230 /* validate section */
231 offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
232 if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
235 size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
236 if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
239 /* make sure the section fits in the buffer */
240 if (offset + size > ICE_PKG_BUF_SIZE)
244 LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
246 /* calc pointer to this section */
247 state->sect = ((u8 *)state->buf) +
248 LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
255 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
256 * @state: pointer to the enum state
257 * @sect_type: section type to enumerate
258 * @offset: pointer to variable that receives the offset in the table (optional)
259 * @handler: function that handles access to the entries into the section type
 * This function will enumerate all the entries of a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL, which continues the enumeration.
 * When the function returns a NULL pointer, the end of the entries has been
 * reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section
 * handler function must return the appropriate offset + index to give the
 * absolute offset for each entry. For example, if the base offset reported by
 * a section's header is 10, and the index for the entry is 2, then the
 * section handler function should set the offset to 10 + 2 = 12.
 */
279 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
280 u32 sect_type, u32 *offset,
281 void *(*handler)(u32 sect_type, void *section,
282 u32 index, u32 *offset))
290 if (!ice_pkg_enum_section(ice_seg, state, sect_type))
293 state->entry_idx = 0;
294 state->handler = handler;
303 entry = state->handler(state->sect_type, state->sect, state->entry_idx,
306 /* end of a section, look for another section of this type */
307 if (!ice_pkg_enum_section(NULL, state, 0))
310 state->entry_idx = 0;
311 entry = state->handler(state->sect_type, state->sect,
312 state->entry_idx, offset);
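/* Illustrative usage sketch (not part of the original driver): the two
 * helpers below are hypothetical and only demonstrate the first-call /
 * NULL-continuation contract of ice_pkg_enum_entry() described above. The
 * trivial handler reports each matching section exactly once, so the caller
 * can count how many sections of a given type exist in an ice segment.
 */
static void *
ice_example_first_entry_handler(u32 __ALWAYS_UNUSED sect_type, void *section,
				u32 index, u32 *offset)
{
	if (offset)
		*offset = 0;

	/* return only entry 0 of each section */
	return index ? NULL : section;
}

static u32 ice_example_count_sections(struct ice_seg *ice_seg, u32 sect_type)
{
	struct ice_pkg_enum state;
	void *entry;
	u32 count = 0;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	do {
		entry = ice_pkg_enum_entry(ice_seg, &state, sect_type, NULL,
					   ice_example_first_entry_handler);
		ice_seg = NULL; /* continue the enumeration on later calls */
		if (entry)
			count++;
	} while (entry);

	return count;
}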
319 * ice_boost_tcam_handler
320 * @sect_type: section type
321 * @section: pointer to section
322 * @index: index of the boost TCAM entry to be returned
323 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
325 * This is a callback function that can be passed to ice_pkg_enum_entry.
326 * Handles enumeration of individual boost TCAM entries.
329 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
331 struct ice_boost_tcam_section *boost;
336 if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
339 if (index > ICE_MAX_BST_TCAMS_IN_BUF)
345 boost = (struct ice_boost_tcam_section *)section;
346 if (index >= LE16_TO_CPU(boost->count))
349 return boost->tcam + index;
353 * ice_find_boost_entry
354 * @ice_seg: pointer to the ice segment (non-NULL)
355 * @addr: Boost TCAM address of entry to search for
356 * @entry: returns pointer to the entry
358 * Finds a particular Boost TCAM entry and returns a pointer to that entry
359 * if it is found. The ice_seg parameter must not be NULL since the first call
360 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
362 static enum ice_status
363 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
364 struct ice_boost_tcam_entry **entry)
366 struct ice_boost_tcam_entry *tcam;
367 struct ice_pkg_enum state;
369 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
372 return ICE_ERR_PARAM;
375 tcam = (struct ice_boost_tcam_entry *)
376 ice_pkg_enum_entry(ice_seg, &state,
377 ICE_SID_RXPARSER_BOOST_TCAM, NULL,
378 ice_boost_tcam_handler);
379 if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
392 * ice_label_enum_handler
393 * @sect_type: section type
394 * @section: pointer to section
395 * @index: index of the label entry to be returned
396 * @offset: pointer to receive absolute offset, always zero for label sections
398 * This is a callback function that can be passed to ice_pkg_enum_entry.
399 * Handles enumeration of individual label entries.
402 ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
405 struct ice_label_section *labels;
410 if (index > ICE_MAX_LABELS_IN_BUF)
416 labels = (struct ice_label_section *)section;
417 if (index >= LE16_TO_CPU(labels->count))
420 return labels->label + index;
425 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
426 * @type: the section type that will contain the label (0 on subsequent calls)
427 * @state: ice_pkg_enum structure that will hold the state of the enumeration
428 * @value: pointer to a value that will return the label's value if found
430 * Enumerates a list of labels in the package. The caller will call
431 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
 * the end of the list has been reached.
 */
436 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
439 struct ice_label *label;
441 /* Check for valid label section on first call */
442 if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
445 label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
447 ice_label_enum_handler);
451 *value = LE16_TO_CPU(label->value);
457 * @hw: pointer to the HW structure
458 * @ice_seg: pointer to the segment of the package scan (non-NULL)
460 * This function will scan the package and save off relevant information
461 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
 */
465 static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
467 struct ice_pkg_enum state;
472 ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
473 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
478 label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
481 while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
482 for (i = 0; tnls[i].type != TNL_LAST; i++) {
483 size_t len = strlen(tnls[i].label_prefix);
485 /* Look for matching label start, before continuing */
486 if (strncmp(label_name, tnls[i].label_prefix, len))
489 /* Make sure this label matches our PF. Note that the PF
490 * character ('0' - '7') will be located where our
491 * prefix string's null terminator is located.
493 if ((label_name[len] - '0') == hw->pf_id) {
494 hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
495 hw->tnl.tbl[hw->tnl.count].valid = false;
496 hw->tnl.tbl[hw->tnl.count].in_use = false;
497 hw->tnl.tbl[hw->tnl.count].marked = false;
498 hw->tnl.tbl[hw->tnl.count].boost_addr = val;
499 hw->tnl.tbl[hw->tnl.count].port = 0;
505 label_name = ice_enum_labels(NULL, 0, &state, &val);
508 /* Cache the appropriate boost TCAM entry pointers */
509 for (i = 0; i < hw->tnl.count; i++) {
510 ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
511 &hw->tnl.tbl[i].boost_entry);
512 if (hw->tnl.tbl[i].boost_entry)
513 hw->tnl.tbl[i].valid = true;
519 #define ICE_DC_KEY 0x1 /* don't care */
520 #define ICE_DC_KEYINV 0x1
521 #define ICE_NM_KEY 0x0 /* never match */
522 #define ICE_NM_KEYINV 0x0
523 #define ICE_0_KEY 0x1 /* match 0 */
524 #define ICE_0_KEYINV 0x0
525 #define ICE_1_KEY 0x0 /* match 1 */
526 #define ICE_1_KEYINV 0x1
529 * ice_gen_key_word - generate 16-bits of a key/mask word
531 * @valid: valid bits mask (change only the valid bits)
532 * @dont_care: don't care mask
533 * @nvr_mtch: never match mask
534 * @key: pointer to an array of where the resulting key portion
535 * @key_inv: pointer to an array of where the resulting key invert portion
 * This function generates 16 bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16 bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 *     '0' = b01, always match a 0 bit
 *     '1' = b10, always match a 1 bit
 *     '?' = b11, don't care bit (always matches)
 *     '~' = b00, never match bit
 *
 *     Example (val = b0 1 x x x x; the x bits are covered by the masks below):
 *          dont_care:   b0 0 1 1 0 0
 *          never_mtch:  b0 0 0 0 1 1
 *          ------------------------------
 * Result:  key:         b01 10 11 11 00 00
 */
553 static enum ice_status
554 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
557 u8 in_key = *key, in_key_inv = *key_inv;
560 /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
561 if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
567 /* encode the 8 bits into 8-bit key and 8-bit key invert */
568 for (i = 0; i < 8; i++) {
572 if (!(valid & 0x1)) { /* change only valid bits */
573 *key |= (in_key & 0x1) << 7;
574 *key_inv |= (in_key_inv & 0x1) << 7;
575 } else if (dont_care & 0x1) { /* don't care bit */
576 *key |= ICE_DC_KEY << 7;
577 *key_inv |= ICE_DC_KEYINV << 7;
578 } else if (nvr_mtch & 0x1) { /* never match bit */
579 *key |= ICE_NM_KEY << 7;
580 *key_inv |= ICE_NM_KEYINV << 7;
581 } else if (val & 0x01) { /* exact 1 match */
582 *key |= ICE_1_KEY << 7;
583 *key_inv |= ICE_1_KEYINV << 7;
584 } else { /* exact 0 match */
585 *key |= ICE_0_KEY << 7;
586 *key_inv |= ICE_0_KEYINV << 7;
601 * ice_bits_max_set - determine if the number of bits set is within a maximum
602 * @mask: pointer to the byte array which is the mask
603 * @size: the number of bytes in the mask
604 * @max: the max number of set bits
 * This function determines if there are at most 'max' bits set in an array.
 * Returns true if the number of bits set is <= max, false otherwise.
 */
610 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
615 /* check each byte */
616 for (i = 0; i < size; i++) {
617 /* if 0, go to next byte */
621 /* We know there is at least one set bit in this byte because of
622 * the above check; if we already have found 'max' number of
623 * bits set, then we can return failure now.
628 /* count the bits in this byte, checking threshold */
629 count += ice_hweight8(mask[i]);
638 * ice_set_key - generate a variable sized key with multiples of 16-bits
639 * @key: pointer to where the key will be stored
640 * @size: the size of the complete key in bytes (must be even)
641 * @val: array of 8-bit values that makes up the value portion of the key
642 * @upd: array of 8-bit masks that determine what key portion to update
643 * @dc: array of 8-bit masks that make up the don't care mask
644 * @nm: array of 8-bit masks that make up the never match mask
645 * @off: the offset of the first byte in the key to update
646 * @len: the number of bytes in the key update
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
650 * upd, dc, and nm are optional parameters, and can be NULL:
651 * upd == NULL --> upd mask is all 1's (update all bits)
652 * dc == NULL --> dc mask is all 0's (no don't care bits)
653 * nm == NULL --> nm mask is all 0's (no never match bits)
656 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
662 /* size must be a multiple of 2 bytes. */
665 half_size = size / 2;
667 if (off + len > half_size)
/* Make sure at most one bit is set in the never match mask. Having more
 * than one never match mask bit set will cause HW to consume excessive
 * power; this is a power management efficiency check.
 */
674 #define ICE_NVR_MTCH_BITS_MAX 1
675 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
678 for (i = 0; i < len; i++)
679 if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
680 dc ? dc[i] : 0, nm ? nm[i] : 0,
681 key + off + i, key + half_size + off + i))
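/* Usage sketch (illustrative only, not called by the driver): build a small
 * key/key-invert pair with ice_set_key(). The 8-byte buffer layout (4 bytes
 * of key followed by 4 bytes of key invert) and the 0xff preset (don't care)
 * for the untouched bits follow the description above; the buffer size and
 * values are arbitrary assumptions for the example.
 */
static enum ice_status ice_example_build_key(u8 *key_buf)
{
	u8 val[2] = { 0x12, 0x34 }; /* bytes that must match exactly */

	/* preset both halves to "don't care" (key bit = 1, invert bit = 1) */
	ice_memset(key_buf, 0xff, 8, ICE_NONDMA_MEM);

	/* update only the first two bytes; NULL upd/dc/nm masks mean
	 * "update all bits, no don't care bits, no never match bits"
	 */
	return ice_set_key(key_buf, 8, val, NULL, NULL, NULL, 0, sizeof(val));
}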
688 * ice_acquire_global_cfg_lock
689 * @hw: pointer to the HW structure
690 * @access: access type (read or write)
692 * This function will request ownership of the global config lock for reading
693 * or writing of the package. When attempting to obtain write access, the
694 * caller must check for the following two return values:
696 * ICE_SUCCESS - Means the caller has acquired the global config lock
697 * and can perform writing of the package.
698 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
699 * package or has found that no update was necessary; in
700 * this case, the caller can just skip performing any
701 * update of the package.
703 static enum ice_status
704 ice_acquire_global_cfg_lock(struct ice_hw *hw,
705 enum ice_aq_res_access_type access)
707 enum ice_status status;
709 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
711 status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
712 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
714 if (status == ICE_ERR_AQ_NO_WORK)
715 ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
721 * ice_release_global_cfg_lock
722 * @hw: pointer to the HW structure
724 * This function will release the global config lock.
726 static void ice_release_global_cfg_lock(struct ice_hw *hw)
728 ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
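/* Caller sketch (illustrative only): how a hypothetical package writer would
 * handle the two documented outcomes when requesting the global config lock
 * for write access. ICE_ERR_AQ_NO_WORK means another driver instance already
 * took care of the package, which the caller treats as success.
 */
static enum ice_status ice_example_locked_download(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status == ICE_ERR_AQ_NO_WORK)
		return ICE_SUCCESS; /* package already written, nothing to do */
	if (status)
		return status; /* genuine failure to acquire the lock */

	/* ... download the package buffers here ... */

	ice_release_global_cfg_lock(hw);
	return ICE_SUCCESS;
}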
732 * ice_acquire_change_lock
733 * @hw: pointer to the HW structure
734 * @access: access type (read or write)
736 * This function will request ownership of the change lock.
739 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
741 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
743 return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
744 ICE_CHANGE_LOCK_TIMEOUT);
748 * ice_release_change_lock
749 * @hw: pointer to the HW structure
751 * This function will release the change lock using the proper Admin Command.
753 void ice_release_change_lock(struct ice_hw *hw)
755 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
757 ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
761 * ice_aq_download_pkg
762 * @hw: pointer to the hardware structure
763 * @pkg_buf: the package buffer to transfer
764 * @buf_size: the size of the package buffer
765 * @last_buf: last buffer indicator
766 * @error_offset: returns error offset
767 * @error_info: returns error information
768 * @cd: pointer to command details structure or NULL
770 * Download Package (0x0C40)
772 static enum ice_status
773 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
774 u16 buf_size, bool last_buf, u32 *error_offset,
775 u32 *error_info, struct ice_sq_cd *cd)
777 struct ice_aqc_download_pkg *cmd;
778 struct ice_aq_desc desc;
779 enum ice_status status;
781 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
788 cmd = &desc.params.download_pkg;
789 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
790 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
793 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
795 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
796 if (status == ICE_ERR_AQ_ERROR) {
797 /* Read error from buffer only when the FW returned an error */
798 struct ice_aqc_download_pkg_resp *resp;
800 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
802 *error_offset = LE32_TO_CPU(resp->error_offset);
804 *error_info = LE32_TO_CPU(resp->error_info);
811 * ice_aq_upload_section
812 * @hw: pointer to the hardware structure
813 * @pkg_buf: the package buffer which will receive the section
814 * @buf_size: the size of the package buffer
815 * @cd: pointer to command details structure or NULL
817 * Upload Section (0x0C41)
820 ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
821 u16 buf_size, struct ice_sq_cd *cd)
823 struct ice_aq_desc desc;
825 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
826 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
827 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
829 return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
834 * @hw: pointer to the hardware structure
835 * @pkg_buf: the package cmd buffer
836 * @buf_size: the size of the package cmd buffer
837 * @last_buf: last buffer indicator
838 * @error_offset: returns error offset
839 * @error_info: returns error information
840 * @cd: pointer to command details structure or NULL
842 * Update Package (0x0C42)
844 static enum ice_status
845 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
846 bool last_buf, u32 *error_offset, u32 *error_info,
847 struct ice_sq_cd *cd)
849 struct ice_aqc_download_pkg *cmd;
850 struct ice_aq_desc desc;
851 enum ice_status status;
853 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
860 cmd = &desc.params.download_pkg;
861 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
862 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
865 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
867 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
868 if (status == ICE_ERR_AQ_ERROR) {
869 /* Read error from buffer only when the FW returned an error */
870 struct ice_aqc_download_pkg_resp *resp;
872 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
874 *error_offset = LE32_TO_CPU(resp->error_offset);
876 *error_info = LE32_TO_CPU(resp->error_info);
883 * ice_find_seg_in_pkg
884 * @hw: pointer to the hardware structure
885 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
886 * @pkg_hdr: pointer to the package header to be searched
888 * This function searches a package file for a particular segment type. On
889 * success it returns a pointer to the segment header, otherwise it will
892 static struct ice_generic_seg_hdr *
893 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
894 struct ice_pkg_hdr *pkg_hdr)
898 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
899 ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
900 pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
901 pkg_hdr->pkg_format_ver.update,
902 pkg_hdr->pkg_format_ver.draft);
904 /* Search all package segments for the requested segment type */
905 for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
906 struct ice_generic_seg_hdr *seg;
908 seg = (struct ice_generic_seg_hdr *)
909 ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
911 if (LE32_TO_CPU(seg->seg_type) == seg_type)
920 * @hw: pointer to the hardware structure
921 * @bufs: pointer to an array of buffers
922 * @count: the number of buffers in the array
924 * Obtains change lock and updates package.
927 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
929 enum ice_status status;
932 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
936 for (i = 0; i < count; i++) {
937 struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
938 bool last = ((i + 1) == count);
940 status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
941 last, &offset, &info, NULL);
944 ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
945 status, offset, info);
950 ice_release_change_lock(hw);
957 * @hw: pointer to the hardware structure
958 * @bufs: pointer to an array of buffers
959 * @count: the number of buffers in the array
961 * Obtains global config lock and downloads the package configuration buffers
962 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
963 * found indicates that the rest of the buffers are all metadata buffers.
965 static enum ice_status
966 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
968 enum ice_status status;
969 struct ice_buf_hdr *bh;
973 return ICE_ERR_PARAM;
975 /* If the first buffer's first section has its metadata bit set
976 * then there are no buffers to be downloaded, and the operation is
977 * considered a success.
979 bh = (struct ice_buf_hdr *)bufs;
980 if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
/* reset pkg_dwnld_status in case this function is called in the
 * reset/rebuild flow
 */
986 hw->pkg_dwnld_status = ICE_AQ_RC_OK;
988 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
990 if (status == ICE_ERR_AQ_NO_WORK)
991 hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
993 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
997 for (i = 0; i < count; i++) {
998 bool last = ((i + 1) == count);
1001 /* check next buffer for metadata flag */
1002 bh = (struct ice_buf_hdr *)(bufs + i + 1);
1004 /* A set metadata flag in the next buffer will signal
1005 * that the current buffer will be the last buffer
1008 if (LE16_TO_CPU(bh->section_count))
1009 if (LE32_TO_CPU(bh->section_entry[0].type) &
1014 bh = (struct ice_buf_hdr *)(bufs + i);
1016 status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
1017 &offset, &info, NULL);
1019 /* Save AQ status from download package */
1020 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
1022 ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
1023 status, offset, info);
1032 status = ice_set_vlan_mode(hw);
1034 ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
1038 ice_release_global_cfg_lock(hw);
1044 * ice_aq_get_pkg_info_list
1045 * @hw: pointer to the hardware structure
1046 * @pkg_info: the buffer which will receive the information list
1047 * @buf_size: the size of the pkg_info information buffer
1048 * @cd: pointer to command details structure or NULL
1050 * Get Package Info List (0x0C43)
1052 static enum ice_status
1053 ice_aq_get_pkg_info_list(struct ice_hw *hw,
1054 struct ice_aqc_get_pkg_info_resp *pkg_info,
1055 u16 buf_size, struct ice_sq_cd *cd)
1057 struct ice_aq_desc desc;
1059 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1060 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
1062 return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
1067 * @hw: pointer to the hardware structure
1068 * @ice_seg: pointer to the segment of the package to be downloaded
1070 * Handles the download of a complete package.
1072 static enum ice_status
1073 ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
1075 struct ice_buf_table *ice_buf_tbl;
1077 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1078 ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
1079 ice_seg->hdr.seg_format_ver.major,
1080 ice_seg->hdr.seg_format_ver.minor,
1081 ice_seg->hdr.seg_format_ver.update,
1082 ice_seg->hdr.seg_format_ver.draft);
1084 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
1085 LE32_TO_CPU(ice_seg->hdr.seg_type),
1086 LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
1088 ice_buf_tbl = ice_find_buf_table(ice_seg);
1090 ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
1091 LE32_TO_CPU(ice_buf_tbl->buf_count));
1093 return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
1094 LE32_TO_CPU(ice_buf_tbl->buf_count));
1099 * @hw: pointer to the hardware structure
1100 * @pkg_hdr: pointer to the driver's package hdr
1102 * Saves off the package details into the HW structure.
1104 static enum ice_status
1105 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
1107 struct ice_generic_seg_hdr *seg_hdr;
1109 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1111 return ICE_ERR_PARAM;
1113 seg_hdr = (struct ice_generic_seg_hdr *)
1114 ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
1116 struct ice_meta_sect *meta;
1117 struct ice_pkg_enum state;
1119 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1121 /* Get package information from the Metadata Section */
1122 meta = (struct ice_meta_sect *)
1123 ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
1126 ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
1130 hw->pkg_ver = meta->ver;
1131 ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
1132 ICE_NONDMA_TO_NONDMA);
1134 ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
1135 meta->ver.major, meta->ver.minor, meta->ver.update,
1136 meta->ver.draft, meta->name);
1138 hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
1139 ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
1140 sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
1142 ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
1143 seg_hdr->seg_format_ver.major,
1144 seg_hdr->seg_format_ver.minor,
1145 seg_hdr->seg_format_ver.update,
1146 seg_hdr->seg_format_ver.draft,
1149 ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
1158 * @hw: pointer to the hardware structure
1160 * Store details of the package currently loaded in HW into the HW structure.
1162 static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
1164 struct ice_aqc_get_pkg_info_resp *pkg_info;
1165 enum ice_status status;
1169 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1171 size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
1172 pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1174 return ICE_ERR_NO_MEMORY;
1176 status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
1178 goto init_pkg_free_alloc;
1180 for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
1181 #define ICE_PKG_FLAG_COUNT 4
1182 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
1185 if (pkg_info->pkg_info[i].is_active) {
1186 flags[place++] = 'A';
1187 hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
1188 hw->active_track_id =
1189 LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
1190 ice_memcpy(hw->active_pkg_name,
1191 pkg_info->pkg_info[i].name,
1192 sizeof(pkg_info->pkg_info[i].name),
1193 ICE_NONDMA_TO_NONDMA);
1194 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
1196 if (pkg_info->pkg_info[i].is_active_at_boot)
1197 flags[place++] = 'B';
1198 if (pkg_info->pkg_info[i].is_modified)
1199 flags[place++] = 'M';
1200 if (pkg_info->pkg_info[i].is_in_nvm)
1201 flags[place++] = 'N';
1203 ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
1204 i, pkg_info->pkg_info[i].ver.major,
1205 pkg_info->pkg_info[i].ver.minor,
1206 pkg_info->pkg_info[i].ver.update,
1207 pkg_info->pkg_info[i].ver.draft,
1208 pkg_info->pkg_info[i].name, flags);
1211 init_pkg_free_alloc:
1212 ice_free(hw, pkg_info);
1218 * ice_verify_pkg - verify package
1219 * @pkg: pointer to the package buffer
1220 * @len: size of the package buffer
1222 * Verifies various attributes of the package file, including length, format
1223 * version, and the requirement of at least one segment.
1225 static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1230 if (len < ice_struct_size(pkg, seg_offset, 1))
1231 return ICE_ERR_BUF_TOO_SHORT;
1233 if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1234 pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1235 pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1236 pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1239 /* pkg must have at least one segment */
1240 seg_count = LE32_TO_CPU(pkg->seg_count);
1244 /* make sure segment array fits in package length */
1245 if (len < ice_struct_size(pkg, seg_offset, seg_count))
1246 return ICE_ERR_BUF_TOO_SHORT;
1248 /* all segments must fit within length */
1249 for (i = 0; i < seg_count; i++) {
1250 u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
1251 struct ice_generic_seg_hdr *seg;
1253 /* segment header must fit */
1254 if (len < off + sizeof(*seg))
1255 return ICE_ERR_BUF_TOO_SHORT;
1257 seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1259 /* segment body must fit */
1260 if (len < off + LE32_TO_CPU(seg->seg_size))
1261 return ICE_ERR_BUF_TOO_SHORT;
1268 * ice_free_seg - free package segment pointer
1269 * @hw: pointer to the hardware structure
 * Frees the package segment pointer in the proper manner, depending on whether
 * the segment was allocated or just the passed-in pointer was stored.
 */
1274 void ice_free_seg(struct ice_hw *hw)
1277 ice_free(hw, hw->pkg_copy);
1278 hw->pkg_copy = NULL;
1285 * ice_init_pkg_regs - initialize additional package registers
1286 * @hw: pointer to the hardware structure
1288 static void ice_init_pkg_regs(struct ice_hw *hw)
1290 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1291 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1292 #define ICE_SW_BLK_IDX 0
1293 if (hw->dcf_enabled)
1296 /* setup Switch block input mask, which is 48-bits in two parts */
1297 wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1298 wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1302 * ice_chk_pkg_version - check package version for compatibility with driver
1303 * @pkg_ver: pointer to a version structure to check
1305 * Check to make sure that the package about to be downloaded is compatible with
1306 * the driver. To be compatible, the major and minor components of the package
 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
1310 static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1312 if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
1313 pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
1314 return ICE_ERR_NOT_SUPPORTED;
1320 * ice_chk_pkg_compat
1321 * @hw: pointer to the hardware structure
1322 * @ospkg: pointer to the package hdr
1323 * @seg: pointer to the package segment hdr
1325 * This function checks the package version compatibility with driver and NVM
1327 static enum ice_status
1328 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1329 struct ice_seg **seg)
1331 struct ice_aqc_get_pkg_info_resp *pkg;
1332 enum ice_status status;
1336 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1338 /* Check package version compatibility */
1339 status = ice_chk_pkg_version(&hw->pkg_ver);
1341 ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1345 /* find ICE segment in given package */
1346 *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
1349 ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1353 /* Check if FW is compatible with the OS package */
1354 size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
1355 pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1357 return ICE_ERR_NO_MEMORY;
1359 status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
1361 goto fw_ddp_compat_free_alloc;
1363 for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
1364 /* loop till we find the NVM package */
1365 if (!pkg->pkg_info[i].is_in_nvm)
1367 if ((*seg)->hdr.seg_format_ver.major !=
1368 pkg->pkg_info[i].ver.major ||
1369 (*seg)->hdr.seg_format_ver.minor >
1370 pkg->pkg_info[i].ver.minor) {
1371 status = ICE_ERR_FW_DDP_MISMATCH;
1372 ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1374 /* done processing NVM package so break */
1377 fw_ddp_compat_free_alloc:
1384 * @sect_type: section type
1385 * @section: pointer to section
1386 * @index: index of the field vector entry to be returned
1387 * @offset: ptr to variable that receives the offset in the field vector table
1389 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * This function treats the given section as being of type ice_sw_fv_section
 * and enumerates its field vector entries. "offset" is an index into the
 * field vector table.
 */
1394 ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1396 struct ice_sw_fv_section *fv_section =
1397 (struct ice_sw_fv_section *)section;
1399 if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1401 if (index >= LE16_TO_CPU(fv_section->count))
/* "index" passed in to this function is relative to a given
 * 4k block. To get to the true index into the field vector
 * table, we need to add the relative index to the base_offset
 * field of this section.
 */
1409 *offset = LE16_TO_CPU(fv_section->base_offset) + index;
1410 return fv_section->fv + index;
 * ice_get_prof_index_max - get the max index of the profiles in use
 * @hw: pointer to the HW struct
 *
 * Calling this function will determine the maximum index of the profiles that
 * are in use and store that index in struct ice_switch_info *switch_info in
 * hw for later use.
 */
1421 static int ice_get_prof_index_max(struct ice_hw *hw)
1423 u16 prof_index = 0, j, max_prof_index = 0;
1424 struct ice_pkg_enum state;
1425 struct ice_seg *ice_seg;
1430 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1433 return ICE_ERR_PARAM;
1438 fv = (struct ice_fv *)
1439 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1440 &offset, ice_sw_fv_handler);
/* in a profile that is not in use, the prot_id is set to 0xff
 * and the off is set to 0x1ff for all the field vectors.
 */
1448 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1449 if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1450 fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1452 if (flag && prof_index > max_prof_index)
1453 max_prof_index = prof_index;
1459 hw->switch_info->max_used_prof_index = max_prof_index;
1465 * ice_init_pkg - initialize/download package
1466 * @hw: pointer to the hardware structure
1467 * @buf: pointer to the package buffer
1468 * @len: size of the package buffer
1470 * This function initializes a package. The package contains HW tables
1471 * required to do packet processing. First, the function extracts package
1472 * information such as version. Then it finds the ice configuration segment
1473 * within the package; this function then saves a copy of the segment pointer
1474 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
1476 * a previous PF driver has already downloaded the package successfully, then
1477 * the current driver will not have to download the package again.
1479 * The local package contents will be used to query default behavior and to
1480 * update specific sections of the HW's version of the package (e.g. to update
1481 * the parse graph to understand new protocols).
1483 * This function stores a pointer to the package buffer memory, and it is
1484 * expected that the supplied buffer will not be freed immediately. If the
1485 * package buffer needs to be freed, such as when read from a file, use
1486 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1489 enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1491 struct ice_pkg_hdr *pkg;
1492 enum ice_status status;
1493 struct ice_seg *seg;
1496 return ICE_ERR_PARAM;
1498 pkg = (struct ice_pkg_hdr *)buf;
1499 status = ice_verify_pkg(pkg, len);
1501 ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1506 /* initialize package info */
1507 status = ice_init_pkg_info(hw, pkg);
1511 /* before downloading the package, check package version for
1512 * compatibility with driver
1514 status = ice_chk_pkg_compat(hw, pkg, &seg);
1518 /* initialize package hints and then download package */
1519 ice_init_pkg_hints(hw, seg);
1520 status = ice_download_pkg(hw, seg);
1521 if (status == ICE_ERR_AQ_NO_WORK) {
1522 ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1523 status = ICE_SUCCESS;
1526 /* Get information on the package currently loaded in HW, then make sure
1527 * the driver is compatible with this version.
1530 status = ice_get_pkg_info(hw);
1532 status = ice_chk_pkg_version(&hw->active_pkg_ver);
1537 /* on successful package download update other required
1538 * registers to support the package and fill HW tables
1539 * with package content.
1541 ice_init_pkg_regs(hw);
1542 ice_fill_blk_tbls(hw);
1543 ice_get_prof_index_max(hw);
1545 ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1553 * ice_copy_and_init_pkg - initialize/download a copy of the package
1554 * @hw: pointer to the hardware structure
1555 * @buf: pointer to the package buffer
1556 * @len: size of the package buffer
1558 * This function copies the package buffer, and then calls ice_init_pkg() to
1559 * initialize the copied package contents.
1561 * The copying is necessary if the package buffer supplied is constant, or if
1562 * the memory may disappear shortly after calling this function.
1564 * If the package buffer resides in the data segment and can be modified, the
1565 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1567 * However, if the package buffer needs to be copied first, such as when being
1568 * read from a file, the caller should use ice_copy_and_init_pkg().
1570 * This function will first copy the package buffer, before calling
1571 * ice_init_pkg(). The caller is free to immediately destroy the original
1572 * package buffer, as the new copy will be managed by this function and
1575 enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1577 enum ice_status status;
1581 return ICE_ERR_PARAM;
1583 buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
1585 status = ice_init_pkg(hw, buf_copy, len);
1587 /* Free the copy, since we failed to initialize the package */
1588 ice_free(hw, buf_copy);
1590 /* Track the copied pkg so we can free it later */
1591 hw->pkg_copy = buf_copy;
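/* Caller sketch (illustrative only): a hypothetical PF init path that loads a
 * DDP image from a caller-owned buffer. ice_copy_and_init_pkg() is used
 * because the buffer may be freed right after the call, exactly the case
 * described above; on success the active package version recorded by
 * ice_get_pkg_info() is available in hw->active_pkg_ver.
 */
static enum ice_status
ice_example_load_ddp(struct ice_hw *hw, const u8 *ddp_buf, u32 ddp_len)
{
	enum ice_status status;

	status = ice_copy_and_init_pkg(hw, ddp_buf, ddp_len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "DDP load failed: %d\n", status);
		return status;
	}

	ice_debug(hw, ICE_DBG_INIT, "DDP loaded, active pkg %d.%d.%d.%d\n",
		  hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
		  hw->active_pkg_ver.update, hw->active_pkg_ver.draft);

	return ICE_SUCCESS;
}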
1600 * @hw: pointer to the HW structure
1602 * Allocates a package buffer and returns a pointer to the buffer header.
1603 * Note: all package contents must be in Little Endian form.
1605 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1607 struct ice_buf_build *bld;
1608 struct ice_buf_hdr *buf;
1610 bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
1614 buf = (struct ice_buf_hdr *)bld;
1615 buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
1621 * ice_get_sw_prof_type - determine switch profile type
1622 * @hw: pointer to the HW structure
1623 * @fv: pointer to the switch field vector
1625 static enum ice_prof_type
1626 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
1630 for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1631 /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
1632 if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1633 fv->ew[i].off == ICE_VNI_OFFSET)
1634 return ICE_PROF_TUN_UDP;
1636 /* GRE tunnel will have GRE protocol */
1637 if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1638 return ICE_PROF_TUN_GRE;
1640 /* PPPOE tunnel will have PPPOE protocol */
1641 if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
1642 return ICE_PROF_TUN_PPPOE;
1645 return ICE_PROF_NON_TUN;
1649 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1650 * @hw: pointer to hardware structure
1651 * @req_profs: type of profiles requested
1652 * @bm: pointer to memory for returning the bitmap of field vectors
1655 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1658 struct ice_pkg_enum state;
1659 struct ice_seg *ice_seg;
1662 if (req_profs == ICE_PROF_ALL) {
1663 ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
1667 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1668 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
1671 enum ice_prof_type prof_type;
1674 fv = (struct ice_fv *)
1675 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1676 &offset, ice_sw_fv_handler);
1680 /* Determine field vector type */
1681 prof_type = ice_get_sw_prof_type(hw, fv);
1683 if (req_profs & prof_type)
1684 ice_set_bit((u16)offset, bm);
1690 * ice_get_sw_fv_list
1691 * @hw: pointer to the HW structure
1692 * @prot_ids: field vector to search for with a given protocol ID
1693 * @ids_cnt: lookup/protocol count
1694 * @bm: bitmap of field vectors to consider
1695 * @fv_list: Head of a list
1697 * Finds all the field vector entries from switch block that contain
1698 * a given protocol ID and returns a list of structures of type
1699 * "ice_sw_fv_list_entry". Every structure in the list has a field vector
1700 * definition and profile ID information
1701 * NOTE: The caller of the function is responsible for freeing the memory
1702 * allocated for every list entry.
1705 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
1706 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
1708 struct ice_sw_fv_list_entry *fvl;
1709 struct ice_sw_fv_list_entry *tmp;
1710 struct ice_pkg_enum state;
1711 struct ice_seg *ice_seg;
1715 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1717 if (!ids_cnt || !hw->seg)
1718 return ICE_ERR_PARAM;
1724 fv = (struct ice_fv *)
1725 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1726 &offset, ice_sw_fv_handler);
1731 /* If field vector is not in the bitmap list, then skip this
1734 if (!ice_is_bit_set(bm, (u16)offset))
1737 for (i = 0; i < ids_cnt; i++) {
1740 /* This code assumes that if a switch field vector line
1741 * has a matching protocol, then this line will contain
1742 * the entries necessary to represent every field in
1743 * that protocol header.
1745 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1746 if (fv->ew[j].prot_id == prot_ids[i])
1748 if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1750 if (i + 1 == ids_cnt) {
1751 fvl = (struct ice_sw_fv_list_entry *)
1752 ice_malloc(hw, sizeof(*fvl));
1756 fvl->profile_id = offset;
1757 LIST_ADD(&fvl->list_entry, fv_list);
1762 if (LIST_EMPTY(fv_list))
1767 LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
1769 LIST_DEL(&fvl->list_entry);
1773 return ICE_ERR_NO_MEMORY;
1777 * ice_init_prof_result_bm - Initialize the profile result index bitmap
1778 * @hw: pointer to hardware structure
1780 void ice_init_prof_result_bm(struct ice_hw *hw)
1782 struct ice_pkg_enum state;
1783 struct ice_seg *ice_seg;
1786 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1796 fv = (struct ice_fv *)
1797 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1798 &off, ice_sw_fv_handler);
1803 ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
1806 /* Determine empty field vector indices, these can be
1807 * used for recipe results. Skip index 0, since it is
1808 * always used for Switch ID.
1810 for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1811 if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1812 fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1814 hw->switch_info->prof_res_bm[off]);
1820 * @hw: pointer to the HW structure
1821 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1823 * Frees a package buffer
1825 void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1831 * ice_pkg_buf_reserve_section
1832 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1833 * @count: the number of sections to reserve
 * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as the calls are made before calling
 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
 * called, the number of sections that can be allocated can no longer be
 * increased; not using all reserved sections is fine, but this will result in
 * some wasted space in the buffer.
1841 * Note: all package contents must be in Little Endian form.
1843 static enum ice_status
1844 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1846 struct ice_buf_hdr *buf;
1851 return ICE_ERR_PARAM;
1853 buf = (struct ice_buf_hdr *)&bld->buf;
1855 /* already an active section, can't increase table size */
1856 section_count = LE16_TO_CPU(buf->section_count);
1857 if (section_count > 0)
1860 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1862 bld->reserved_section_table_entries += count;
1864 data_end = LE16_TO_CPU(buf->data_end) +
1865 FLEX_ARRAY_SIZE(buf, section_entry, count);
1866 buf->data_end = CPU_TO_LE16(data_end);
1872 * ice_pkg_buf_alloc_section
1873 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1874 * @type: the section type value
1875 * @size: the size of the section to reserve (in bytes)
 * Reserves memory in the buffer for a section's content and updates the
 * buffer's status accordingly. This routine returns a pointer to the first
 * byte of the section start within the buffer, which is used to fill in the
 * section contents.
1881 * Note: all package contents must be in Little Endian form.
1884 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1886 struct ice_buf_hdr *buf;
1890 if (!bld || !type || !size)
1893 buf = (struct ice_buf_hdr *)&bld->buf;
1895 /* check for enough space left in buffer */
1896 data_end = LE16_TO_CPU(buf->data_end);
1898 /* section start must align on 4 byte boundary */
1899 data_end = ICE_ALIGN(data_end, 4);
1901 if ((data_end + size) > ICE_MAX_S_DATA_END)
1904 /* check for more available section table entries */
1905 sect_count = LE16_TO_CPU(buf->section_count);
1906 if (sect_count < bld->reserved_section_table_entries) {
1907 void *section_ptr = ((u8 *)buf) + data_end;
1909 buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
1910 buf->section_entry[sect_count].size = CPU_TO_LE16(size);
1911 buf->section_entry[sect_count].type = CPU_TO_LE32(type);
1914 buf->data_end = CPU_TO_LE16(data_end);
1916 buf->section_count = CPU_TO_LE16(sect_count + 1);
1920 /* no free section table entries */
1925 * ice_pkg_buf_alloc_single_section
1926 * @hw: pointer to the HW structure
1927 * @type: the section type value
1928 * @size: the size of the section to reserve (in bytes)
1929 * @section: returns pointer to the section
1931 * Allocates a package buffer with a single section.
1932 * Note: all package contents must be in Little Endian form.
1934 struct ice_buf_build *
1935 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
1938 struct ice_buf_build *buf;
1943 buf = ice_pkg_buf_alloc(hw);
1947 if (ice_pkg_buf_reserve_section(buf, 1))
1948 goto ice_pkg_buf_alloc_single_section_err;
1950 *section = ice_pkg_buf_alloc_section(buf, type, size);
1952 goto ice_pkg_buf_alloc_single_section_err;
1956 ice_pkg_buf_alloc_single_section_err:
1957 ice_pkg_buf_free(hw, buf);
1962 * ice_pkg_buf_get_active_sections
1963 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1965 * Returns the number of active sections. Before using the package buffer
1966 * in an update package command, the caller should make sure that there is at
 * least one active section - otherwise, the buffer is not legal and should
 * not be sent.
1969 * Note: all package contents must be in Little Endian form.
1971 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
1973 struct ice_buf_hdr *buf;
1978 buf = (struct ice_buf_hdr *)&bld->buf;
1979 return LE16_TO_CPU(buf->section_count);
1984 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1986 * Return a pointer to the buffer's header
1988 struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
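/* Lifecycle sketch (illustrative only): build a minimal one-section update
 * buffer and hand it to ice_update_pkg(). The section type and size are
 * placeholder parameters; a real caller such as ice_create_tunnel() fills the
 * section contents before issuing the update.
 */
static enum ice_status
ice_example_send_one_section(struct ice_hw *hw, u32 sect_type, u16 sect_size)
{
	struct ice_buf_build *bld;
	enum ice_status status;
	void *sect;

	bld = ice_pkg_buf_alloc(hw);
	if (!bld)
		return ICE_ERR_NO_MEMORY;

	/* reserve the section table entry before any section is allocated */
	status = ice_pkg_buf_reserve_section(bld, 1);
	if (status)
		goto err;

	sect = ice_pkg_buf_alloc_section(bld, sect_type, sect_size);
	if (!sect) {
		status = ICE_ERR_NO_MEMORY;
		goto err;
	}

	/* ... fill the section contents through 'sect' here ... */

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
err:
	ice_pkg_buf_free(hw, bld);
	return status;
}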
1997 * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
1998 * @hw: pointer to the HW structure
1999 * @port: port to search for
2000 * @index: optionally returns index
 * Returns whether a port is already in use as a tunnel, and optionally its
 * index.
 */
2005 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
2009 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2010 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2020 * ice_tunnel_port_in_use
2021 * @hw: pointer to the HW structure
2022 * @port: port to search for
2023 * @index: optionally returns index
 * Returns whether a port is already in use as a tunnel, and optionally its
 * index.
 */
2028 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
2032 ice_acquire_lock(&hw->tnl_lock);
2033 res = ice_tunnel_port_in_use_hlpr(hw, port, index);
2034 ice_release_lock(&hw->tnl_lock);
2040 * ice_tunnel_get_type
2041 * @hw: pointer to the HW structure
2042 * @port: port to search for
 * @type: returns tunnel type
 *
 * For a given port number, returns the type of tunnel.
 */
2048 ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
2053 ice_acquire_lock(&hw->tnl_lock);
2055 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2056 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2057 *type = hw->tnl.tbl[i].type;
2062 ice_release_lock(&hw->tnl_lock);
2068 * ice_find_free_tunnel_entry
2069 * @hw: pointer to the HW structure
2070 * @type: tunnel type
2071 * @index: optionally returns index
2073 * Returns whether there is a free tunnel entry, and optionally its index
2076 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
2081 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2082 if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
2083 hw->tnl.tbl[i].type == type) {
2093 * ice_get_open_tunnel_port - retrieve an open tunnel port
2094 * @hw: pointer to the HW structure
2095 * @type: tunnel type (TNL_ALL will return any open port)
2096 * @port: returns open port
2099 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
2105 ice_acquire_lock(&hw->tnl_lock);
2107 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2108 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2109 (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
2110 *port = hw->tnl.tbl[i].port;
2115 ice_release_lock(&hw->tnl_lock);
2122 * @hw: pointer to the HW structure
2123 * @type: type of tunnel
2124 * @port: port of tunnel to create
2126 * Create a tunnel by updating the parse graph in the parser. We do that by
 * creating a package buffer with the tunnel info and issuing an update package
 * command.
 */
2131 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
2133 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2134 enum ice_status status = ICE_ERR_MAX_LIMIT;
2135 struct ice_buf_build *bld;
2138 ice_acquire_lock(&hw->tnl_lock);
2140 if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
2141 hw->tnl.tbl[index].ref++;
2142 status = ICE_SUCCESS;
2143 goto ice_create_tunnel_end;
2146 if (!ice_find_free_tunnel_entry(hw, type, &index)) {
2147 status = ICE_ERR_OUT_OF_RANGE;
2148 goto ice_create_tunnel_end;
2151 bld = ice_pkg_buf_alloc(hw);
2153 status = ICE_ERR_NO_MEMORY;
2154 goto ice_create_tunnel_end;
2157 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2158 if (ice_pkg_buf_reserve_section(bld, 2))
2159 goto ice_create_tunnel_err;
2161 sect_rx = (struct ice_boost_tcam_section *)
2162 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2163 ice_struct_size(sect_rx, tcam, 1));
2165 goto ice_create_tunnel_err;
2166 sect_rx->count = CPU_TO_LE16(1);
2168 sect_tx = (struct ice_boost_tcam_section *)
2169 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2170 ice_struct_size(sect_tx, tcam, 1));
2172 goto ice_create_tunnel_err;
2173 sect_tx->count = CPU_TO_LE16(1);
2175 /* copy original boost entry to update package buffer */
2176 ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2177 sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
/* over-write the never-match dest port key bits with the encoded port
 * number
 */
2182 ice_set_key((u8 *)§_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2183 (u8 *)&port, NULL, NULL, NULL,
2184 (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
2185 sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
2187 /* exact copy of entry to Tx section entry */
2188 ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
2189 ICE_NONDMA_TO_NONDMA);
2191 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2193 hw->tnl.tbl[index].port = port;
2194 hw->tnl.tbl[index].in_use = true;
2195 hw->tnl.tbl[index].ref = 1;
2198 ice_create_tunnel_err:
2199 ice_pkg_buf_free(hw, bld);
2201 ice_create_tunnel_end:
2202 ice_release_lock(&hw->tnl_lock);
2208 * ice_destroy_tunnel
2209 * @hw: pointer to the HW structure
2210 * @port: port of tunnel to destroy (ignored if the all parameter is true)
2211 * @all: flag that states to destroy all tunnels
2213 * Destroys a tunnel or all tunnels by creating an update package buffer
 * targeting the specific updates requested and then performing an update
 * package command.
 */
2217 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
2219 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2220 enum ice_status status = ICE_ERR_MAX_LIMIT;
2221 struct ice_buf_build *bld;
2227 ice_acquire_lock(&hw->tnl_lock);
2229 if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
2230 if (hw->tnl.tbl[index].ref > 1) {
2231 hw->tnl.tbl[index].ref--;
2232 status = ICE_SUCCESS;
2233 goto ice_destroy_tunnel_end;
2236 /* determine count */
2237 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2238 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2239 (all || hw->tnl.tbl[i].port == port))
2243 status = ICE_ERR_PARAM;
2244 goto ice_destroy_tunnel_end;
2247 /* size of section - there is at least one entry */
2248 size = ice_struct_size(sect_rx, tcam, count);
2250 bld = ice_pkg_buf_alloc(hw);
2252 status = ICE_ERR_NO_MEMORY;
2253 goto ice_destroy_tunnel_end;
2256 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2257 if (ice_pkg_buf_reserve_section(bld, 2))
2258 goto ice_destroy_tunnel_err;
2260 sect_rx = (struct ice_boost_tcam_section *)
2261 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2264 goto ice_destroy_tunnel_err;
2265 sect_rx->count = CPU_TO_LE16(count);
2267 sect_tx = (struct ice_boost_tcam_section *)
2268 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2271 goto ice_destroy_tunnel_err;
2272 sect_tx->count = CPU_TO_LE16(count);
2274 /* copy original boost entry to update package buffer, one copy to Rx
2275 * section, another copy to the Tx section
2277 for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2278 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2279 (all || hw->tnl.tbl[i].port == port)) {
2280 ice_memcpy(sect_rx->tcam + j,
2281 hw->tnl.tbl[i].boost_entry,
2282 sizeof(*sect_rx->tcam),
2283 ICE_NONDMA_TO_NONDMA);
2284 ice_memcpy(sect_tx->tcam + j,
2285 hw->tnl.tbl[i].boost_entry,
2286 sizeof(*sect_tx->tcam),
2287 ICE_NONDMA_TO_NONDMA);
2288 hw->tnl.tbl[i].marked = true;
2292 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2294 for (i = 0; i < hw->tnl.count &&
2295 i < ICE_TUNNEL_MAX_ENTRIES; i++)
2296 if (hw->tnl.tbl[i].marked) {
2297 hw->tnl.tbl[i].ref = 0;
2298 hw->tnl.tbl[i].port = 0;
2299 hw->tnl.tbl[i].in_use = false;
2300 hw->tnl.tbl[i].marked = false;
2303 ice_destroy_tunnel_err:
2304 ice_pkg_buf_free(hw, bld);
2306 ice_destroy_tunnel_end:
2307 ice_release_lock(&hw->tnl_lock);
2313 * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
2314 * @hw: pointer to the hardware structure
2315 * @blk: hardware block
2317 * @fv_idx: field vector word index
2318 * @prot: variable to receive the protocol ID
2319 * @off: variable to receive the protocol offset
2322 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
2325 struct ice_fv_word *fv_ext;
2327 if (prof >= hw->blk[blk].es.count)
2328 return ICE_ERR_PARAM;
2330 if (fv_idx >= hw->blk[blk].es.fvw)
2331 return ICE_ERR_PARAM;
2333 fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
2335 *prot = fv_ext[fv_idx].prot_id;
2336 *off = fv_ext[fv_idx].off;
2341 /* PTG Management */
2344 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
2345 * @hw: pointer to the hardware structure
2347 * @ptype: the ptype to search for
2348 * @ptg: pointer to variable that receives the PTG
2350 * This function will search the PTGs for a particular ptype, returning the
2351 * PTG ID that contains it through the PTG parameter, with the value of
2352 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
2354 static enum ice_status
2355 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2357 if (ptype >= ICE_XLT1_CNT || !ptg)
2358 return ICE_ERR_PARAM;
2360 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2365 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
2366 * @hw: pointer to the hardware structure
2368 * @ptg: the PTG to allocate
2370 * This function allocates a given packet type group ID specified by the PTG
2373 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2375 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2379 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
2380 * @hw: pointer to the hardware structure
2382 * @ptype: the ptype to remove
2383 * @ptg: the PTG to remove the ptype from
2385 * This function will remove the ptype from the specific PTG, and move it to
2386 * the default PTG (ICE_DEFAULT_PTG).
2388 static enum ice_status
2389 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2391 struct ice_ptg_ptype **ch;
2392 struct ice_ptg_ptype *p;
2394 if (ptype > ICE_XLT1_CNT - 1)
2395 return ICE_ERR_PARAM;
2397 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2398 return ICE_ERR_DOES_NOT_EXIST;
2400 /* Should not happen if .in_use is set, bad config */
2401 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2404 /* find the ptype within this PTG, and bypass the link over it */
2405 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2406 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2408 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2409 *ch = p->next_ptype;
2413 ch = &p->next_ptype;
2417 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2418 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2424 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
2425 * @hw: pointer to the hardware structure
2427 * @ptype: the ptype to add or move
2428 * @ptg: the PTG to add or move the ptype to
2430 * This function will either add or move a ptype to a particular PTG depending
2431 * on whether the ptype is already part of another group. Note that using a
2432 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
2435 static enum ice_status
2436 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2438 enum ice_status status;
2441 if (ptype > ICE_XLT1_CNT - 1)
2442 return ICE_ERR_PARAM;
2444 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2445 return ICE_ERR_DOES_NOT_EXIST;
2447 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2451 /* Is ptype already in the correct PTG? */
2452 if (original_ptg == ptg)
2455 /* Remove from original PTG and move back to the default PTG */
2456 if (original_ptg != ICE_DEFAULT_PTG)
2457 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2459 /* Moving to default PTG? Then we're done with this request */
2460 if (ptg == ICE_DEFAULT_PTG)
2463 /* Add ptype to PTG at beginning of list */
2464 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2465 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2466 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2467 &hw->blk[blk].xlt1.ptypes[ptype];
2469 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2470 hw->blk[blk].xlt1.t[ptype] = ptg;
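	/* Note: xlt1.t[] is the software shadow of the hardware XLT1 table
	 * (ptype -> PTG), while ptg_tbl[] keeps a linked list of member
	 * ptypes per PTG so that ice_ptg_remove_ptype() can unlink entries.
	 */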
2475 /* Block / table size info */
2476 struct ice_blk_size_details {
2477 u16 xlt1; /* # XLT1 entries */
2478 u16 xlt2; /* # XLT2 entries */
2479 u16 prof_tcam; /* # profile ID TCAM entries */
2480 u16 prof_id; /* # profile IDs */
2481 u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
2482 u16 prof_redir; /* # profile redirection entries */
2483 u16 es; /* # extraction sequence entries */
2484 u16 fvw; /* # field vector words */
2485 u8 overwrite; /* overwrite existing entries allowed */
2486 u8 reverse; /* reverse FV order */
2489 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2492 * XLT1 - Number of entries in XLT1 table
2493 * XLT2 - Number of entries in XLT2 table
2494 * TCAM - Number of entries in the Profile ID TCAM table
2495 * CDID - Control Domain ID of the hardware block
2496 * PRED - Number of entries in the Profile Redirection Table
2497 * FV - Number of entries in the Field Vector
2498 * FVW - Width (in WORDs) of the Field Vector
2499 * OVR - Overwrite existing table entries
2502 /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
2503 /* Overwrite , Reverse FV */
2504 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
2506 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
2508 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2510 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2512 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
2517 ICE_SID_XLT1_OFF = 0,
2520 ICE_SID_PR_REDIR_OFF,
2525 /* Characteristic handling */
2528 * ice_match_prop_lst - determine if properties of two lists match
2529 * @list1: first properties list
2530 * @list2: second properties list
2532 * Count, cookies and the order must match in order to be considered equivalent.
2535 ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
2537 struct ice_vsig_prof *tmp1;
2538 struct ice_vsig_prof *tmp2;
2542 /* compare counts */
2543 LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list)
2545 LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
2547 if (!count || count != chk_count)
2550 tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
2551 tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
2553 /* profile cookies must match, and in the exact same order, to take
2554 * into account priority
2557 if (tmp2->profile_cookie != tmp1->profile_cookie)
2560 tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
2561 tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
2567 /* VSIG Management */
2570 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2571 * @hw: pointer to the hardware structure
2573 * @vsi: VSI of interest
2574 * @vsig: pointer to receive the VSI group
2576 * This function will lookup the VSI entry in the XLT2 list and return
2577 * the VSI group it is associated with.
2580 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2582 if (!vsig || vsi >= ICE_MAX_VSI)
2583 return ICE_ERR_PARAM;
2585 /* As long as there's a default or valid VSIG associated with the input
2586 * VSI, the function returns success. Any handling of the VSIG will be
2587 * done by the following add, update or remove functions.
2589 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2595 * ice_vsig_alloc_val - allocate a new VSIG by value
2596 * @hw: pointer to the hardware structure
2598 * @vsig: the VSIG to allocate
2600 * This function will allocate a given VSIG specified by the VSIG parameter.
2602 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2604 u16 idx = vsig & ICE_VSIG_IDX_M;
2606 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2607 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2608 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2611 return ICE_VSIG_VALUE(idx, hw->pf_id);
2615 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2616 * @hw: pointer to the hardware structure
2619 * This function will iterate through the VSIG list and mark the first
2620 * unused entry for the new VSIG entry as used and return that value.
2622 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2626 for (i = 1; i < ICE_MAX_VSIGS; i++)
2627 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2628 return ice_vsig_alloc_val(hw, blk, i);
2630 return ICE_DEFAULT_VSIG;
2634 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2635 * @hw: pointer to the hardware structure
2637 * @chs: characteristic list
2638 * @vsig: returns the VSIG with the matching profiles, if found
2640 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2641 * a group have the same characteristic set. To check if there exists a VSIG
2642 * which has the same characteristics as the input characteristics, this
2643 * function will iterate through the XLT2 list and return the VSIG that has a
2644 * matching configuration. In order to make sure that priorities are accounted
2645 * for, the list must match exactly, including the order in which the
2646 * characteristics are listed.
2648 static enum ice_status
2649 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2650 struct LIST_HEAD_TYPE *chs, u16 *vsig)
2652 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2655 for (i = 0; i < xlt2->count; i++)
2656 if (xlt2->vsig_tbl[i].in_use &&
2657 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2658 *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2662 return ICE_ERR_DOES_NOT_EXIST;
2666 * ice_vsig_free - free VSI group
2667 * @hw: pointer to the hardware structure
2669 * @vsig: VSIG to remove
2671 * The function will remove all VSIs associated with the input VSIG and move
2672 * them to the DEFAULT_VSIG and mark the VSIG available.
2674 static enum ice_status
2675 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2677 struct ice_vsig_prof *dtmp, *del;
2678 struct ice_vsig_vsi *vsi_cur;
2681 idx = vsig & ICE_VSIG_IDX_M;
2682 if (idx >= ICE_MAX_VSIGS)
2683 return ICE_ERR_PARAM;
2685 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2686 return ICE_ERR_DOES_NOT_EXIST;
2688 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2690 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2691 /* If the VSIG has at least 1 VSI then iterate through the
2692 * list and remove the VSIs before deleting the group.
2695 /* remove all VSIs associated with this VSIG XLT2 entry */
2697 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2699 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2700 vsi_cur->changed = 1;
2701 vsi_cur->next_vsi = NULL;
2705 /* NULL terminate head of VSI list */
2706 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2709 /* free characteristic list */
2710 LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
2711 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2712 ice_vsig_prof, list) {
2713 LIST_DEL(&del->list);
2717 /* if VSIG characteristic list was cleared for reset
2718 * re-initialize the list head
2720 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2726 * ice_vsig_remove_vsi - remove VSI from VSIG
2727 * @hw: pointer to the hardware structure
2729 * @vsi: VSI to remove
2730 * @vsig: VSI group to remove from
2732 * The function will remove the input VSI from its VSI group and move it
2733 * to the DEFAULT_VSIG.
2735 static enum ice_status
2736 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2738 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2741 idx = vsig & ICE_VSIG_IDX_M;
2743 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2744 return ICE_ERR_PARAM;
2746 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2747 return ICE_ERR_DOES_NOT_EXIST;
2749 /* entry already in default VSIG, don't have to remove */
2750 if (idx == ICE_DEFAULT_VSIG)
2753 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2757 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2758 vsi_cur = (*vsi_head);
2760 /* iterate the VSI list, skip over the entry to be removed */
2762 if (vsi_tgt == vsi_cur) {
2763 (*vsi_head) = vsi_cur->next_vsi;
2766 vsi_head = &vsi_cur->next_vsi;
2767 vsi_cur = vsi_cur->next_vsi;
2770 /* verify if VSI was removed from group list */
2772 return ICE_ERR_DOES_NOT_EXIST;
2774 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2775 vsi_cur->changed = 1;
2776 vsi_cur->next_vsi = NULL;
2782 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
2783 * @hw: pointer to the hardware structure
2786 * @vsig: destination VSI group
2788 * This function will move or add the input VSI to the target VSIG.
2789 * The function will find the original VSIG the VSI belongs to and
2790 * move the entry to the DEFAULT_VSIG, update the original VSIG and
2791 * then move the entry to the new VSIG.
2793 static enum ice_status
2794 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2796 struct ice_vsig_vsi *tmp;
2797 enum ice_status status;
2800 idx = vsig & ICE_VSIG_IDX_M;
2802 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2803 return ICE_ERR_PARAM;
2805 /* if VSIG not in use and VSIG is not default type, this VSIG doesn't exist */
2808 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2809 vsig != ICE_DEFAULT_VSIG)
2810 return ICE_ERR_DOES_NOT_EXIST;
2812 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2816 /* no update required if vsigs match */
2817 if (orig_vsig == vsig)
2820 if (orig_vsig != ICE_DEFAULT_VSIG) {
2821 /* remove entry from orig_vsig and add to default VSIG */
2822 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2827 if (idx == ICE_DEFAULT_VSIG)
2830 /* Create VSI entry and add VSIG and prop_mask values */
2831 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2832 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2834 /* Add new entry to the head of the VSIG list */
2835 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2836 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2837 &hw->blk[blk].xlt2.vsis[vsi];
2838 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2839 hw->blk[blk].xlt2.t[vsi] = vsig;
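	/* xlt2.t[] mirrors the hardware XLT2 table (VSI -> VSIG); the
	 * vsis[]/vsig_tbl[] structures track the same membership as linked
	 * lists, and the 'changed' flag marks entries whose hardware state
	 * may still need to be updated later.
	 */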
2845 * ice_prof_has_mask_idx - determine if profile index masking is identical
2846 * @hw: pointer to the hardware structure
2848 * @prof: profile to check
2849 * @idx: profile index to check
2850 * @mask: mask to match
2853 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
2856 bool expect_no_mask = false;
2861 /* If mask is 0x0000 or 0xffff, then there is no masking */
2862 if (mask == 0 || mask == 0xffff)
2863 expect_no_mask = true;
2865 /* Scan the enabled masks on this profile, for the specified idx */
2866 for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
2867 hw->blk[blk].masks.count; i++)
2868 if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
2869 if (hw->blk[blk].masks.masks[i].in_use &&
2870 hw->blk[blk].masks.masks[i].idx == idx) {
2872 if (hw->blk[blk].masks.masks[i].mask == mask)
2877 if (expect_no_mask) {
2889 * ice_prof_has_mask - determine if profile masking is identical
2890 * @hw: pointer to the hardware structure
2892 * @prof: profile to check
2893 * @masks: masks to match
2896 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
2900 /* es->mask_ena[prof] will have the mask */
2901 for (i = 0; i < hw->blk[blk].es.fvw; i++)
2902 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
2909 * ice_find_prof_id_with_mask - find profile ID for a given field vector
2910 * @hw: pointer to the hardware structure
2912 * @fv: field vector to search for
2913 * @masks: masks for fv
2914 * @prof_id: receives the profile ID
2916 static enum ice_status
2917 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
2918 struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
2920 struct ice_es *es = &hw->blk[blk].es;
2923 /* For FD and RSS, we don't want to re-use an existing profile with the
2924 * same field vector and mask, as doing so would cause rule interference.
2926 if (blk == ICE_BLK_FD || blk == ICE_BLK_RSS)
2927 return ICE_ERR_DOES_NOT_EXIST;
2929 for (i = 0; i < (u8)es->count; i++) {
2930 u16 off = i * es->fvw;
2932 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2935 /* check if masks settings are the same for this profile */
2936 if (masks && !ice_prof_has_mask(hw, blk, i, masks))
2943 return ICE_ERR_DOES_NOT_EXIST;
2947 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
2948 * @blk: the block type
2949 * @rsrc_type: pointer to variable to receive the resource type
2951 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2955 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID;
2958 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID;
2961 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
2964 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
2967 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
2976 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
2977 * @blk: the block type
2978 * @rsrc_type: pointer to variable to receive the resource type
2980 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2984 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM;
2987 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM;
2990 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
2993 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
2996 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
3005 * ice_alloc_tcam_ent - allocate hardware TCAM entry
3006 * @hw: pointer to the HW struct
3007 * @blk: the block to allocate the TCAM for
3008 * @btm: true to allocate from bottom of table, false to allocate from top
3009 * @tcam_idx: pointer to variable to receive the TCAM entry
3011 * This function allocates a new entry in a Profile ID TCAM for a specific
3014 static enum ice_status
3015 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
3020 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3021 return ICE_ERR_PARAM;
3023 return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
3027 * ice_free_tcam_ent - free hardware TCAM entry
3028 * @hw: pointer to the HW struct
3029 * @blk: the block from which to free the TCAM entry
3030 * @tcam_idx: the TCAM entry to free
3032 * This function frees an entry in a Profile ID TCAM for a specific block.
3034 static enum ice_status
3035 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
3039 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3040 return ICE_ERR_PARAM;
3042 return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
3046 * ice_alloc_prof_id - allocate profile ID
3047 * @hw: pointer to the HW struct
3048 * @blk: the block to allocate the profile ID for
3049 * @prof_id: pointer to variable to receive the profile ID
3051 * This function allocates a new profile ID, which also corresponds to a Field
3052 * Vector (Extraction Sequence) entry.
3054 static enum ice_status
3055 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
3057 enum ice_status status;
3061 if (!ice_prof_id_rsrc_type(blk, &res_type))
3062 return ICE_ERR_PARAM;
3064 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
3066 *prof_id = (u8)get_prof;
3072 * ice_free_prof_id - free profile ID
3073 * @hw: pointer to the HW struct
3074 * @blk: the block from which to free the profile ID
3075 * @prof_id: the profile ID to free
3077 * This function frees a profile ID, which also corresponds to a Field Vector.
3079 static enum ice_status
3080 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3082 u16 tmp_prof_id = (u16)prof_id;
3085 if (!ice_prof_id_rsrc_type(blk, &res_type))
3086 return ICE_ERR_PARAM;
3088 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
3092 * ice_prof_inc_ref - increment reference count for profile
3093 * @hw: pointer to the HW struct
3094 * @blk: the block from which to free the profile ID
3095 * @prof_id: the profile ID for which to increment the reference count
3097 static enum ice_status
3098 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3100 if (prof_id > hw->blk[blk].es.count)
3101 return ICE_ERR_PARAM;
3103 hw->blk[blk].es.ref_count[prof_id]++;
3109 * ice_write_prof_mask_reg - write profile mask register
3110 * @hw: pointer to the HW struct
3111 * @blk: hardware block
3112 * @mask_idx: mask index
3113 * @idx: index of the FV which will use the mask
3114 * @mask: the 16-bit mask
3117 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
3125 offset = GLQF_HMASK(mask_idx);
3126 val = (idx << GLQF_HMASK_MSK_INDEX_S) &
3127 GLQF_HMASK_MSK_INDEX_M;
3128 val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
3131 offset = GLQF_FDMASK(mask_idx);
3132 val = (idx << GLQF_FDMASK_MSK_INDEX_S) &
3133 GLQF_FDMASK_MSK_INDEX_M;
3134 val |= (mask << GLQF_FDMASK_MASK_S) &
3138 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
3143 wr32(hw, offset, val);
3144 ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
3145 blk, idx, offset, val);
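	/* Each mask register pairs one FV word index with a 16-bit mask; a
	 * profile opts in to a subset of the per-PF mask entries via the
	 * GLQF_HMASK_SEL/GLQF_FDMASK_SEL enable bitmap written by
	 * ice_write_prof_mask_enable_res().
	 */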
3149 * ice_write_prof_mask_enable_res - write profile mask enable register
3150 * @hw: pointer to the HW struct
3151 * @blk: hardware block
3152 * @prof_id: profile ID
3153 * @enable_mask: enable mask
3156 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
3157 u16 prof_id, u32 enable_mask)
3163 offset = GLQF_HMASK_SEL(prof_id);
3166 offset = GLQF_FDMASK_SEL(prof_id);
3169 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
3174 wr32(hw, offset, enable_mask);
3175 ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
3176 blk, prof_id, offset, enable_mask);
3180 * ice_init_prof_masks - initialize profile masks for a block
3181 * @hw: pointer to the HW struct
3182 * @blk: hardware block
3184 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
3189 ice_init_lock(&hw->blk[blk].masks.lock);
3191 per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
3193 hw->blk[blk].masks.count = per_pf;
3194 hw->blk[blk].masks.first = hw->pf_id * per_pf;
3196 ice_memset(hw->blk[blk].masks.masks, 0,
3197 sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM);
3199 for (i = hw->blk[blk].masks.first;
3200 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3201 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3205 * ice_init_all_prof_masks - initialize profile masks for all supported blocks
3206 * @hw: pointer to the HW struct
3208 void ice_init_all_prof_masks(struct ice_hw *hw)
3210 ice_init_prof_masks(hw, ICE_BLK_RSS);
3211 ice_init_prof_masks(hw, ICE_BLK_FD);
3215 * ice_alloc_prof_mask - allocate profile mask
3216 * @hw: pointer to the HW struct
3217 * @blk: hardware block
3218 * @idx: index of FV which will use the mask
3219 * @mask: the 16-bit mask
3220 * @mask_idx: variable to receive the mask index
3222 static enum ice_status
3223 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
3226 bool found_unused = false, found_copy = false;
3227 enum ice_status status = ICE_ERR_MAX_LIMIT;
3228 u16 unused_idx = 0, copy_idx = 0;
3231 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3232 return ICE_ERR_PARAM;
3234 ice_acquire_lock(&hw->blk[blk].masks.lock);
3236 for (i = hw->blk[blk].masks.first;
3237 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3238 if (hw->blk[blk].masks.masks[i].in_use) {
3239 /* if mask is in use and it exactly duplicates the
3240 * desired mask and index, then it can be reused
3242 if (hw->blk[blk].masks.masks[i].mask == mask &&
3243 hw->blk[blk].masks.masks[i].idx == idx) {
3249 /* save off unused index, but keep searching in case
3250 * there is an exact match later on
3252 if (!found_unused) {
3253 found_unused = true;
3260 else if (found_unused)
3263 goto err_ice_alloc_prof_mask;
3265 /* update mask for a new entry */
3267 hw->blk[blk].masks.masks[i].in_use = true;
3268 hw->blk[blk].masks.masks[i].mask = mask;
3269 hw->blk[blk].masks.masks[i].idx = idx;
3270 hw->blk[blk].masks.masks[i].ref = 0;
3271 ice_write_prof_mask_reg(hw, blk, i, idx, mask);
3274 hw->blk[blk].masks.masks[i].ref++;
3276 status = ICE_SUCCESS;
3278 err_ice_alloc_prof_mask:
3279 ice_release_lock(&hw->blk[blk].masks.lock);
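	/* Allocation policy: an existing entry that exactly duplicates the
	 * requested (idx, mask) pair is reference-counted and reused;
	 * otherwise the first unused per-PF slot is programmed, and
	 * ICE_ERR_MAX_LIMIT is returned when neither is available.
	 */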
3285 * ice_free_prof_mask - free profile mask
3286 * @hw: pointer to the HW struct
3287 * @blk: hardware block
3288 * @mask_idx: index of mask
3290 static enum ice_status
3291 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
3293 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3294 return ICE_ERR_PARAM;
3296 if (!(mask_idx >= hw->blk[blk].masks.first &&
3297 mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
3298 return ICE_ERR_DOES_NOT_EXIST;
3300 ice_acquire_lock(&hw->blk[blk].masks.lock);
3302 if (!hw->blk[blk].masks.masks[mask_idx].in_use)
3303 goto exit_ice_free_prof_mask;
3305 if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
3306 hw->blk[blk].masks.masks[mask_idx].ref--;
3307 goto exit_ice_free_prof_mask;
3311 hw->blk[blk].masks.masks[mask_idx].in_use = false;
3312 hw->blk[blk].masks.masks[mask_idx].mask = 0;
3313 hw->blk[blk].masks.masks[mask_idx].idx = 0;
3315 /* update mask as unused entry */
3316 ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
3318 ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
3320 exit_ice_free_prof_mask:
3321 ice_release_lock(&hw->blk[blk].masks.lock);
3327 * ice_free_prof_masks - free all profile masks for a profile
3328 * @hw: pointer to the HW struct
3329 * @blk: hardware block
3330 * @prof_id: profile ID
3332 static enum ice_status
3333 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
3338 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3339 return ICE_ERR_PARAM;
3341 mask_bm = hw->blk[blk].es.mask_ena[prof_id];
3342 for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
3343 if (mask_bm & BIT(i))
3344 ice_free_prof_mask(hw, blk, i);
3350 * ice_shutdown_prof_masks - clear profile masks and release the masking lock
3351 * @hw: pointer to the HW struct
3352 * @blk: hardware block
3354 * This should be called before unloading the driver
3356 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
3360 ice_acquire_lock(&hw->blk[blk].masks.lock);
3362 for (i = hw->blk[blk].masks.first;
3363 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
3364 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3366 hw->blk[blk].masks.masks[i].in_use = false;
3367 hw->blk[blk].masks.masks[i].idx = 0;
3368 hw->blk[blk].masks.masks[i].mask = 0;
3371 ice_release_lock(&hw->blk[blk].masks.lock);
3372 ice_destroy_lock(&hw->blk[blk].masks.lock);
3376 * ice_shutdown_all_prof_masks - clear masks and release locks for all blocks
3377 * @hw: pointer to the HW struct
3379 * This should be called before unloading the driver
3381 void ice_shutdown_all_prof_masks(struct ice_hw *hw)
3383 ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
3384 ice_shutdown_prof_masks(hw, ICE_BLK_FD);
3388 * ice_update_prof_masking - set registers according to masking
3389 * @hw: pointer to the HW struct
3390 * @blk: hardware block
3391 * @prof_id: profile ID
3394 static enum ice_status
3395 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
3403 /* Only support FD and RSS masking, otherwise nothing to be done */
3404 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3407 for (i = 0; i < hw->blk[blk].es.fvw; i++)
3408 if (masks[i] && masks[i] != 0xFFFF) {
3409 if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
3410 ena_mask |= BIT(idx);
3412 /* not enough bitmaps */
3419 /* free any bitmaps we have allocated */
3420 for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
3421 if (ena_mask & BIT(i))
3422 ice_free_prof_mask(hw, blk, i);
3424 return ICE_ERR_OUT_OF_RANGE;
3427 /* enable the masks for this profile */
3428 ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
3430 /* store enabled masks with profile so that they can be freed later */
3431 hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
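	/* es.mask_ena[] keeps a per-profile bitmap of the hardware mask
	 * entries in use; ice_free_prof_masks() and ice_prof_has_mask_idx()
	 * consult it so masks can be released or compared later.
	 */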
3437 * ice_write_es - write an extraction sequence to hardware
3438 * @hw: pointer to the HW struct
3439 * @blk: the block in which to write the extraction sequence
3440 * @prof_id: the profile ID to write
3441 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
3444 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3445 struct ice_fv_word *fv)
3449 off = prof_id * hw->blk[blk].es.fvw;
3451 ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
3452 sizeof(*fv), ICE_NONDMA_MEM);
3453 hw->blk[blk].es.written[prof_id] = false;
3455 ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
3456 sizeof(*fv), ICE_NONDMA_TO_NONDMA);
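	/* es.t[] is only the software copy of the Field Vector table; the
	 * hardware itself is updated through the package update path (see
	 * ice_prof_bld_es(), which copies from es.t[] into the update
	 * buffer).
	 */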
3461 * ice_prof_dec_ref - decrement reference count for profile
3462 * @hw: pointer to the HW struct
3463 * @blk: the block from which to free the profile ID
3464 * @prof_id: the profile ID for which to decrement the reference count
3466 static enum ice_status
3467 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3469 if (prof_id > hw->blk[blk].es.count)
3470 return ICE_ERR_PARAM;
3472 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3473 if (!--hw->blk[blk].es.ref_count[prof_id]) {
3474 ice_write_es(hw, blk, prof_id, NULL);
3475 ice_free_prof_masks(hw, blk, prof_id);
3476 return ice_free_prof_id(hw, blk, prof_id);
3483 /* Block / table section IDs */
3484 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3488 ICE_SID_PROFID_TCAM_SW,
3489 ICE_SID_PROFID_REDIR_SW,
3496 ICE_SID_PROFID_TCAM_ACL,
3497 ICE_SID_PROFID_REDIR_ACL,
3504 ICE_SID_PROFID_TCAM_FD,
3505 ICE_SID_PROFID_REDIR_FD,
3512 ICE_SID_PROFID_TCAM_RSS,
3513 ICE_SID_PROFID_REDIR_RSS,
3520 ICE_SID_PROFID_TCAM_PE,
3521 ICE_SID_PROFID_REDIR_PE,
3527 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3528 * @hw: pointer to the hardware structure
3529 * @blk: the HW block to initialize
3531 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3535 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3538 ptg = hw->blk[blk].xlt1.t[pt];
3539 if (ptg != ICE_DEFAULT_PTG) {
3540 ice_ptg_alloc_val(hw, blk, ptg);
3541 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3547 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3548 * @hw: pointer to the hardware structure
3549 * @blk: the HW block to initialize
3551 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3555 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3558 vsig = hw->blk[blk].xlt2.t[vsi];
3560 ice_vsig_alloc_val(hw, blk, vsig);
3561 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3562 /* no changes at this time, since this has been
3563 * initialized from the original package
3565 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3571 * ice_init_sw_db - init software database from HW tables
3572 * @hw: pointer to the hardware structure
3574 static void ice_init_sw_db(struct ice_hw *hw)
3578 for (i = 0; i < ICE_BLK_COUNT; i++) {
3579 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3580 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3585 * ice_fill_tbl - Reads content of a single table type into database
3586 * @hw: pointer to the hardware structure
3587 * @block_id: Block ID of the table to copy
3588 * @sid: Section ID of the table to copy
3590 * Will attempt to read the entire content of a given table of a single block
3591 * into the driver database. We assume that the buffer will always
3592 * be as large or larger than the data contained in the package. If
3593 * this condition is not met, there is most likely an error in the package contents.
3596 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3598 u32 dst_len, sect_len, offset = 0;
3599 struct ice_prof_redir_section *pr;
3600 struct ice_prof_id_section *pid;
3601 struct ice_xlt1_section *xlt1;
3602 struct ice_xlt2_section *xlt2;
3603 struct ice_sw_fv_section *es;
3604 struct ice_pkg_enum state;
3608 /* if the HW segment pointer is null then the first iteration of
3609 * ice_pkg_enum_section() will fail. In this case the HW tables will
3610 * not be filled and the function returns success.
3613 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3617 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
3619 sect = ice_pkg_enum_section(hw->seg, &state, sid);
3623 case ICE_SID_XLT1_SW:
3624 case ICE_SID_XLT1_FD:
3625 case ICE_SID_XLT1_RSS:
3626 case ICE_SID_XLT1_ACL:
3627 case ICE_SID_XLT1_PE:
3628 xlt1 = (struct ice_xlt1_section *)sect;
3630 sect_len = LE16_TO_CPU(xlt1->count) *
3631 sizeof(*hw->blk[block_id].xlt1.t);
3632 dst = hw->blk[block_id].xlt1.t;
3633 dst_len = hw->blk[block_id].xlt1.count *
3634 sizeof(*hw->blk[block_id].xlt1.t);
3636 case ICE_SID_XLT2_SW:
3637 case ICE_SID_XLT2_FD:
3638 case ICE_SID_XLT2_RSS:
3639 case ICE_SID_XLT2_ACL:
3640 case ICE_SID_XLT2_PE:
3641 xlt2 = (struct ice_xlt2_section *)sect;
3642 src = (_FORCE_ u8 *)xlt2->value;
3643 sect_len = LE16_TO_CPU(xlt2->count) *
3644 sizeof(*hw->blk[block_id].xlt2.t);
3645 dst = (u8 *)hw->blk[block_id].xlt2.t;
3646 dst_len = hw->blk[block_id].xlt2.count *
3647 sizeof(*hw->blk[block_id].xlt2.t);
3649 case ICE_SID_PROFID_TCAM_SW:
3650 case ICE_SID_PROFID_TCAM_FD:
3651 case ICE_SID_PROFID_TCAM_RSS:
3652 case ICE_SID_PROFID_TCAM_ACL:
3653 case ICE_SID_PROFID_TCAM_PE:
3654 pid = (struct ice_prof_id_section *)sect;
3655 src = (u8 *)pid->entry;
3656 sect_len = LE16_TO_CPU(pid->count) *
3657 sizeof(*hw->blk[block_id].prof.t);
3658 dst = (u8 *)hw->blk[block_id].prof.t;
3659 dst_len = hw->blk[block_id].prof.count *
3660 sizeof(*hw->blk[block_id].prof.t);
3662 case ICE_SID_PROFID_REDIR_SW:
3663 case ICE_SID_PROFID_REDIR_FD:
3664 case ICE_SID_PROFID_REDIR_RSS:
3665 case ICE_SID_PROFID_REDIR_ACL:
3666 case ICE_SID_PROFID_REDIR_PE:
3667 pr = (struct ice_prof_redir_section *)sect;
3668 src = pr->redir_value;
3669 sect_len = LE16_TO_CPU(pr->count) *
3670 sizeof(*hw->blk[block_id].prof_redir.t);
3671 dst = hw->blk[block_id].prof_redir.t;
3672 dst_len = hw->blk[block_id].prof_redir.count *
3673 sizeof(*hw->blk[block_id].prof_redir.t);
3675 case ICE_SID_FLD_VEC_SW:
3676 case ICE_SID_FLD_VEC_FD:
3677 case ICE_SID_FLD_VEC_RSS:
3678 case ICE_SID_FLD_VEC_ACL:
3679 case ICE_SID_FLD_VEC_PE:
3680 es = (struct ice_sw_fv_section *)sect;
3682 sect_len = (u32)(LE16_TO_CPU(es->count) *
3683 hw->blk[block_id].es.fvw) *
3684 sizeof(*hw->blk[block_id].es.t);
3685 dst = (u8 *)hw->blk[block_id].es.t;
3686 dst_len = (u32)(hw->blk[block_id].es.count *
3687 hw->blk[block_id].es.fvw) *
3688 sizeof(*hw->blk[block_id].es.t);
3694 /* if the section offset exceeds destination length, terminate table fill */
3697 if (offset > dst_len)
3700 /* if the sum of section size and offset exceeds the destination size,
3701 * then we are out of bounds of the HW table size for that PF.
3702 * Change the section length to fill the remaining table space of that PF. */
3705 if ((offset + sect_len) > dst_len)
3706 sect_len = dst_len - offset;
3708 ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
3710 sect = ice_pkg_enum_section(NULL, &state, sid);
3715 * ice_fill_blk_tbls - Read package context for tables
3716 * @hw: pointer to the hardware structure
3718 * Reads the current package contents and populates the driver
3719 * database with the data iteratively for all advanced feature
3720 * blocks. Assume that the HW tables have been allocated.
3722 void ice_fill_blk_tbls(struct ice_hw *hw)
3726 for (i = 0; i < ICE_BLK_COUNT; i++) {
3727 enum ice_block blk_id = (enum ice_block)i;
3729 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3730 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3731 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3732 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3733 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
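	/* A rough sketch of the expected sequencing, based on how these
	 * helpers are documented above: ice_init_hw_tbls() allocates the
	 * shadow tables, the package is downloaded so hw->seg is valid, and
	 * then ice_fill_blk_tbls() copies the per-block sections from the
	 * package into those tables.
	 */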
3740 * ice_free_prof_map - free profile map
3741 * @hw: pointer to the hardware structure
3742 * @blk_idx: HW block index
3744 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3746 struct ice_es *es = &hw->blk[blk_idx].es;
3747 struct ice_prof_map *del, *tmp;
3749 ice_acquire_lock(&es->prof_map_lock);
3750 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
3751 ice_prof_map, list) {
3752 LIST_DEL(&del->list);
3755 INIT_LIST_HEAD(&es->prof_map);
3756 ice_release_lock(&es->prof_map_lock);
3760 * ice_free_flow_profs - free flow profile entries
3761 * @hw: pointer to the hardware structure
3762 * @blk_idx: HW block index
3764 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3766 struct ice_flow_prof *p, *tmp;
3768 ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
3769 LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
3770 ice_flow_prof, l_entry) {
3771 struct ice_flow_entry *e, *t;
3773 LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
3774 ice_flow_entry, l_entry)
3775 ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
3776 ICE_FLOW_ENTRY_HNDL(e));
3778 LIST_DEL(&p->l_entry);
3780 ice_free(hw, p->acts);
3782 ice_destroy_lock(&p->entries_lock);
3785 ice_release_lock(&hw->fl_profs_locks[blk_idx]);
3787 /* if driver is in reset and tables are being cleared
3788 * re-initialize the flow profile list heads
3790 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3794 * ice_free_vsig_tbl - free complete VSIG table entries
3795 * @hw: pointer to the hardware structure
3796 * @blk: the HW block on which to free the VSIG table entries
3798 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3802 if (!hw->blk[blk].xlt2.vsig_tbl)
3805 for (i = 1; i < ICE_MAX_VSIGS; i++)
3806 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3807 ice_vsig_free(hw, blk, i);
3811 * ice_free_hw_tbls - free hardware table memory
3812 * @hw: pointer to the hardware structure
3814 void ice_free_hw_tbls(struct ice_hw *hw)
3816 struct ice_rss_cfg *r, *rt;
3819 for (i = 0; i < ICE_BLK_COUNT; i++) {
3820 if (hw->blk[i].is_list_init) {
3821 struct ice_es *es = &hw->blk[i].es;
3823 ice_free_prof_map(hw, i);
3824 ice_destroy_lock(&es->prof_map_lock);
3825 ice_free_flow_profs(hw, i);
3826 ice_destroy_lock(&hw->fl_profs_locks[i]);
3828 hw->blk[i].is_list_init = false;
3830 ice_free_vsig_tbl(hw, (enum ice_block)i);
3831 ice_free(hw, hw->blk[i].xlt1.ptypes);
3832 ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
3833 ice_free(hw, hw->blk[i].xlt1.t);
3834 ice_free(hw, hw->blk[i].xlt2.t);
3835 ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
3836 ice_free(hw, hw->blk[i].xlt2.vsis);
3837 ice_free(hw, hw->blk[i].prof.t);
3838 ice_free(hw, hw->blk[i].prof_redir.t);
3839 ice_free(hw, hw->blk[i].es.t);
3840 ice_free(hw, hw->blk[i].es.ref_count);
3841 ice_free(hw, hw->blk[i].es.written);
3842 ice_free(hw, hw->blk[i].es.mask_ena);
3845 LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
3846 ice_rss_cfg, l_entry) {
3847 LIST_DEL(&r->l_entry);
3850 ice_destroy_lock(&hw->rss_locks);
3851 if (!hw->dcf_enabled)
3852 ice_shutdown_all_prof_masks(hw);
3853 ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
3857 * ice_init_flow_profs - init flow profile locks and list heads
3858 * @hw: pointer to the hardware structure
3859 * @blk_idx: HW block index
3861 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3863 ice_init_lock(&hw->fl_profs_locks[blk_idx]);
3864 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3868 * ice_clear_hw_tbls - clear HW tables and flow profiles
3869 * @hw: pointer to the hardware structure
3871 void ice_clear_hw_tbls(struct ice_hw *hw)
3875 for (i = 0; i < ICE_BLK_COUNT; i++) {
3876 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3877 struct ice_prof_tcam *prof = &hw->blk[i].prof;
3878 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3879 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3880 struct ice_es *es = &hw->blk[i].es;
3882 if (hw->blk[i].is_list_init) {
3883 ice_free_prof_map(hw, i);
3884 ice_free_flow_profs(hw, i);
3887 ice_free_vsig_tbl(hw, (enum ice_block)i);
3889 ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
3891 ice_memset(xlt1->ptg_tbl, 0,
3892 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
3894 ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
3897 ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
3899 ice_memset(xlt2->vsig_tbl, 0,
3900 xlt2->count * sizeof(*xlt2->vsig_tbl),
3902 ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
3905 ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
3907 ice_memset(prof_redir->t, 0,
3908 prof_redir->count * sizeof(*prof_redir->t),
3911 ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
3913 ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
3915 ice_memset(es->written, 0, es->count * sizeof(*es->written),
3917 ice_memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena),
3923 * ice_init_hw_tbls - init hardware table memory
3924 * @hw: pointer to the hardware structure
3926 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3930 ice_init_lock(&hw->rss_locks);
3931 INIT_LIST_HEAD(&hw->rss_list_head);
3932 if (!hw->dcf_enabled)
3933 ice_init_all_prof_masks(hw);
3934 for (i = 0; i < ICE_BLK_COUNT; i++) {
3935 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3936 struct ice_prof_tcam *prof = &hw->blk[i].prof;
3937 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3938 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3939 struct ice_es *es = &hw->blk[i].es;
3942 if (hw->blk[i].is_list_init)
3945 ice_init_flow_profs(hw, i);
3946 ice_init_lock(&es->prof_map_lock);
3947 INIT_LIST_HEAD(&es->prof_map);
3948 hw->blk[i].is_list_init = true;
3950 hw->blk[i].overwrite = blk_sizes[i].overwrite;
3951 es->reverse = blk_sizes[i].reverse;
3953 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3954 xlt1->count = blk_sizes[i].xlt1;
3956 xlt1->ptypes = (struct ice_ptg_ptype *)
3957 ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
3962 xlt1->ptg_tbl = (struct ice_ptg_entry *)
3963 ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
3968 xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
3972 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3973 xlt2->count = blk_sizes[i].xlt2;
3975 xlt2->vsis = (struct ice_vsig_vsi *)
3976 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
3981 xlt2->vsig_tbl = (struct ice_vsig_entry *)
3982 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
3983 if (!xlt2->vsig_tbl)
3986 for (j = 0; j < xlt2->count; j++)
3987 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3989 xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
3993 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3994 prof->count = blk_sizes[i].prof_tcam;
3995 prof->max_prof_id = blk_sizes[i].prof_id;
3996 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3997 prof->t = (struct ice_prof_tcam_entry *)
3998 ice_calloc(hw, prof->count, sizeof(*prof->t));
4003 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
4004 prof_redir->count = blk_sizes[i].prof_redir;
4005 prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
4006 sizeof(*prof_redir->t));
4011 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
4012 es->count = blk_sizes[i].es;
4013 es->fvw = blk_sizes[i].fvw;
4014 es->t = (struct ice_fv_word *)
4015 ice_calloc(hw, (u32)(es->count * es->fvw),
4020 es->ref_count = (u16 *)
4021 ice_calloc(hw, es->count, sizeof(*es->ref_count));
4026 es->written = (u8 *)
4027 ice_calloc(hw, es->count, sizeof(*es->written));
4032 es->mask_ena = (u32 *)
4033 ice_calloc(hw, es->count, sizeof(*es->mask_ena));
4041 ice_free_hw_tbls(hw);
4042 return ICE_ERR_NO_MEMORY;
4046 * ice_prof_gen_key - generate profile ID key
4047 * @hw: pointer to the HW struct
4048 * @blk: the block in which to write profile ID to
4049 * @ptg: packet type group (PTG) portion of key
4050 * @vsig: VSIG portion of key
4051 * @cdid: CDID portion of key
4052 * @flags: flag portion of key
4053 * @vl_msk: valid mask
4054 * @dc_msk: don't care mask
4055 * @nm_msk: never match mask
4056 * @key: output of profile ID key
4058 static enum ice_status
4059 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
4060 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4061 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
4062 u8 key[ICE_TCAM_KEY_SZ])
4064 struct ice_prof_id_key inkey;
4067 inkey.xlt2_cdid = CPU_TO_LE16(vsig);
4068 inkey.flags = CPU_TO_LE16(flags);
4070 switch (hw->blk[blk].prof.cdid_bits) {
4074 #define ICE_CD_2_M 0xC000U
4075 #define ICE_CD_2_S 14
4076 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
4077 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
4080 #define ICE_CD_4_M 0xF000U
4081 #define ICE_CD_4_S 12
4082 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
4083 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
4086 #define ICE_CD_8_M 0xFF00U
4087 #define ICE_CD_8_S 16
4088 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
4089 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
4092 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
4096 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
4097 nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
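/* The generated key packs the PTG, the VSIG/CDID field and the flags into
 * 'inkey'; the vl/dc/nm masks passed through to ice_set_key() control which
 * key bits must match, are don't-care, or must never match in the resulting
 * profile ID TCAM entry.
 */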
4101 * ice_tcam_write_entry - write TCAM entry
4102 * @hw: pointer to the HW struct
4103 * @blk: the block in which to write profile ID to
4104 * @idx: the entry index to write to
4105 * @prof_id: profile ID
4106 * @ptg: packet type group (PTG) portion of key
4107 * @vsig: VSIG portion of key
4108 * @cdid: CDID portion of key
4109 * @flags: flag portion of key
4110 * @vl_msk: valid mask
4111 * @dc_msk: don't care mask
4112 * @nm_msk: never match mask
4114 static enum ice_status
4115 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
4116 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
4117 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4118 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
4119 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
4121 struct ice_prof_tcam_entry;
4122 enum ice_status status;
4124 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
4125 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
4127 hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
4128 hw->blk[blk].prof.t[idx].prof_id = prof_id;
4135 * ice_vsig_get_ref - returns the number of VSIs that belong to a VSIG
4136 * @hw: pointer to the hardware structure
4138 * @vsig: VSIG to query
4139 * @refs: pointer to variable to receive the reference count
4141 static enum ice_status
4142 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
4144 u16 idx = vsig & ICE_VSIG_IDX_M;
4145 struct ice_vsig_vsi *ptr;
4149 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
4150 return ICE_ERR_DOES_NOT_EXIST;
4152 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4155 ptr = ptr->next_vsi;
4162 * ice_has_prof_vsig - check to see if VSIG has a specific profile
4163 * @hw: pointer to the hardware structure
4165 * @vsig: VSIG to check against
4166 * @hdl: profile handle
4169 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
4171 u16 idx = vsig & ICE_VSIG_IDX_M;
4172 struct ice_vsig_prof *ent;
4174 LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4175 ice_vsig_prof, list)
4176 if (ent->profile_cookie == hdl)
4179 ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
4185 * ice_prof_bld_es - build profile ID extraction sequence changes
4186 * @hw: pointer to the HW struct
4187 * @blk: hardware block
4188 * @bld: the update package buffer build to add to
4189 * @chgs: the list of changes to make in hardware
4191 static enum ice_status
4192 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
4193 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4195 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
4196 struct ice_chs_chg *tmp;
4198 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4199 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
4200 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
4201 struct ice_pkg_es *p;
4204 id = ice_sect_id(blk, ICE_VEC_TBL);
4205 p = (struct ice_pkg_es *)
4206 ice_pkg_buf_alloc_section(bld, id,
4207 ice_struct_size(p, es,
4213 return ICE_ERR_MAX_LIMIT;
4215 p->count = CPU_TO_LE16(1);
4216 p->offset = CPU_TO_LE16(tmp->prof_id);
4218 ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
4219 ICE_NONDMA_TO_NONDMA);
4226 * ice_prof_bld_tcam - build profile ID TCAM changes
4227 * @hw: pointer to the HW struct
4228 * @blk: hardware block
4229 * @bld: the update package buffer build to add to
4230 * @chgs: the list of changes to make in hardware
4232 static enum ice_status
4233 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
4234 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4236 struct ice_chs_chg *tmp;
4238 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4239 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
4240 struct ice_prof_id_section *p;
4243 id = ice_sect_id(blk, ICE_PROF_TCAM);
4244 p = (struct ice_prof_id_section *)
4245 ice_pkg_buf_alloc_section(bld, id,
4251 return ICE_ERR_MAX_LIMIT;
4253 p->count = CPU_TO_LE16(1);
4254 p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
4255 p->entry[0].prof_id = tmp->prof_id;
4257 ice_memcpy(p->entry[0].key,
4258 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
4259 sizeof(hw->blk[blk].prof.t->key),
4260 ICE_NONDMA_TO_NONDMA);
4267 * ice_prof_bld_xlt1 - build XLT1 changes
4268 * @blk: hardware block
4269 * @bld: the update package buffer build to add to
4270 * @chgs: the list of changes to make in hardware
4272 static enum ice_status
4273 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
4274 struct LIST_HEAD_TYPE *chgs)
4276 struct ice_chs_chg *tmp;
4278 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4279 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
4280 struct ice_xlt1_section *p;
4283 id = ice_sect_id(blk, ICE_XLT1);
4284 p = (struct ice_xlt1_section *)
4285 ice_pkg_buf_alloc_section(bld, id,
4291 return ICE_ERR_MAX_LIMIT;
4293 p->count = CPU_TO_LE16(1);
4294 p->offset = CPU_TO_LE16(tmp->ptype);
4295 p->value[0] = tmp->ptg;
4302 * ice_prof_bld_xlt2 - build XLT2 changes
4303 * @blk: hardware block
4304 * @bld: the update package buffer build to add to
4305 * @chgs: the list of changes to make in hardware
4307 static enum ice_status
4308 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
4309 struct LIST_HEAD_TYPE *chgs)
4311 struct ice_chs_chg *tmp;
4313 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4314 struct ice_xlt2_section *p;
4317 switch (tmp->type) {
4321 id = ice_sect_id(blk, ICE_XLT2);
4322 p = (struct ice_xlt2_section *)
4323 ice_pkg_buf_alloc_section(bld, id,
4329 return ICE_ERR_MAX_LIMIT;
4331 p->count = CPU_TO_LE16(1);
4332 p->offset = CPU_TO_LE16(tmp->vsi);
4333 p->value[0] = CPU_TO_LE16(tmp->vsig);
4344 * ice_upd_prof_hw - update hardware using the change list
4345 * @hw: pointer to the HW struct
4346 * @blk: hardware block
4347 * @chgs: the list of changes to make in hardware
4349 static enum ice_status
4350 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
4351 struct LIST_HEAD_TYPE *chgs)
4353 struct ice_buf_build *b;
4354 struct ice_chs_chg *tmp;
4355 enum ice_status status;
4363 /* count number of sections we need */
4364 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4365 switch (tmp->type) {
4366 case ICE_PTG_ES_ADD:
4384 sects = xlt1 + xlt2 + tcam + es;
4389 /* Build update package buffer */
4390 b = ice_pkg_buf_alloc(hw);
4392 return ICE_ERR_NO_MEMORY;
4394 status = ice_pkg_buf_reserve_section(b, sects);
4398 /* Preserve order of table update: ES, TCAM, PTG, VSIG */
4400 status = ice_prof_bld_es(hw, blk, b, chgs);
4406 status = ice_prof_bld_tcam(hw, blk, b, chgs);
4412 status = ice_prof_bld_xlt1(blk, b, chgs);
4418 status = ice_prof_bld_xlt2(blk, b, chgs);
4423 /* After package buffer build check if the section count in buffer is
4424 * non-zero and matches the number of sections detected for package
4427 pkg_sects = ice_pkg_buf_get_active_sections(b);
4428 if (!pkg_sects || pkg_sects != sects) {
4429 status = ICE_ERR_INVAL_SIZE;
4433 /* update package */
4434 status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4435 if (status == ICE_ERR_AQ_ERROR)
4436 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
4439 ice_pkg_buf_free(hw, b);
4444 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
4445 * @hw: pointer to the HW struct
4446 * @prof_id: profile ID
4447 * @mask_sel: mask select
4449 * This function enables any of the masks selected by the mask select parameter
4450 * for the profile specified.
4452 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
4454 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
4456 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
4457 GLQF_FDMASK_SEL(prof_id), mask_sel);
4460 struct ice_fd_src_dst_pair {
4466 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
4467 /* These are defined in pairs */
4468 { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
4469 { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
4471 { ICE_PROT_IPV4_IL, 2, 12 },
4472 { ICE_PROT_IPV4_IL, 2, 16 },
4474 { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
4475 { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
4477 { ICE_PROT_IPV6_IL, 8, 8 },
4478 { ICE_PROT_IPV6_IL, 8, 24 },
4480 { ICE_PROT_TCP_IL, 1, 0 },
4481 { ICE_PROT_TCP_IL, 1, 2 },
4483 { ICE_PROT_UDP_OF, 1, 0 },
4484 { ICE_PROT_UDP_OF, 1, 2 },
4486 { ICE_PROT_UDP_IL_OR_S, 1, 0 },
4487 { ICE_PROT_UDP_IL_OR_S, 1, 2 },
4489 { ICE_PROT_SCTP_IL, 1, 0 },
4490 { ICE_PROT_SCTP_IL, 1, 2 }
4493 #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
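/* The table above is declared in pairs (the two halves of each address or
 * port field that Flow Director may swap); ice_update_fd_swap() relies on
 * that ordering when it computes the partner index as j + ((j % 2) ? -1 : 1).
 */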
4496 * ice_update_fd_swap - set register appropriately for a FD FV extraction
4497 * @hw: pointer to the HW struct
4498 * @prof_id: profile ID
4499 * @es: extraction sequence (length of array is determined by the block)
4501 static enum ice_status
4502 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
4504 ice_declare_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4505 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
4506 #define ICE_FD_FV_NOT_FOUND (-2)
4507 s8 first_free = ICE_FD_FV_NOT_FOUND;
4508 u8 used[ICE_MAX_FV_WORDS] = { 0 };
4513 ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4515 /* This code assumes that the Flow Director field vectors are assigned
4516 * from the end of the FV indexes working towards the zero index, that
4517 * only complete fields will be included and will be consecutive, and
4518 * that there are no gaps between valid indexes.
4521 /* Determine swap fields present */
4522 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
4523 /* Find the first free entry, assuming right to left population.
4524 * This is where we can start adding additional pairs if needed.
4526 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4530 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4531 if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4532 es[i].off == ice_fd_pairs[j].off) {
4533 ice_set_bit(j, pair_list);
4538 orig_free = first_free;
4540 /* determine missing swap fields that need to be added */
4541 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4542 u8 bit1 = ice_is_bit_set(pair_list, i + 1);
4543 u8 bit0 = ice_is_bit_set(pair_list, i);
4548 /* add the appropriate 'paired' entry */
4554 /* check for room */
4555 if (first_free + 1 < (s8)ice_fd_pairs[index].count)
4556 return ICE_ERR_MAX_LIMIT;
4558 /* place in extraction sequence */
4559 for (k = 0; k < ice_fd_pairs[index].count; k++) {
4560 es[first_free - k].prot_id =
4561 ice_fd_pairs[index].prot_id;
4562 es[first_free - k].off =
4563 ice_fd_pairs[index].off + (k * 2);
4566 return ICE_ERR_OUT_OF_RANGE;
4568 /* keep track of non-relevant fields */
4569 mask_sel |= BIT(first_free - k);
4572 pair_start[index] = first_free;
4573 first_free -= ice_fd_pairs[index].count;
4577 /* fill in the swap array */
4578 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4580 u8 indexes_used = 1;
4582 /* assume flat at this index */
4583 #define ICE_SWAP_VALID 0x80
4584 used[si] = si | ICE_SWAP_VALID;
4586 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4591 /* check for a swap location */
4592 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4593 if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4594 es[si].off == ice_fd_pairs[j].off) {
4597 /* determine the appropriate matching field */
4598 idx = j + ((j % 2) ? -1 : 1);
4600 indexes_used = ice_fd_pairs[idx].count;
4601 for (k = 0; k < indexes_used; k++) {
4602 used[si - k] = (pair_start[idx] - k) |
4612 /* for each set of 4 swap and 4 inset indexes, write the appropriate
4613 * register
4614 */
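/* Illustration: if the FD field vector were 48 words wide, there would be
 * twelve GLQF_FDSWAP/GLQF_FDINSET registers per profile; register j holds
 * one byte for each of FV words 4*j .. 4*j+3. Each GLQF_FDSWAP byte names
 * the FV word to take the value from (itself when no swap is needed), with
 * ICE_SWAP_VALID marking a valid entry, while the matching GLQF_FDINSET
 * byte is set to the default inset value ICE_INSET_DFLT for words in use.
 */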
4615 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4619 for (k = 0; k < 4; k++) {
4623 if (used[idx] && !(mask_sel & BIT(idx))) {
4624 raw_swap |= used[idx] << (k * BITS_PER_BYTE);
4625 #define ICE_INSET_DFLT 0x9f
4626 raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
4630 /* write the appropriate swap register set */
4631 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
4633 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4634 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
4636 /* write the appropriate inset register set */
4637 wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
4639 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4640 prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
4643 /* initially clear the mask select for this profile */
4644 ice_update_fd_mask(hw, prof_id, 0);
4649 /* The entries here need to match the order of enum ice_ptype_attrib */
4650 static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
4651 { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
4652 { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
4653 { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
4654 { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
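/* Roughly: each entry pairs the attribute's flag value with the mask of
 * flag bits that must match. These are copied into a profile's PTG entries
 * by ice_add_prof_attrib() and later programmed into the TCAM key flags
 * via ice_set_tcam_flags() and ice_tcam_write_entry().
 */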
4658 * ice_get_ptype_attrib_info - get ptype attribute information
4659 * @type: attribute type
4660 * @info: pointer to the variable that receives the attribute information
4663 ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
4664 struct ice_ptype_attrib_info *info)
4666 *info = ice_ptype_attributes[type];
4670 * ice_add_prof_attrib - add any PTG with attributes to profile
4671 * @prof: pointer to the profile to which PTG entries will be added
4672 * @ptg: PTG to be added
4673 * @ptype: PTYPE that needs to be looked up
4674 * @attr: array of attributes that will be considered
4675 * @attr_cnt: number of elements in the attribute array
4677 static enum ice_status
4678 ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
4679 const struct ice_ptype_attributes *attr, u16 attr_cnt)
4684 for (i = 0; i < attr_cnt; i++) {
4685 if (attr[i].ptype == ptype) {
4688 prof->ptg[prof->ptg_cnt] = ptg;
4689 ice_get_ptype_attrib_info(attr[i].attrib,
4690 &prof->attr[prof->ptg_cnt]);
4692 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4693 return ICE_ERR_MAX_LIMIT;
4698 return ICE_ERR_DOES_NOT_EXIST;
4704 * ice_add_prof - add profile
4705 * @hw: pointer to the HW struct
4706 * @blk: hardware block
4707 * @id: profile tracking ID
4708 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4709 * @attr: array of attributes
4710 * @attr_cnt: number of elements in attrib array
4711 * @es: extraction sequence (length of array is determined by the block)
4712 * @masks: mask for extraction sequence
4714 * This function registers a profile, which matches a set of PTYPES with a
4715 * particular extraction sequence. While the hardware profile is allocated,
4716 * it will not be written until the first call to ice_add_flow that specifies
4717 * the ID value used here.
4720 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4721 const struct ice_ptype_attributes *attr, u16 attr_cnt,
4722 struct ice_fv_word *es, u16 *masks)
4724 u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4725 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
4726 struct ice_prof_map *prof;
4727 enum ice_status status;
4731 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
4733 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4735 /* search for existing profile */
4736 status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
4738 /* allocate profile ID */
4739 status = ice_alloc_prof_id(hw, blk, &prof_id);
4741 goto err_ice_add_prof;
4742 if (blk == ICE_BLK_FD) {
4743 /* For the Flow Director block, the extraction sequence may
4744 * need to be altered in the case where there are paired
4745 * fields that have no match. This is necessary because, for
4746 * Flow Director, src and dest fields need to be paired for
4747 * filter programming, and these values are swapped as needed.
4748 */
4750 status = ice_update_fd_swap(hw, prof_id, es);
4752 goto err_ice_add_prof;
4754 status = ice_update_prof_masking(hw, blk, prof_id, masks);
4756 goto err_ice_add_prof;
4758 /* and write new es */
4759 ice_write_es(hw, blk, prof_id, es);
4762 ice_prof_inc_ref(hw, blk, prof_id);
4764 /* add profile info */
4766 prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
4768 goto err_ice_add_prof;
4770 prof->profile_cookie = id;
4771 prof->prof_id = prof_id;
4775 /* build list of ptgs */
4776 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4779 if (!ptypes[byte]) {
4785 /* Examine 8 bits per byte */
4786 ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
4787 BITS_PER_BYTE) {
4791 ptype = byte * BITS_PER_BYTE + bit;
4793 /* The package should place all ptypes in a non-zero
4794 * PTG, so the following call should never fail.
4796 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4799 /* If PTG is already added, skip and continue */
4800 if (ice_is_bit_set(ptgs_used, ptg))
4803 ice_set_bit(ptg, ptgs_used);
4804 /* Check to see if there are any attributes for this
4805 * ptype, and add them if found.
4807 status = ice_add_prof_attrib(prof, ptg, ptype, attr,
4808 attr_cnt);
4809 if (status == ICE_ERR_MAX_LIMIT)
4812 /* This is simply a ptype/PTG with no
4813 * attributes
4814 */
4815 prof->ptg[prof->ptg_cnt] = ptg;
4816 prof->attr[prof->ptg_cnt].flags = 0;
4817 prof->attr[prof->ptg_cnt].mask = 0;
4819 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4828 LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
4829 status = ICE_SUCCESS;
4832 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
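/*
 * Rough usage sketch for the profile APIs in this file (illustrative only;
 * error handling is omitted and the handle, VSI number, ptype bitmap and
 * extraction sequence are placeholders the caller must fill in):
 *
 *	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };
 *	struct ice_fv_word es[ICE_MAX_FV_WORDS] = { 0 };
 *	u16 masks[ICE_MAX_FV_WORDS] = { 0 };
 *	u64 hdl = 1;	(caller-chosen profile tracking ID)
 *	u16 vsi = 0;	(target VSI)
 *
 *	ice_add_prof(hw, ICE_BLK_RSS, hdl, ptypes, NULL, 0, es, masks);
 *	ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, hdl);
 *	...
 *	ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, hdl);
 *	ice_rem_prof(hw, ICE_BLK_RSS, hdl);
 */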
4837 * ice_search_prof_id - Search for a profile tracking ID
4838 * @hw: pointer to the HW struct
4839 * @blk: hardware block
4840 * @id: profile tracking ID
4842 * This will search for a profile tracking ID which was previously added.
4843 * The profile map lock should be held before calling this function.
4845 struct ice_prof_map *
4846 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
4848 struct ice_prof_map *entry = NULL;
4849 struct ice_prof_map *map;
4851 LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
4852 if (map->profile_cookie == id) {
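/* Typical calling pattern for ice_search_prof_id() (sketch, mirroring
 * ice_rem_prof() below):
 *
 *	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
 *	map = ice_search_prof_id(hw, blk, id);
 *	if (map)
 *		... use the entry while the lock is held ...
 *	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
 */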
4861 * ice_vsig_prof_id_count - count profiles in a VSIG
4862 * @hw: pointer to the HW struct
4863 * @blk: hardware block
4864 * @vsig: VSIG whose profiles are to be counted
4867 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
4869 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
4870 struct ice_vsig_prof *p;
4872 LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4873 ice_vsig_prof, list)
4880 * ice_rel_tcam_idx - release a TCAM index
4881 * @hw: pointer to the HW struct
4882 * @blk: hardware block
4883 * @idx: the index to release
4885 static enum ice_status
4886 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
4888 /* Masks to invoke a never match entry */
4889 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4890 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
4891 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4892 enum ice_status status;
4894 /* write the TCAM entry */
4895 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
4900 /* release the TCAM entry */
4901 status = ice_free_tcam_ent(hw, blk, idx);
4907 * ice_rem_prof_id - remove one profile from a VSIG
4908 * @hw: pointer to the HW struct
4909 * @blk: hardware block
4910 * @prof: pointer to profile structure to remove
4912 static enum ice_status
4913 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
4914 struct ice_vsig_prof *prof)
4916 enum ice_status status;
4919 for (i = 0; i < prof->tcam_count; i++)
4920 if (prof->tcam[i].in_use) {
4921 prof->tcam[i].in_use = false;
4922 status = ice_rel_tcam_idx(hw, blk,
4923 prof->tcam[i].tcam_idx);
4925 return ICE_ERR_HW_TABLE;
4932 * ice_rem_vsig - remove VSIG
4933 * @hw: pointer to the HW struct
4934 * @blk: hardware block
4935 * @vsig: the VSIG to remove
4936 * @chg: the change list
4938 static enum ice_status
4939 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4940 struct LIST_HEAD_TYPE *chg)
4942 u16 idx = vsig & ICE_VSIG_IDX_M;
4943 struct ice_vsig_vsi *vsi_cur;
4944 struct ice_vsig_prof *d, *t;
4945 enum ice_status status;
4947 /* remove TCAM entries */
4948 LIST_FOR_EACH_ENTRY_SAFE(d, t,
4949 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4950 ice_vsig_prof, list) {
4951 status = ice_rem_prof_id(hw, blk, d);
4959 /* Move all VSIs associated with this VSIG to the default VSIG */
4960 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4961 /* If the VSIG has at least 1 VSI then iterate through the list
4962 * and remove the VSIs before deleting the group.
4966 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
4967 struct ice_chs_chg *p;
4969 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4971 return ICE_ERR_NO_MEMORY;
4973 p->type = ICE_VSIG_REM;
4974 p->orig_vsig = vsig;
4975 p->vsig = ICE_DEFAULT_VSIG;
4976 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
4978 LIST_ADD(&p->list_entry, chg);
4983 return ice_vsig_free(hw, blk, vsig);
4987 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
4988 * @hw: pointer to the HW struct
4989 * @blk: hardware block
4990 * @vsig: VSIG to remove the profile from
4991 * @hdl: profile handle indicating which profile to remove
4992 * @chg: list to receive a record of changes
4994 static enum ice_status
4995 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4996 struct LIST_HEAD_TYPE *chg)
4998 u16 idx = vsig & ICE_VSIG_IDX_M;
4999 struct ice_vsig_prof *p, *t;
5000 enum ice_status status;
5002 LIST_FOR_EACH_ENTRY_SAFE(p, t,
5003 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5004 ice_vsig_prof, list)
5005 if (p->profile_cookie == hdl) {
5006 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
5007 /* this is the last profile, remove the VSIG */
5008 return ice_rem_vsig(hw, blk, vsig, chg);
5010 status = ice_rem_prof_id(hw, blk, p);
5018 return ICE_ERR_DOES_NOT_EXIST;
5022 * ice_rem_flow_all - remove all flows with a particular profile
5023 * @hw: pointer to the HW struct
5024 * @blk: hardware block
5025 * @id: profile tracking ID
5027 static enum ice_status
5028 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
5030 struct ice_chs_chg *del, *tmp;
5031 struct LIST_HEAD_TYPE chg;
5032 enum ice_status status;
5035 INIT_LIST_HEAD(&chg);
5037 for (i = 1; i < ICE_MAX_VSIGS; i++)
5038 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
5039 if (ice_has_prof_vsig(hw, blk, i, id)) {
5040 status = ice_rem_prof_id_vsig(hw, blk, i, id,
5043 goto err_ice_rem_flow_all;
5047 status = ice_upd_prof_hw(hw, blk, &chg);
5049 err_ice_rem_flow_all:
5050 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5051 LIST_DEL(&del->list_entry);
5059 * ice_rem_prof - remove profile
5060 * @hw: pointer to the HW struct
5061 * @blk: hardware block
5062 * @id: profile tracking ID
5064 * This will remove the profile specified by the ID parameter, which was
5065 * previously created through ice_add_prof. If any existing entries
5066 * are associated with this profile, they will be removed as well.
5068 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
5070 struct ice_prof_map *pmap;
5071 enum ice_status status;
5073 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5075 pmap = ice_search_prof_id(hw, blk, id);
5077 status = ICE_ERR_DOES_NOT_EXIST;
5078 goto err_ice_rem_prof;
5081 /* remove all flows with this profile */
5082 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
5084 goto err_ice_rem_prof;
5086 /* dereference profile, and possibly remove */
5087 ice_prof_dec_ref(hw, blk, pmap->prof_id);
5089 LIST_DEL(&pmap->list);
5093 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5098 * ice_get_prof - get profile
5099 * @hw: pointer to the HW struct
5100 * @blk: hardware block
5101 * @hdl: profile handle
5102 * @chg: the change list
5104 static enum ice_status
5105 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
5106 struct LIST_HEAD_TYPE *chg)
5108 enum ice_status status = ICE_SUCCESS;
5109 struct ice_prof_map *map;
5110 struct ice_chs_chg *p;
5113 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5114 /* Get the details on the profile specified by the handle ID */
5115 map = ice_search_prof_id(hw, blk, hdl);
5117 status = ICE_ERR_DOES_NOT_EXIST;
5118 goto err_ice_get_prof;
5121 for (i = 0; i < map->ptg_cnt; i++)
5122 if (!hw->blk[blk].es.written[map->prof_id]) {
5123 /* add ES to change list */
5124 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5126 status = ICE_ERR_NO_MEMORY;
5127 goto err_ice_get_prof;
5130 p->type = ICE_PTG_ES_ADD;
5132 p->ptg = map->ptg[i];
5133 p->attr = map->attr[i];
5137 p->prof_id = map->prof_id;
5139 hw->blk[blk].es.written[map->prof_id] = true;
5141 LIST_ADD(&p->list_entry, chg);
5145 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5146 /* let caller clean up the change list */
5151 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
5152 * @hw: pointer to the HW struct
5153 * @blk: hardware block
5154 * @vsig: VSIG from which to copy the list
5155 * @lst: output list that receives the copied profile entries
5157 * This routine makes a copy of the list of profiles in the specified VSIG.
5159 static enum ice_status
5160 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5161 struct LIST_HEAD_TYPE *lst)
5163 struct ice_vsig_prof *ent1, *ent2;
5164 u16 idx = vsig & ICE_VSIG_IDX_M;
5166 LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5167 ice_vsig_prof, list) {
5168 struct ice_vsig_prof *p;
5170 /* copy to the input list */
5171 p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
5172 ICE_NONDMA_TO_NONDMA);
5174 goto err_ice_get_profs_vsig;
5176 LIST_ADD_TAIL(&p->list, lst);
5181 err_ice_get_profs_vsig:
5182 LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
5183 LIST_DEL(&ent1->list);
5187 return ICE_ERR_NO_MEMORY;
5191 * ice_add_prof_to_lst - add profile entry to a list
5192 * @hw: pointer to the HW struct
5193 * @blk: hardware block
5194 * @lst: the list to be added to
5195 * @hdl: profile handle of entry to add
5197 static enum ice_status
5198 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
5199 struct LIST_HEAD_TYPE *lst, u64 hdl)
5201 enum ice_status status = ICE_SUCCESS;
5202 struct ice_prof_map *map;
5203 struct ice_vsig_prof *p;
5206 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5207 map = ice_search_prof_id(hw, blk, hdl);
5209 status = ICE_ERR_DOES_NOT_EXIST;
5210 goto err_ice_add_prof_to_lst;
5213 p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
5215 status = ICE_ERR_NO_MEMORY;
5216 goto err_ice_add_prof_to_lst;
5219 p->profile_cookie = map->profile_cookie;
5220 p->prof_id = map->prof_id;
5221 p->tcam_count = map->ptg_cnt;
5223 for (i = 0; i < map->ptg_cnt; i++) {
5224 p->tcam[i].prof_id = map->prof_id;
5225 p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
5226 p->tcam[i].ptg = map->ptg[i];
5227 p->tcam[i].attr = map->attr[i];
5230 LIST_ADD(&p->list, lst);
5232 err_ice_add_prof_to_lst:
5233 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5238 * ice_move_vsi - move VSI to another VSIG
5239 * @hw: pointer to the HW struct
5240 * @blk: hardware block
5241 * @vsi: the VSI to move
5242 * @vsig: the VSIG to move the VSI to
5243 * @chg: the change list
5245 static enum ice_status
5246 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
5247 struct LIST_HEAD_TYPE *chg)
5249 enum ice_status status;
5250 struct ice_chs_chg *p;
5253 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5255 return ICE_ERR_NO_MEMORY;
5257 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
5259 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
5266 p->type = ICE_VSI_MOVE;
5268 p->orig_vsig = orig_vsig;
5271 LIST_ADD(&p->list_entry, chg);
5277 * ice_set_tcam_flags - set TCAM flag don't care mask
5278 * @mask: mask for flags
5279 * @dc_mask: pointer to the don't care mask
5281 static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ])
5285 /* flags are lowest u16 */
5286 flag_word = (u16 *)dc_mask;
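/* For example: an attribute mask of 0x000F is meant to keep flag bits 0-3
 * significant for the TCAM match while leaving the remaining flag bits as
 * don't care; the low 16 bits of the don't-care mask are expected to end
 * up as the complement of @mask.
 */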
5291 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
5292 * @hw: pointer to the HW struct
5293 * @idx: the index of the TCAM entry to remove
5294 * @chg: the list of change structures to search
5297 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
5299 struct ice_chs_chg *pos, *tmp;
5301 LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry)
5302 if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
5303 LIST_DEL(&tmp->list_entry);
5309 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
5310 * @hw: pointer to the HW struct
5311 * @blk: hardware block
5312 * @enable: true to enable, false to disable
5313 * @vsig: the VSIG of the TCAM entry
5314 * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
5315 * @chg: the change list
5317 * This function appends an enable or disable TCAM entry to the change log
5319 static enum ice_status
5320 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
5321 u16 vsig, struct ice_tcam_inf *tcam,
5322 struct LIST_HEAD_TYPE *chg)
5324 enum ice_status status;
5325 struct ice_chs_chg *p;
5327 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5328 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5329 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5331 /* if disabling, free the TCAM */
5333 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
5335 /* if we have already created a change for this TCAM entry, then
5336 * we need to remove that entry, in order to prevent writing to
5337 * a TCAM entry we will no longer have ownership of.
5338 */
5339 ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
5345 /* for re-enabling, reallocate a TCAM */
5346 /* for entries with empty attribute masks, allocate entry from
5347 * the bottom of the TCAM table; otherwise, allocate from the
5348 * top of the table in order to give it higher priority
5350 status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
5355 /* add TCAM to change list */
5356 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5358 return ICE_ERR_NO_MEMORY;
5360 /* set don't care masks for TCAM flags */
5361 ice_set_tcam_flags(tcam->attr.mask, dc_msk);
5363 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
5364 tcam->ptg, vsig, 0, tcam->attr.flags,
5365 vl_msk, dc_msk, nm_msk);
5367 goto err_ice_prof_tcam_ena_dis;
5371 p->type = ICE_TCAM_ADD;
5372 p->add_tcam_idx = true;
5373 p->prof_id = tcam->prof_id;
5376 p->tcam_idx = tcam->tcam_idx;
5379 LIST_ADD(&p->list_entry, chg);
5383 err_ice_prof_tcam_ena_dis:
5389 * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use
5390 * @ptg_attr: pointer to the PTG and attribute pair to check
5391 * @ptgs_used: bitmap that denotes which PTGs are in use
5392 * @attr_used: array of PTG and attributes pairs already used
5393 * @attr_cnt: count of entries in the attr_used array
5396 ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, ice_bitmap_t *ptgs_used,
5397 struct ice_tcam_inf *attr_used[], u16 attr_cnt)
5401 if (!ice_is_bit_set(ptgs_used, ptg_attr->ptg))
5404 /* the PTG is used, so now look for correct attributes */
5405 for (i = 0; i < attr_cnt; i++)
5406 if (attr_used[i]->ptg == ptg_attr->ptg &&
5407 attr_used[i]->attr.flags == ptg_attr->attr.flags &&
5408 attr_used[i]->attr.mask == ptg_attr->attr.mask)
5415 * ice_adj_prof_priorities - adjust profile based on priorities
5416 * @hw: pointer to the HW struct
5417 * @blk: hardware block
5418 * @vsig: the VSIG for which to adjust profile priorities
5419 * @chg: the change list
5421 static enum ice_status
5422 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5423 struct LIST_HEAD_TYPE *chg)
5425 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
5426 struct ice_tcam_inf **attr_used;
5427 enum ice_status status = ICE_SUCCESS;
5428 struct ice_vsig_prof *t;
5429 u16 attr_used_cnt = 0;
5432 #define ICE_MAX_PTG_ATTRS 1024
5433 attr_used = (struct ice_tcam_inf **)ice_calloc(hw, ICE_MAX_PTG_ATTRS,
5434 sizeof(*attr_used));
5436 return ICE_ERR_NO_MEMORY;
5438 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
5439 idx = vsig & ICE_VSIG_IDX_M;
5441 /* Priority is based on the order in which the profiles are added. The
5442 * most recently added profile has the highest priority and the oldest
5443 * profile has the lowest priority. Since the profile property list for
5444 * a VSIG is sorted from newest to oldest, this code traverses the list
5445 * in order and enables the first instance of each PTG that it finds
5446 * (if not already enabled); it also disables any duplicate PTGs that it
5447 * finds in the older profiles (that are currently enabled).
5448 */
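/* Example (illustrative): if the newest profile and an older profile in
 * the same VSIG both contain PTG 5 with identical attributes, the newer
 * profile's TCAM entry for PTG 5 remains (or becomes) enabled, while the
 * older profile's entry for PTG 5 is rewritten as a never-match entry.
 */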
5450 LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5451 ice_vsig_prof, list) {
5454 for (i = 0; i < t->tcam_count; i++) {
5457 /* Scan the priorities from newest to oldest.
5458 * Make sure that the newest profiles take priority.
5460 used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used,
5461 attr_used, attr_used_cnt);
5463 if (used && t->tcam[i].in_use) {
5464 /* need to mark this PTG as never match, as it
5465 * was already in use and is therefore a duplicate
5466 * (and lower priority)
5468 status = ice_prof_tcam_ena_dis(hw, blk, false,
5473 goto err_ice_adj_prof_priorities;
5474 } else if (!used && !t->tcam[i].in_use) {
5475 /* need to enable this PTG, as it is not in use
5476 * and not yet enabled (highest priority)
5478 status = ice_prof_tcam_ena_dis(hw, blk, true,
5483 goto err_ice_adj_prof_priorities;
5486 /* keep track of used ptgs */
5487 ice_set_bit(t->tcam[i].ptg, ptgs_used);
5488 if (attr_used_cnt < ICE_MAX_PTG_ATTRS)
5489 attr_used[attr_used_cnt++] = &t->tcam[i];
5491 ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n");
5495 err_ice_adj_prof_priorities:
5496 ice_free(hw, attr_used);
5501 * ice_add_prof_id_vsig - add profile to VSIG
5502 * @hw: pointer to the HW struct
5503 * @blk: hardware block
5504 * @vsig: the VSIG to which this profile is to be added
5505 * @hdl: the profile handle indicating the profile to add
5506 * @rev: true to add entries to the end of the list
5507 * @chg: the change list
5509 static enum ice_status
5510 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5511 bool rev, struct LIST_HEAD_TYPE *chg)
5513 /* Masks that ignore flags */
5514 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5515 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5516 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5517 enum ice_status status = ICE_SUCCESS;
5518 struct ice_prof_map *map;
5519 struct ice_vsig_prof *t;
5520 struct ice_chs_chg *p;
5523 /* Error, if this VSIG already has this profile */
5524 if (ice_has_prof_vsig(hw, blk, vsig, hdl))
5525 return ICE_ERR_ALREADY_EXISTS;
5527 /* new VSIG profile structure */
5528 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5530 return ICE_ERR_NO_MEMORY;
5532 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5533 /* Get the details on the profile specified by the handle ID */
5534 map = ice_search_prof_id(hw, blk, hdl);
5536 status = ICE_ERR_DOES_NOT_EXIST;
5537 goto err_ice_add_prof_id_vsig;
5540 t->profile_cookie = map->profile_cookie;
5541 t->prof_id = map->prof_id;
5542 t->tcam_count = map->ptg_cnt;
5544 /* create TCAM entries */
5545 for (i = 0; i < map->ptg_cnt; i++) {
5548 /* add TCAM to change list */
5549 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5551 status = ICE_ERR_NO_MEMORY;
5552 goto err_ice_add_prof_id_vsig;
5555 /* allocate the TCAM entry index */
5556 /* for entries with empty attribute masks, allocate entry from
5557 * the bottom of the TCAM table; otherwise, allocate from the
5558 * top of the table in order to give it higher priority
5560 status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
5564 goto err_ice_add_prof_id_vsig;
5567 t->tcam[i].ptg = map->ptg[i];
5568 t->tcam[i].prof_id = map->prof_id;
5569 t->tcam[i].tcam_idx = tcam_idx;
5570 t->tcam[i].attr = map->attr[i];
5571 t->tcam[i].in_use = true;
5573 p->type = ICE_TCAM_ADD;
5574 p->add_tcam_idx = true;
5575 p->prof_id = t->tcam[i].prof_id;
5576 p->ptg = t->tcam[i].ptg;
5578 p->tcam_idx = t->tcam[i].tcam_idx;
5580 /* set don't care masks for TCAM flags */
5581 ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk);
5583 /* write the TCAM entry */
5584 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
5586 t->tcam[i].ptg, vsig, 0,
5587 t->tcam[i].attr.flags, vl_msk,
5591 goto err_ice_add_prof_id_vsig;
5595 LIST_ADD(&p->list_entry, chg);
5598 /* add profile to VSIG */
5599 vsig_idx = vsig & ICE_VSIG_IDX_M;
5601 LIST_ADD_TAIL(&t->list,
5602 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5605 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5607 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5610 err_ice_add_prof_id_vsig:
5611 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5612 /* let caller clean up the change list */
5618 * ice_create_prof_id_vsig - add a new VSIG with a single profile
5619 * @hw: pointer to the HW struct
5620 * @blk: hardware block
5621 * @vsi: the initial VSI that will be in VSIG
5622 * @hdl: the profile handle of the profile that will be added to the VSIG
5623 * @chg: the change list
5625 static enum ice_status
5626 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
5627 struct LIST_HEAD_TYPE *chg)
5629 enum ice_status status;
5630 struct ice_chs_chg *p;
5633 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5635 return ICE_ERR_NO_MEMORY;
5637 new_vsig = ice_vsig_alloc(hw, blk);
5639 status = ICE_ERR_HW_TABLE;
5640 goto err_ice_create_prof_id_vsig;
5643 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
5645 goto err_ice_create_prof_id_vsig;
5647 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
5649 goto err_ice_create_prof_id_vsig;
5651 p->type = ICE_VSIG_ADD;
5653 p->orig_vsig = ICE_DEFAULT_VSIG;
5656 LIST_ADD(&p->list_entry, chg);
5660 err_ice_create_prof_id_vsig:
5661 /* let caller clean up the change list */
5667 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
5668 * @hw: pointer to the HW struct
5669 * @blk: hardware block
5670 * @vsi: the initial VSI that will be in VSIG
5671 * @lst: the list of profiles that will be added to the VSIG
5672 * @new_vsig: return of new VSIG
5673 * @chg: the change list
5675 static enum ice_status
5676 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
5677 struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
5678 struct LIST_HEAD_TYPE *chg)
5680 struct ice_vsig_prof *t;
5681 enum ice_status status;
5684 vsig = ice_vsig_alloc(hw, blk);
5686 return ICE_ERR_HW_TABLE;
5688 status = ice_move_vsi(hw, blk, vsi, vsig, chg);
5692 LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
5693 /* Reverse the order here since we are copying the list */
5694 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
5706 * ice_find_prof_vsig - find a VSIG with a specific profile handle
5707 * @hw: pointer to the HW struct
5708 * @blk: hardware block
5709 * @hdl: the profile handle of the profile to search for
5710 * @vsig: returns the VSIG with the matching profile
5713 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
5715 struct ice_vsig_prof *t;
5716 struct LIST_HEAD_TYPE lst;
5717 enum ice_status status;
5719 INIT_LIST_HEAD(&lst);
5721 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5725 t->profile_cookie = hdl;
5726 LIST_ADD(&t->list, &lst);
5728 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
5733 return status == ICE_SUCCESS;
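/* Note: ice_add_prof_id_flow() below uses this helper to look for an
 * existing VSIG whose only characteristic is the profile being added; when
 * one is found, the VSI is simply moved into that VSIG instead of creating
 * a new VSIG and TCAM entries.
 */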
5737 * ice_add_vsi_flow - add VSI flow
5738 * @hw: pointer to the HW struct
5739 * @blk: hardware block
5740 * @vsi: the VSI to add to the target VSIG
5741 * @vsig: target VSIG to include the input VSI
5743 * Calling this function will add the VSI to a given VSIG and
5744 * update the HW tables accordingly. This call can be used to
5745 * add multiple VSIs to a VSIG if we know beforehand that those
5746 * VSIs have the same characteristics as the VSIG. This avoids
5747 * the cost of generating a new VSIG and TCAM entries, and the
5748 * subsequent rollback, once a matching VSIG is found.
5751 ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
5753 struct ice_chs_chg *tmp, *del;
5754 struct LIST_HEAD_TYPE chg;
5755 enum ice_status status;
5757 /* if target VSIG is default the move is invalid */
5758 if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
5759 return ICE_ERR_PARAM;
5761 INIT_LIST_HEAD(&chg);
5763 /* move VSI to the VSIG that matches */
5764 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5765 /* update hardware if successful */
5767 status = ice_upd_prof_hw(hw, blk, &chg);
5769 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5770 LIST_DEL(&del->list_entry);
5778 * ice_add_prof_id_flow - add profile flow
5779 * @hw: pointer to the HW struct
5780 * @blk: hardware block
5781 * @vsi: the VSI to enable with the profile specified by ID
5782 * @hdl: profile handle
5784 * Calling this function will update the hardware tables to enable the
5785 * profile indicated by @hdl for the VSI specified by @vsi. Once this
5786 * call succeeds, the flow will be enabled.
5789 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5791 struct ice_vsig_prof *tmp1, *del1;
5792 struct LIST_HEAD_TYPE union_lst;
5793 struct ice_chs_chg *tmp, *del;
5794 struct LIST_HEAD_TYPE chg;
5795 enum ice_status status;
5798 INIT_LIST_HEAD(&union_lst);
5799 INIT_LIST_HEAD(&chg);
5802 status = ice_get_prof(hw, blk, hdl, &chg);
5806 /* determine if VSI is already part of a VSIG */
5807 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5808 if (!status && vsig) {
5816 /* make sure that there is no overlap/conflict between the new
5817 * characteristics and the existing ones; we don't support that
5820 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
5821 status = ICE_ERR_ALREADY_EXISTS;
5822 goto err_ice_add_prof_id_flow;
5825 /* last VSI in the VSIG? */
5826 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5828 goto err_ice_add_prof_id_flow;
5829 only_vsi = (ref == 1);
5831 /* create a union of the current profiles and the one being
5832 * added
5833 */
5834 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
5836 goto err_ice_add_prof_id_flow;
5838 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
5840 goto err_ice_add_prof_id_flow;
5842 /* search for an existing VSIG with an exact characteristics match */
5843 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
5845 /* move VSI to the VSIG that matches */
5846 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5848 goto err_ice_add_prof_id_flow;
5850 /* VSI has been moved out of or_vsig. If the or_vsig had
5851 * only that VSI it is now empty and can be removed.
5854 status = ice_rem_vsig(hw, blk, or_vsig, &chg);
5856 goto err_ice_add_prof_id_flow;
5858 } else if (only_vsi) {
5859 /* If the original VSIG only contains one VSI, then it
5860 * will be the requesting VSI. In this case the VSI is
5861 * not sharing entries and we can simply add the new
5862 * profile to the VSIG.
5864 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
5867 goto err_ice_add_prof_id_flow;
5869 /* Adjust priorities */
5870 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5872 goto err_ice_add_prof_id_flow;
5874 /* No match, so we need a new VSIG */
5875 status = ice_create_vsig_from_lst(hw, blk, vsi,
5876 &union_lst, &vsig,
5877 &chg);
5879 goto err_ice_add_prof_id_flow;
5881 /* Adjust priorities */
5882 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5884 goto err_ice_add_prof_id_flow;
5887 /* need to find or add a VSIG */
5888 /* search for an existing VSIG with an exact characteristics match */
5889 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
5890 /* found an exact match */
5891 /* add or move VSI to the VSIG that matches */
5892 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5894 goto err_ice_add_prof_id_flow;
5896 /* we did not find an exact match */
5897 /* we need to add a VSIG */
5898 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
5901 goto err_ice_add_prof_id_flow;
5905 /* update hardware */
5907 status = ice_upd_prof_hw(hw, blk, &chg);
5909 err_ice_add_prof_id_flow:
5910 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5911 LIST_DEL(&del->list_entry);
5915 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
5916 LIST_DEL(&del1->list);
5924 * ice_rem_prof_from_list - remove a profile from list
5925 * @hw: pointer to the HW struct
5926 * @lst: list to remove the profile from
5927 * @hdl: the profile handle indicating the profile to remove
5929 static enum ice_status
5930 ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
5932 struct ice_vsig_prof *ent, *tmp;
5934 LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
5935 if (ent->profile_cookie == hdl) {
5936 LIST_DEL(&ent->list);
5941 return ICE_ERR_DOES_NOT_EXIST;
5945 * ice_rem_prof_id_flow - remove flow
5946 * @hw: pointer to the HW struct
5947 * @blk: hardware block
5948 * @vsi: the VSI from which to remove the profile specified by ID
5949 * @hdl: profile tracking handle
5951 * Calling this function will update the hardware tables to remove the
5952 * profile indicated by @hdl for the VSI specified by @vsi. Once this
5953 * call succeeds, the flow will be disabled.
5956 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5958 struct ice_vsig_prof *tmp1, *del1;
5959 struct LIST_HEAD_TYPE chg, copy;
5960 struct ice_chs_chg *tmp, *del;
5961 enum ice_status status;
5964 INIT_LIST_HEAD(&copy);
5965 INIT_LIST_HEAD(&chg);
5967 /* determine if VSI is already part of a VSIG */
5968 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5969 if (!status && vsig) {
5975 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
5976 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5978 goto err_ice_rem_prof_id_flow;
5979 only_vsi = (ref == 1);
5982 /* If the original VSIG only contains one reference,
5983 * which will be the requesting VSI, then the VSI is not
5984 * sharing entries and we can simply remove the specific
5985 * characteristics from the VSIG.
5989 /* If there are no profiles left for this VSIG,
5990 * then simply remove the VSIG.
5992 status = ice_rem_vsig(hw, blk, vsig, &chg);
5994 goto err_ice_rem_prof_id_flow;
5996 status = ice_rem_prof_id_vsig(hw, blk, vsig,
5999 goto err_ice_rem_prof_id_flow;
6001 /* Adjust priorities */
6002 status = ice_adj_prof_priorities(hw, blk, vsig,
6005 goto err_ice_rem_prof_id_flow;
6009 /* Make a copy of the VSIG's list of Profiles */
6010 status = ice_get_profs_vsig(hw, blk, vsig, &copy);
6012 goto err_ice_rem_prof_id_flow;
6014 /* Remove specified profile entry from the list */
6015 status = ice_rem_prof_from_list(hw, &copy, hdl);
6017 goto err_ice_rem_prof_id_flow;
6019 if (LIST_EMPTY(&copy)) {
6020 status = ice_move_vsi(hw, blk, vsi,
6021 ICE_DEFAULT_VSIG, &chg);
6023 goto err_ice_rem_prof_id_flow;
6025 } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
6026 &vsig)) {
6027 /* found an exact match */
6028 /* add or move VSI to the VSIG that matches */
6029 /* Search for a VSIG with a matching profile
6030 * list
6031 */
6033 /* Found match, move VSI to the matching VSIG */
6034 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
6036 goto err_ice_rem_prof_id_flow;
6038 /* since no existing VSIG supports this
6039 * characteristic pattern, we need to create a
6040 * new VSIG and TCAM entries
6042 status = ice_create_vsig_from_lst(hw, blk, vsi,
6043 &copy, &vsig,
6044 &chg);
6046 goto err_ice_rem_prof_id_flow;
6048 /* Adjust priorities */
6049 status = ice_adj_prof_priorities(hw, blk, vsig,
6052 goto err_ice_rem_prof_id_flow;
6056 status = ICE_ERR_DOES_NOT_EXIST;
6059 /* update hardware tables */
6061 status = ice_upd_prof_hw(hw, blk, &chg);
6063 err_ice_rem_prof_id_flow:
6064 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
6065 LIST_DEL(&del->list_entry);
6069 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
6070 LIST_DEL(&del1->list);