1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_common.h"
6 #include "ice_flex_pipe.h"
7 #include "ice_protocol_type.h"
10 /* To support tunneling entries by PF, the package will append the PF number to
11 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
13 static const struct ice_tunnel_type_scan tnls[] = {
14 { TNL_VXLAN, "TNL_VXLAN_PF" },
15 { TNL_GENEVE, "TNL_GENEVE_PF" },
16 { TNL_ECPRI, "TNL_UDP_ECPRI_PF" },
20 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
24 ICE_SID_XLT_KEY_BUILDER_SW,
27 ICE_SID_PROFID_TCAM_SW,
28 ICE_SID_PROFID_REDIR_SW,
30 ICE_SID_CDID_KEY_BUILDER_SW,
37 ICE_SID_XLT_KEY_BUILDER_ACL,
40 ICE_SID_PROFID_TCAM_ACL,
41 ICE_SID_PROFID_REDIR_ACL,
43 ICE_SID_CDID_KEY_BUILDER_ACL,
44 ICE_SID_CDID_REDIR_ACL
50 ICE_SID_XLT_KEY_BUILDER_FD,
53 ICE_SID_PROFID_TCAM_FD,
54 ICE_SID_PROFID_REDIR_FD,
56 ICE_SID_CDID_KEY_BUILDER_FD,
63 ICE_SID_XLT_KEY_BUILDER_RSS,
66 ICE_SID_PROFID_TCAM_RSS,
67 ICE_SID_PROFID_REDIR_RSS,
69 ICE_SID_CDID_KEY_BUILDER_RSS,
70 ICE_SID_CDID_REDIR_RSS
76 ICE_SID_XLT_KEY_BUILDER_PE,
79 ICE_SID_PROFID_TCAM_PE,
80 ICE_SID_PROFID_REDIR_PE,
82 ICE_SID_CDID_KEY_BUILDER_PE,
88 * ice_sect_id - returns section ID
92 * This helper function returns the proper section ID given a block type and a
95 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
97 return ice_sect_lkup[blk][sect];
102 * @buf: pointer to the ice buffer
104 * This helper function validates a buffer's header.
106 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
108 struct ice_buf_hdr *hdr;
112 hdr = (struct ice_buf_hdr *)buf->buf;
114 section_count = LE16_TO_CPU(hdr->section_count);
115 if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
118 data_end = LE16_TO_CPU(hdr->data_end);
119 if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
127 * @ice_seg: pointer to the ice segment
129 * Returns the address of the buffer table within the ice segment.
131 static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
133 struct ice_nvm_table *nvms;
135 nvms = (struct ice_nvm_table *)
136 (ice_seg->device_table +
137 LE32_TO_CPU(ice_seg->device_table_count));
139 return (_FORCE_ struct ice_buf_table *)
140 (nvms->vers + LE32_TO_CPU(nvms->table_count));
145 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
146 * @state: pointer to the enum state
148 * This function will enumerate all the buffers in the ice segment. The first
149 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
150 * ice_seg is set to NULL which continues the enumeration. When the function
151 * returns a NULL pointer, then the end of the buffers has been reached, or an
152 * unexpected value has been detected (for example an invalid section count or
153 * an invalid buffer end value).
155 static struct ice_buf_hdr *
156 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
159 state->buf_table = ice_find_buf_table(ice_seg);
160 if (!state->buf_table)
164 return ice_pkg_val_buf(state->buf_table->buf_array);
167 if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
168 return ice_pkg_val_buf(state->buf_table->buf_array +
175 * ice_pkg_advance_sect
176 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
177 * @state: pointer to the enum state
179 * This helper function will advance the section within the ice segment,
180 * also advancing the buffer if needed.
183 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
185 if (!ice_seg && !state->buf)
188 if (!ice_seg && state->buf)
189 if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
192 state->buf = ice_pkg_enum_buf(ice_seg, state);
196 /* start of new buffer, reset section index */
202 * ice_pkg_enum_section
203 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
204 * @state: pointer to the enum state
205 * @sect_type: section type to enumerate
207 * This function will enumerate all the sections of a particular type in the
208 * ice segment. The first call is made with the ice_seg parameter non-NULL;
209 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
210 * When the function returns a NULL pointer, then the end of the matching
211 * sections has been reached.
214 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
220 state->type = sect_type;
222 if (!ice_pkg_advance_sect(ice_seg, state))
225 /* scan for next matching section */
226 while (state->buf->section_entry[state->sect_idx].type !=
227 CPU_TO_LE32(state->type))
228 if (!ice_pkg_advance_sect(NULL, state))
231 /* validate section */
232 offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
233 if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
236 size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
237 if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
240 /* make sure the section fits in the buffer */
241 if (offset + size > ICE_PKG_BUF_SIZE)
245 LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
247 /* calc pointer to this section */
248 state->sect = ((u8 *)state->buf) +
249 LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
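/* Illustrative sketch (not part of the driver): a typical caller zeroes an
 * ice_pkg_enum state and walks sections of one type, passing the segment on
 * the first call and NULL afterwards. ICE_SID_METADATA and struct
 * ice_meta_sect are assumed here for illustration; see ice_init_pkg_info()
 * further down for the real usage.
 *
 *	struct ice_pkg_enum state;
 *	struct ice_meta_sect *meta;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	meta = (struct ice_meta_sect *)
 *		ice_pkg_enum_section(ice_seg, &state, ICE_SID_METADATA);
 *	while (meta) {
 *		... use the metadata section ...
 *		meta = (struct ice_meta_sect *)
 *			ice_pkg_enum_section(NULL, &state, ICE_SID_METADATA);
 *	}
 */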
256 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
257 * @state: pointer to the enum state
258 * @sect_type: section type to enumerate
259 * @offset: pointer to variable that receives the offset in the table (optional)
260 * @handler: function that handles access to the entries into the section type
262 * This function will enumerate all the entries in a particular section type in
263 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
264 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
265 * When the function returns a NULL pointer, then the end of the entries has
268 * Since each section may have a different header and entry size, the handler
269 * function is needed to determine the number and location of entries in each
272 * The offset parameter is optional, but should be used for sections that
273 * contain an offset for each section table. For such cases, the section handler
274 * function must return the appropriate offset + index to give the absolute
275 * offset for each entry. For example, if the base for a section's header
276 * indicates a base offset of 10, and the index for the entry is 2, then
277 * the section handler function should set the offset to 10 + 2 = 12.
280 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
281 u32 sect_type, u32 *offset,
282 void *(*handler)(u32 sect_type, void *section,
283 u32 index, u32 *offset))
291 if (!ice_pkg_enum_section(ice_seg, state, sect_type))
294 state->entry_idx = 0;
295 state->handler = handler;
304 entry = state->handler(state->sect_type, state->sect, state->entry_idx,
307 /* end of a section, look for another section of this type */
308 if (!ice_pkg_enum_section(NULL, state, 0))
311 state->entry_idx = 0;
312 entry = state->handler(state->sect_type, state->sect,
313 state->entry_idx, offset);
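/* Illustrative sketch (not part of the driver): enumerating switch field
 * vector entries with ice_pkg_enum_entry(). The first call passes the ice
 * segment; each continuation call passes NULL. This mirrors the pattern used
 * by ice_get_prof_index_max() further down in this file.
 *
 *	struct ice_pkg_enum state;
 *	struct ice_fv *fv;
 *	u32 offset;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	fv = (struct ice_fv *)
 *		ice_pkg_enum_entry(hw->seg, &state, ICE_SID_FLD_VEC_SW,
 *				   &offset, ice_sw_fv_handler);
 *	while (fv) {
 *		... "offset" is the absolute field vector table index ...
 *		fv = (struct ice_fv *)
 *			ice_pkg_enum_entry(NULL, &state, ICE_SID_FLD_VEC_SW,
 *					   &offset, ice_sw_fv_handler);
 *	}
 */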
320 * ice_hw_ptype_ena - check if the PTYPE is enabled or not
321 * @hw: pointer to the HW structure
322 * @ptype: the hardware PTYPE
324 bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
326 return ptype < ICE_FLOW_PTYPE_MAX &&
327 ice_is_bit_set(hw->hw_ptype, ptype);
331 * ice_marker_ptype_tcam_handler
332 * @sect_type: section type
333 * @section: pointer to section
334 * @index: index of the Marker PType TCAM entry to be returned
335 * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
337 * This is a callback function that can be passed to ice_pkg_enum_entry.
338 * Handles enumeration of individual Marker PType TCAM entries.
341 ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
344 struct ice_marker_ptype_tcam_section *marker_ptype;
349 if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
352 /* cppcheck-suppress nullPointer */
353 if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
359 marker_ptype = (struct ice_marker_ptype_tcam_section *)section;
360 if (index >= LE16_TO_CPU(marker_ptype->count))
363 return marker_ptype->tcam + index;
367 * ice_fill_hw_ptype - fill the enabled PTYPE bit information
368 * @hw: pointer to the HW structure
371 ice_fill_hw_ptype(struct ice_hw *hw)
373 struct ice_marker_ptype_tcam_entry *tcam;
374 struct ice_seg *seg = hw->seg;
375 struct ice_pkg_enum state;
377 ice_zero_bitmap(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
381 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
384 tcam = (struct ice_marker_ptype_tcam_entry *)
385 ice_pkg_enum_entry(seg, &state,
386 ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
387 ice_marker_ptype_tcam_handler);
389 LE16_TO_CPU(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
390 LE16_TO_CPU(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
391 ice_set_bit(LE16_TO_CPU(tcam->ptype), hw->hw_ptype);
398 * ice_boost_tcam_handler
399 * @sect_type: section type
400 * @section: pointer to section
401 * @index: index of the boost TCAM entry to be returned
402 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
404 * This is a callback function that can be passed to ice_pkg_enum_entry.
405 * Handles enumeration of individual boost TCAM entries.
408 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
410 struct ice_boost_tcam_section *boost;
415 if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
418 if (index > ICE_MAX_BST_TCAMS_IN_BUF)
424 boost = (struct ice_boost_tcam_section *)section;
425 if (index >= LE16_TO_CPU(boost->count))
428 return boost->tcam + index;
432 * ice_find_boost_entry
433 * @ice_seg: pointer to the ice segment (non-NULL)
434 * @addr: Boost TCAM address of entry to search for
435 * @entry: returns pointer to the entry
437 * Finds a particular Boost TCAM entry and returns a pointer to that entry
438 * if it is found. The ice_seg parameter must not be NULL since the first call
439 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
441 static enum ice_status
442 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
443 struct ice_boost_tcam_entry **entry)
445 struct ice_boost_tcam_entry *tcam;
446 struct ice_pkg_enum state;
448 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
451 return ICE_ERR_PARAM;
454 tcam = (struct ice_boost_tcam_entry *)
455 ice_pkg_enum_entry(ice_seg, &state,
456 ICE_SID_RXPARSER_BOOST_TCAM, NULL,
457 ice_boost_tcam_handler);
458 if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
471 * ice_label_enum_handler
472 * @sect_type: section type
473 * @section: pointer to section
474 * @index: index of the label entry to be returned
475 * @offset: pointer to receive absolute offset, always zero for label sections
477 * This is a callback function that can be passed to ice_pkg_enum_entry.
478 * Handles enumeration of individual label entries.
481 ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
484 struct ice_label_section *labels;
489 if (index > ICE_MAX_LABELS_IN_BUF)
495 labels = (struct ice_label_section *)section;
496 if (index >= LE16_TO_CPU(labels->count))
499 return labels->label + index;
504 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
505 * @type: the section type that will contain the label (0 on subsequent calls)
506 * @state: ice_pkg_enum structure that will hold the state of the enumeration
507 * @value: pointer to a value that will return the label's value if found
509 * Enumerates a list of labels in the package. The caller will call
510 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
511 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
512 * the end of the list has been reached.
515 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
518 struct ice_label *label;
520 /* Check for valid label section on first call */
521 if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
524 label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
526 ice_label_enum_handler);
530 *value = LE16_TO_CPU(label->value);
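/* Illustrative sketch (not part of the driver): walking all labels of one
 * section type. Start with the segment, continue with NULL until the function
 * returns NULL; ice_init_pkg_hints() below uses exactly this pattern, and the
 * variable types here are assumptions for the sketch.
 *
 *	struct ice_pkg_enum state;
 *	char *name;
 *	u16 val;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
 *			       &val);
 *	while (name) {
 *		... "name" is the label string, "val" its value ...
 *		name = ice_enum_labels(NULL, 0, &state, &val);
 *	}
 */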
536 * @hw: pointer to the HW structure
537 * @ice_seg: pointer to the segment of the package scan (non-NULL)
539 * This function will scan the package and save off relevant information
540 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
541 * since the first call to ice_enum_labels requires a pointer to an actual
544 static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
546 struct ice_pkg_enum state;
551 ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
552 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
557 label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
560 while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
561 for (i = 0; tnls[i].type != TNL_LAST; i++) {
562 size_t len = strlen(tnls[i].label_prefix);
564 /* Look for matching label start, before continuing */
565 if (strncmp(label_name, tnls[i].label_prefix, len))
568 /* Make sure this label matches our PF. Note that the PF
569 * character ('0' - '7') will be located where our
570 * prefix string's null terminator is located.
572 if ((label_name[len] - '0') == hw->pf_id) {
573 hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
574 hw->tnl.tbl[hw->tnl.count].valid = false;
575 hw->tnl.tbl[hw->tnl.count].in_use = false;
576 hw->tnl.tbl[hw->tnl.count].marked = false;
577 hw->tnl.tbl[hw->tnl.count].boost_addr = val;
578 hw->tnl.tbl[hw->tnl.count].port = 0;
584 label_name = ice_enum_labels(NULL, 0, &state, &val);
587 /* Cache the appropriate boost TCAM entry pointers */
588 for (i = 0; i < hw->tnl.count; i++) {
589 ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
590 &hw->tnl.tbl[i].boost_entry);
591 if (hw->tnl.tbl[i].boost_entry)
592 hw->tnl.tbl[i].valid = true;
598 #define ICE_DC_KEY 0x1 /* don't care */
599 #define ICE_DC_KEYINV 0x1
600 #define ICE_NM_KEY 0x0 /* never match */
601 #define ICE_NM_KEYINV 0x0
602 #define ICE_0_KEY 0x1 /* match 0 */
603 #define ICE_0_KEYINV 0x0
604 #define ICE_1_KEY 0x0 /* match 1 */
605 #define ICE_1_KEYINV 0x1
608 * ice_gen_key_word - generate 16-bits of a key/mask word
610 * @valid: valid bits mask (change only the valid bits)
611 * @dont_care: don't care mask
612 * @nvr_mtch: never match mask
613 * @key: pointer to the array where the resulting key portion will be stored
614 * @key_inv: pointer to the array where the resulting key invert portion will be stored
616 * This function generates 16 bits from an 8-bit value, an 8-bit don't care mask
617 * and an 8-bit never match mask. The 16 bits of output are divided into 8 bits
618 * of key and 8 bits of key invert.
620 * '0' = b01, always match a 0 bit
621 * '1' = b10, always match a 1 bit
622 * '?' = b11, don't care bit (always matches)
623 * '~' = b00, never match bit
627 * dont_care: b0 0 1 1 0 0
628 * never_mtch: b0 0 0 0 1 1
629 * ------------------------------
630 * Result: key: b01 10 11 11 00 00
632 static enum ice_status
633 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
636 u8 in_key = *key, in_key_inv = *key_inv;
639 /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
640 if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
646 /* encode the 8 bits into 8-bit key and 8-bit key invert */
647 for (i = 0; i < 8; i++) {
651 if (!(valid & 0x1)) { /* change only valid bits */
652 *key |= (in_key & 0x1) << 7;
653 *key_inv |= (in_key_inv & 0x1) << 7;
654 } else if (dont_care & 0x1) { /* don't care bit */
655 *key |= ICE_DC_KEY << 7;
656 *key_inv |= ICE_DC_KEYINV << 7;
657 } else if (nvr_mtch & 0x1) { /* never match bit */
658 *key |= ICE_NM_KEY << 7;
659 *key_inv |= ICE_NM_KEYINV << 7;
660 } else if (val & 0x01) { /* exact 1 match */
661 *key |= ICE_1_KEY << 7;
662 *key_inv |= ICE_1_KEYINV << 7;
663 } else { /* exact 0 match */
664 *key |= ICE_0_KEY << 7;
665 *key_inv |= ICE_0_KEYINV << 7;
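/* Worked example (illustration only, not from the package spec): with
 * *key = *key_inv = 0 on entry, valid = 0xFF, val = 0x0F, dont_care = 0xF0
 * and nvr_mtch = 0x00, the low four bits encode "match 1" (key 0, key invert
 * 1) and the high four bits encode "don't care" (key 1, key invert 1),
 * giving *key = 0xF0 and *key_inv = 0xFF on return.
 */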
680 * ice_bits_max_set - determine if the number of bits set is within a maximum
681 * @mask: pointer to the byte array which is the mask
682 * @size: the number of bytes in the mask
683 * @max: the max number of set bits
685 * This function determines if there are at most 'max' number of bits set in an
686 * array. Returns true if the number of bits set is <= max, otherwise returns false
689 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
694 /* check each byte */
695 for (i = 0; i < size; i++) {
696 /* if 0, go to next byte */
700 /* We know there is at least one set bit in this byte because of
701 * the above check; if we already have found 'max' number of
702 * bits set, then we can return failure now.
707 /* count the bits in this byte, checking threshold */
708 count += ice_hweight8(mask[i]);
717 * ice_set_key - generate a variable sized key with multiples of 16-bits
718 * @key: pointer to where the key will be stored
719 * @size: the size of the complete key in bytes (must be even)
720 * @val: array of 8-bit values that makes up the value portion of the key
721 * @upd: array of 8-bit masks that determine what key portion to update
722 * @dc: array of 8-bit masks that make up the don't care mask
723 * @nm: array of 8-bit masks that make up the never match mask
724 * @off: the offset of the first byte in the key to update
725 * @len: the number of bytes in the key update
727 * This function generates a key from a value, a don't care mask and a never
729 * upd, dc, and nm are optional parameters, and can be NULL:
730 * upd == NULL --> upd mask is all 1's (update all bits)
731 * dc == NULL --> dc mask is all 0's (no don't care bits)
732 * nm == NULL --> nm mask is all 0's (no never match bits)
735 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
741 /* size must be a multiple of 2 bytes. */
744 half_size = size / 2;
746 if (off + len > half_size)
749 /* Make sure at most one bit is set in the never match mask. Having more
750 * than one never match mask bit set will cause HW to consume excessive
751 * power otherwise; this is a power management efficiency check.
753 #define ICE_NVR_MTCH_BITS_MAX 1
754 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
757 for (i = 0; i < len; i++)
758 if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
759 dc ? dc[i] : 0, nm ? nm[i] : 0,
760 key + off + i, key + half_size + off + i))
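/* Illustrative sketch (not part of the driver): for a 4-byte key the first
 * two bytes hold the key and the last two the key invert; off/len address a
 * range within that half. ice_create_tunnel() below uses this helper to patch
 * the destination port bits of a boost TCAM key. The values here are made up.
 *
 *	u8 key[4] = { 0 };
 *	u8 val[2] = { 0x12, 0x34 };
 *
 *	if (ice_set_key(key, sizeof(key), val, NULL, NULL, NULL, 0,
 *			sizeof(val)))
 *		... error: bad size/offset or too many never-match bits ...
 */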
767 * ice_acquire_global_cfg_lock
768 * @hw: pointer to the HW structure
769 * @access: access type (read or write)
771 * This function will request ownership of the global config lock for reading
772 * or writing of the package. When attempting to obtain write access, the
773 * caller must check for the following two return values:
775 * ICE_SUCCESS - Means the caller has acquired the global config lock
776 * and can perform writing of the package.
777 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
778 * package or has found that no update was necessary; in
779 * this case, the caller can just skip performing any
780 * update of the package.
782 static enum ice_status
783 ice_acquire_global_cfg_lock(struct ice_hw *hw,
784 enum ice_aq_res_access_type access)
786 enum ice_status status;
788 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
790 status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
791 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
793 if (status == ICE_ERR_AQ_NO_WORK)
794 ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
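/* Illustrative sketch (not part of the driver): a writer must treat
 * ICE_ERR_AQ_NO_WORK as "package already downloaded" rather than as a
 * failure; ice_dwnld_cfg_bufs() below follows this pattern.
 *
 *	enum ice_status status;
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (!status) {
 *		... download the package buffers ...
 *		ice_release_global_cfg_lock(hw);
 *	} else if (status == ICE_ERR_AQ_NO_WORK) {
 *		... another PF already loaded the package; skip the download ...
 *	}
 */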
800 * ice_release_global_cfg_lock
801 * @hw: pointer to the HW structure
803 * This function will release the global config lock.
805 static void ice_release_global_cfg_lock(struct ice_hw *hw)
807 ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
811 * ice_acquire_change_lock
812 * @hw: pointer to the HW structure
813 * @access: access type (read or write)
815 * This function will request ownership of the change lock.
818 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
820 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
822 return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
823 ICE_CHANGE_LOCK_TIMEOUT);
827 * ice_release_change_lock
828 * @hw: pointer to the HW structure
830 * This function will release the change lock using the proper Admin Command.
832 void ice_release_change_lock(struct ice_hw *hw)
834 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
836 ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
840 * ice_aq_download_pkg
841 * @hw: pointer to the hardware structure
842 * @pkg_buf: the package buffer to transfer
843 * @buf_size: the size of the package buffer
844 * @last_buf: last buffer indicator
845 * @error_offset: returns error offset
846 * @error_info: returns error information
847 * @cd: pointer to command details structure or NULL
849 * Download Package (0x0C40)
851 static enum ice_status
852 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
853 u16 buf_size, bool last_buf, u32 *error_offset,
854 u32 *error_info, struct ice_sq_cd *cd)
856 struct ice_aqc_download_pkg *cmd;
857 struct ice_aq_desc desc;
858 enum ice_status status;
860 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
867 cmd = &desc.params.download_pkg;
868 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
869 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
872 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
874 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
875 if (status == ICE_ERR_AQ_ERROR) {
876 /* Read error from buffer only when the FW returned an error */
877 struct ice_aqc_download_pkg_resp *resp;
879 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
881 *error_offset = LE32_TO_CPU(resp->error_offset);
883 *error_info = LE32_TO_CPU(resp->error_info);
890 * ice_aq_upload_section
891 * @hw: pointer to the hardware structure
892 * @pkg_buf: the package buffer which will receive the section
893 * @buf_size: the size of the package buffer
894 * @cd: pointer to command details structure or NULL
896 * Upload Section (0x0C41)
899 ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
900 u16 buf_size, struct ice_sq_cd *cd)
902 struct ice_aq_desc desc;
904 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
905 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
906 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
908 return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
913 * @hw: pointer to the hardware structure
914 * @pkg_buf: the package cmd buffer
915 * @buf_size: the size of the package cmd buffer
916 * @last_buf: last buffer indicator
917 * @error_offset: returns error offset
918 * @error_info: returns error information
919 * @cd: pointer to command details structure or NULL
921 * Update Package (0x0C42)
923 static enum ice_status
924 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
925 bool last_buf, u32 *error_offset, u32 *error_info,
926 struct ice_sq_cd *cd)
928 struct ice_aqc_download_pkg *cmd;
929 struct ice_aq_desc desc;
930 enum ice_status status;
932 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
939 cmd = &desc.params.download_pkg;
940 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
941 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
944 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
946 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
947 if (status == ICE_ERR_AQ_ERROR) {
948 /* Read error from buffer only when the FW returned an error */
949 struct ice_aqc_download_pkg_resp *resp;
951 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
953 *error_offset = LE32_TO_CPU(resp->error_offset);
955 *error_info = LE32_TO_CPU(resp->error_info);
962 * ice_find_seg_in_pkg
963 * @hw: pointer to the hardware structure
964 * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
965 * @pkg_hdr: pointer to the package header to be searched
967 * This function searches a package file for a particular segment type. On
968 * success it returns a pointer to the segment header, otherwise it will
971 static struct ice_generic_seg_hdr *
972 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
973 struct ice_pkg_hdr *pkg_hdr)
977 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
978 ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
979 pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
980 pkg_hdr->pkg_format_ver.update,
981 pkg_hdr->pkg_format_ver.draft);
983 /* Search all package segments for the requested segment type */
984 for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
985 struct ice_generic_seg_hdr *seg;
987 seg = (struct ice_generic_seg_hdr *)
988 ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
990 if (LE32_TO_CPU(seg->seg_type) == seg_type)
999 * @hw: pointer to the hardware structure
1000 * @bufs: pointer to an array of buffers
1001 * @count: the number of buffers in the array
1003 * Obtains change lock and updates package.
1006 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1008 enum ice_status status;
1009 u32 offset, info, i;
1011 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
1015 for (i = 0; i < count; i++) {
1016 struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
1017 bool last = ((i + 1) == count);
1019 status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
1020 last, &offset, &info, NULL);
1023 ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
1024 status, offset, info);
1029 ice_release_change_lock(hw);
1035 * ice_dwnld_cfg_bufs
1036 * @hw: pointer to the hardware structure
1037 * @bufs: pointer to an array of buffers
1038 * @count: the number of buffers in the array
1040 * Obtains global config lock and downloads the package configuration buffers
1041 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
1042 * found indicates that the rest of the buffers are all metadata buffers.
1044 static enum ice_status
1045 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1047 enum ice_status status;
1048 struct ice_buf_hdr *bh;
1049 u32 offset, info, i;
1051 if (!bufs || !count)
1052 return ICE_ERR_PARAM;
1054 /* If the first buffer's first section has its metadata bit set
1055 * then there are no buffers to be downloaded, and the operation is
1056 * considered a success.
1058 bh = (struct ice_buf_hdr *)bufs;
1059 if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
1062 /* reset pkg_dwnld_status in case this function is called in the
1063 * reset/rebuild flow
1065 hw->pkg_dwnld_status = ICE_AQ_RC_OK;
1067 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
1069 if (status == ICE_ERR_AQ_NO_WORK)
1070 hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
1072 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
1076 for (i = 0; i < count; i++) {
1077 bool last = ((i + 1) == count);
1080 /* check next buffer for metadata flag */
1081 bh = (struct ice_buf_hdr *)(bufs + i + 1);
1083 /* A set metadata flag in the next buffer will signal
1084 * that the current buffer will be the last buffer
1087 if (LE16_TO_CPU(bh->section_count))
1088 if (LE32_TO_CPU(bh->section_entry[0].type) &
1093 bh = (struct ice_buf_hdr *)(bufs + i);
1095 status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
1096 &offset, &info, NULL);
1098 /* Save AQ status from download package */
1099 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
1101 ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
1102 status, offset, info);
1111 status = ice_set_vlan_mode(hw);
1113 ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
1117 ice_release_global_cfg_lock(hw);
1123 * ice_aq_get_pkg_info_list
1124 * @hw: pointer to the hardware structure
1125 * @pkg_info: the buffer which will receive the information list
1126 * @buf_size: the size of the pkg_info information buffer
1127 * @cd: pointer to command details structure or NULL
1129 * Get Package Info List (0x0C43)
1131 static enum ice_status
1132 ice_aq_get_pkg_info_list(struct ice_hw *hw,
1133 struct ice_aqc_get_pkg_info_resp *pkg_info,
1134 u16 buf_size, struct ice_sq_cd *cd)
1136 struct ice_aq_desc desc;
1138 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1139 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
1141 return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
1146 * @hw: pointer to the hardware structure
1147 * @ice_seg: pointer to the segment of the package to be downloaded
1149 * Handles the download of a complete package.
1151 static enum ice_status
1152 ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
1154 struct ice_buf_table *ice_buf_tbl;
1155 enum ice_status status;
1157 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1158 ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
1159 ice_seg->hdr.seg_format_ver.major,
1160 ice_seg->hdr.seg_format_ver.minor,
1161 ice_seg->hdr.seg_format_ver.update,
1162 ice_seg->hdr.seg_format_ver.draft);
1164 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
1165 LE32_TO_CPU(ice_seg->hdr.seg_type),
1166 LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
1168 ice_buf_tbl = ice_find_buf_table(ice_seg);
1170 ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
1171 LE32_TO_CPU(ice_buf_tbl->buf_count));
1173 status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
1174 LE32_TO_CPU(ice_buf_tbl->buf_count));
1176 ice_cache_vlan_mode(hw);
1183 * @hw: pointer to the hardware structure
1184 * @pkg_hdr: pointer to the driver's package hdr
1186 * Saves off the package details into the HW structure.
1188 static enum ice_status
1189 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
1191 struct ice_generic_seg_hdr *seg_hdr;
1193 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1195 return ICE_ERR_PARAM;
1197 seg_hdr = (struct ice_generic_seg_hdr *)
1198 ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
1200 struct ice_meta_sect *meta;
1201 struct ice_pkg_enum state;
1203 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1205 /* Get package information from the Metadata Section */
1206 meta = (struct ice_meta_sect *)
1207 ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
1210 ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
1214 hw->pkg_ver = meta->ver;
1215 ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
1216 ICE_NONDMA_TO_NONDMA);
1218 ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
1219 meta->ver.major, meta->ver.minor, meta->ver.update,
1220 meta->ver.draft, meta->name);
1222 hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
1223 ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
1224 sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
1226 ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
1227 seg_hdr->seg_format_ver.major,
1228 seg_hdr->seg_format_ver.minor,
1229 seg_hdr->seg_format_ver.update,
1230 seg_hdr->seg_format_ver.draft,
1233 ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
1242 * @hw: pointer to the hardware structure
1244 * Store details of the package currently loaded in HW into the HW structure.
1246 static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
1248 struct ice_aqc_get_pkg_info_resp *pkg_info;
1249 enum ice_status status;
1253 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1255 size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
1256 pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1258 return ICE_ERR_NO_MEMORY;
1260 status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
1262 goto init_pkg_free_alloc;
1264 for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
1265 #define ICE_PKG_FLAG_COUNT 4
1266 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
1269 if (pkg_info->pkg_info[i].is_active) {
1270 flags[place++] = 'A';
1271 hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
1272 hw->active_track_id =
1273 LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
1274 ice_memcpy(hw->active_pkg_name,
1275 pkg_info->pkg_info[i].name,
1276 sizeof(pkg_info->pkg_info[i].name),
1277 ICE_NONDMA_TO_NONDMA);
1278 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
1280 if (pkg_info->pkg_info[i].is_active_at_boot)
1281 flags[place++] = 'B';
1282 if (pkg_info->pkg_info[i].is_modified)
1283 flags[place++] = 'M';
1284 if (pkg_info->pkg_info[i].is_in_nvm)
1285 flags[place++] = 'N';
1287 ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
1288 i, pkg_info->pkg_info[i].ver.major,
1289 pkg_info->pkg_info[i].ver.minor,
1290 pkg_info->pkg_info[i].ver.update,
1291 pkg_info->pkg_info[i].ver.draft,
1292 pkg_info->pkg_info[i].name, flags);
1295 init_pkg_free_alloc:
1296 ice_free(hw, pkg_info);
1302 * ice_verify_pkg - verify package
1303 * @pkg: pointer to the package buffer
1304 * @len: size of the package buffer
1306 * Verifies various attributes of the package file, including length, format
1307 * version, and the requirement of at least one segment.
1309 static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1314 if (len < ice_struct_size(pkg, seg_offset, 1))
1315 return ICE_ERR_BUF_TOO_SHORT;
1317 if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1318 pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1319 pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1320 pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1323 /* pkg must have at least one segment */
1324 seg_count = LE32_TO_CPU(pkg->seg_count);
1328 /* make sure segment array fits in package length */
1329 if (len < ice_struct_size(pkg, seg_offset, seg_count))
1330 return ICE_ERR_BUF_TOO_SHORT;
1332 /* all segments must fit within length */
1333 for (i = 0; i < seg_count; i++) {
1334 u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
1335 struct ice_generic_seg_hdr *seg;
1337 /* segment header must fit */
1338 if (len < off + sizeof(*seg))
1339 return ICE_ERR_BUF_TOO_SHORT;
1341 seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1343 /* segment body must fit */
1344 if (len < off + LE32_TO_CPU(seg->seg_size))
1345 return ICE_ERR_BUF_TOO_SHORT;
1352 * ice_free_seg - free package segment pointer
1353 * @hw: pointer to the hardware structure
1355 * Frees the package segment pointer in the proper manner, depending on whether
1356 * the segment was allocated or just the passed in pointer was stored.
1358 void ice_free_seg(struct ice_hw *hw)
1361 ice_free(hw, hw->pkg_copy);
1362 hw->pkg_copy = NULL;
1369 * ice_init_pkg_regs - initialize additional package registers
1370 * @hw: pointer to the hardware structure
1372 static void ice_init_pkg_regs(struct ice_hw *hw)
1374 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1375 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1376 #define ICE_SW_BLK_IDX 0
1377 if (hw->dcf_enabled)
1380 /* setup Switch block input mask, which is 48-bits in two parts */
1381 wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1382 wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1386 * ice_chk_pkg_version - check package version for compatibility with driver
1387 * @pkg_ver: pointer to a version structure to check
1389 * Check to make sure that the package about to be downloaded is compatible with
1390 * the driver. To be compatible, the major and minor components of the package
1391 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
1394 static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1396 if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
1397 pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
1398 return ICE_ERR_NOT_SUPPORTED;
1404 * ice_chk_pkg_compat
1405 * @hw: pointer to the hardware structure
1406 * @ospkg: pointer to the package hdr
1407 * @seg: pointer to the package segment hdr
1409 * This function checks the package version compatibility with driver and NVM
1411 static enum ice_status
1412 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1413 struct ice_seg **seg)
1415 struct ice_aqc_get_pkg_info_resp *pkg;
1416 enum ice_status status;
1420 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1422 /* Check package version compatibility */
1423 status = ice_chk_pkg_version(&hw->pkg_ver);
1425 ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1429 /* find ICE segment in given package */
1430 *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
1433 ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1437 /* Check if FW is compatible with the OS package */
1438 size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
1439 pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1441 return ICE_ERR_NO_MEMORY;
1443 status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
1445 goto fw_ddp_compat_free_alloc;
1447 for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
1448 /* loop till we find the NVM package */
1449 if (!pkg->pkg_info[i].is_in_nvm)
1451 if ((*seg)->hdr.seg_format_ver.major !=
1452 pkg->pkg_info[i].ver.major ||
1453 (*seg)->hdr.seg_format_ver.minor >
1454 pkg->pkg_info[i].ver.minor) {
1455 status = ICE_ERR_FW_DDP_MISMATCH;
1456 ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1458 /* done processing NVM package so break */
1461 fw_ddp_compat_free_alloc:
1468 * @sect_type: section type
1469 * @section: pointer to section
1470 * @index: index of the field vector entry to be returned
1471 * @offset: ptr to variable that receives the offset in the field vector table
1473 * This is a callback function that can be passed to ice_pkg_enum_entry.
1474 * This function treats the given section as being of type ice_sw_fv_section and
1475 * enumerates its offset field. "offset" is an index into the field vector table.
1478 ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1480 struct ice_sw_fv_section *fv_section =
1481 (struct ice_sw_fv_section *)section;
1483 if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1485 if (index >= LE16_TO_CPU(fv_section->count))
1488 /* "index" passed in to this function is relative to a given
1489 * 4k block. To get to the true index into the field vector
1490 * table, we need to add the relative index to the base_offset
1491 * field of this section
1493 *offset = LE16_TO_CPU(fv_section->base_offset) + index;
1494 return fv_section->fv + index;
1498 * ice_get_prof_index_max - get the max profile index of the profiles in use
1499 * @hw: pointer to the HW struct
1501 * Calling this function will get the max profile index of the profiles in use
1502 * and store the index number in struct ice_switch_info *switch_info
1503 * in hw for later use.
1505 static int ice_get_prof_index_max(struct ice_hw *hw)
1507 u16 prof_index = 0, j, max_prof_index = 0;
1508 struct ice_pkg_enum state;
1509 struct ice_seg *ice_seg;
1514 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1517 return ICE_ERR_PARAM;
1522 fv = (struct ice_fv *)
1523 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1524 &offset, ice_sw_fv_handler);
1529 /* for a profile that is not in use, the prot_id is set to 0xff
1530 * and the off is set to 0x1ff for all the field vectors.
1532 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1533 if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1534 fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1536 if (flag && prof_index > max_prof_index)
1537 max_prof_index = prof_index;
1543 hw->switch_info->max_used_prof_index = max_prof_index;
1549 * ice_init_pkg - initialize/download package
1550 * @hw: pointer to the hardware structure
1551 * @buf: pointer to the package buffer
1552 * @len: size of the package buffer
1554 * This function initializes a package. The package contains HW tables
1555 * required to do packet processing. First, the function extracts package
1556 * information such as version. Then it finds the ice configuration segment
1557 * within the package; this function then saves a copy of the segment pointer
1558 * within the supplied package buffer. Next, the function will cache any hints
1559 * from the package, followed by downloading the package itself. Note that if
1560 * a previous PF driver has already downloaded the package successfully, then
1561 * the current driver will not have to download the package again.
1563 * The local package contents will be used to query default behavior and to
1564 * update specific sections of the HW's version of the package (e.g. to update
1565 * the parse graph to understand new protocols).
1567 * This function stores a pointer to the package buffer memory, and it is
1568 * expected that the supplied buffer will not be freed immediately. If the
1569 * package buffer needs to be freed, such as when read from a file, use
1570 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1573 enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1575 struct ice_pkg_hdr *pkg;
1576 enum ice_status status;
1577 struct ice_seg *seg;
1580 return ICE_ERR_PARAM;
1582 pkg = (struct ice_pkg_hdr *)buf;
1583 status = ice_verify_pkg(pkg, len);
1585 ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1590 /* initialize package info */
1591 status = ice_init_pkg_info(hw, pkg);
1595 /* before downloading the package, check package version for
1596 * compatibility with driver
1598 status = ice_chk_pkg_compat(hw, pkg, &seg);
1602 /* initialize package hints and then download package */
1603 ice_init_pkg_hints(hw, seg);
1604 status = ice_download_pkg(hw, seg);
1605 if (status == ICE_ERR_AQ_NO_WORK) {
1606 ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1607 status = ICE_SUCCESS;
1610 /* Get information on the package currently loaded in HW, then make sure
1611 * the driver is compatible with this version.
1614 status = ice_get_pkg_info(hw);
1616 status = ice_chk_pkg_version(&hw->active_pkg_ver);
1621 /* on successful package download update other required
1622 * registers to support the package and fill HW tables
1623 * with package content.
1625 ice_init_pkg_regs(hw);
1626 ice_fill_blk_tbls(hw);
1627 ice_fill_hw_ptype(hw);
1628 ice_get_prof_index_max(hw);
1630 ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1638 * ice_copy_and_init_pkg - initialize/download a copy of the package
1639 * @hw: pointer to the hardware structure
1640 * @buf: pointer to the package buffer
1641 * @len: size of the package buffer
1643 * This function copies the package buffer, and then calls ice_init_pkg() to
1644 * initialize the copied package contents.
1646 * The copying is necessary if the package buffer supplied is constant, or if
1647 * the memory may disappear shortly after calling this function.
1649 * If the package buffer resides in the data segment and can be modified, the
1650 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1652 * However, if the package buffer needs to be copied first, such as when being
1653 * read from a file, the caller should use ice_copy_and_init_pkg().
1655 * This function will first copy the package buffer, before calling
1656 * ice_init_pkg(). The caller is free to immediately destroy the original
1657 * package buffer, as the new copy will be managed by this function and
1660 enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1662 enum ice_status status;
1666 return ICE_ERR_PARAM;
1668 buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
1670 status = ice_init_pkg(hw, buf_copy, len);
1672 /* Free the copy, since we failed to initialize the package */
1673 ice_free(hw, buf_copy);
1675 /* Track the copied pkg so we can free it later */
1676 hw->pkg_copy = buf_copy;
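/* Illustrative sketch (not part of the driver): a typical init flow reads the
 * DDP file into a temporary buffer ("buf" and "len" are assumed to come from
 * the caller's file I/O) and lets ice_copy_and_init_pkg() own the copy.
 *
 *	enum ice_status status;
 *
 *	status = ice_copy_and_init_pkg(hw, buf, len);
 *	if (status)
 *		... package rejected or download failed ...
 *	... "buf" may be freed immediately; hw->pkg_copy holds the copy ...
 */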
1685 * @hw: pointer to the HW structure
1687 * Allocates a package buffer and returns a pointer to the buffer header.
1688 * Note: all package contents must be in Little Endian form.
1690 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1692 struct ice_buf_build *bld;
1693 struct ice_buf_hdr *buf;
1695 bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
1699 buf = (struct ice_buf_hdr *)bld;
1700 buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
1706 * ice_get_sw_prof_type - determine switch profile type
1707 * @hw: pointer to the HW structure
1708 * @fv: pointer to the switch field vector
1710 static enum ice_prof_type
1711 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
1715 for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1716 /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
1717 if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1718 fv->ew[i].off == ICE_VNI_OFFSET)
1719 return ICE_PROF_TUN_UDP;
1721 /* GRE tunnel will have GRE protocol */
1722 if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1723 return ICE_PROF_TUN_GRE;
1725 /* PPPOE tunnel will have PPPOE protocol */
1726 if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
1727 return ICE_PROF_TUN_PPPOE;
1730 return ICE_PROF_NON_TUN;
1734 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1735 * @hw: pointer to hardware structure
1736 * @req_profs: type of profiles requested
1737 * @bm: pointer to memory for returning the bitmap of field vectors
1740 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1743 struct ice_pkg_enum state;
1744 struct ice_seg *ice_seg;
1747 if (req_profs == ICE_PROF_ALL) {
1748 ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
1752 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1753 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
1756 enum ice_prof_type prof_type;
1759 fv = (struct ice_fv *)
1760 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1761 &offset, ice_sw_fv_handler);
1765 /* Determine field vector type */
1766 prof_type = ice_get_sw_prof_type(hw, fv);
1768 if (req_profs & prof_type)
1769 ice_set_bit((u16)offset, bm);
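/* Illustrative sketch (not part of the driver): collect only the UDP-tunnel
 * capable profiles before building a field vector list; ice_declare_bitmap()
 * is assumed to be the driver's bitmap declaration helper.
 *
 *	ice_declare_bitmap(bm, ICE_MAX_NUM_PROFILES);
 *
 *	ice_get_sw_fv_bitmap(hw, ICE_PROF_TUN_UDP, bm);
 *	... pass "bm" to ice_get_sw_fv_list() below ...
 */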
1775 * ice_get_sw_fv_list
1776 * @hw: pointer to the HW structure
1777 * @prot_ids: field vector to search for with a given protocol ID
1778 * @ids_cnt: lookup/protocol count
1779 * @bm: bitmap of field vectors to consider
1780 * @fv_list: Head of a list
1782 * Finds all the field vector entries from switch block that contain
1783 * a given protocol ID and returns a list of structures of type
1784 * "ice_sw_fv_list_entry". Every structure in the list has a field vector
1785 * definition and profile ID information
1786 * NOTE: The caller of the function is responsible for freeing the memory
1787 * allocated for every list entry.
1790 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
1791 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
1793 struct ice_sw_fv_list_entry *fvl;
1794 struct ice_sw_fv_list_entry *tmp;
1795 struct ice_pkg_enum state;
1796 struct ice_seg *ice_seg;
1800 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1802 if (!ids_cnt || !hw->seg)
1803 return ICE_ERR_PARAM;
1809 fv = (struct ice_fv *)
1810 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1811 &offset, ice_sw_fv_handler);
1816 /* If field vector is not in the bitmap list, then skip this
1819 if (!ice_is_bit_set(bm, (u16)offset))
1822 for (i = 0; i < ids_cnt; i++) {
1825 /* This code assumes that if a switch field vector line
1826 * has a matching protocol, then this line will contain
1827 * the entries necessary to represent every field in
1828 * that protocol header.
1830 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1831 if (fv->ew[j].prot_id == prot_ids[i])
1833 if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1835 if (i + 1 == ids_cnt) {
1836 fvl = (struct ice_sw_fv_list_entry *)
1837 ice_malloc(hw, sizeof(*fvl));
1841 fvl->profile_id = offset;
1842 LIST_ADD(&fvl->list_entry, fv_list);
1847 if (LIST_EMPTY(fv_list))
1852 LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
1854 LIST_DEL(&fvl->list_entry);
1858 return ICE_ERR_NO_MEMORY;
1862 * ice_init_prof_result_bm - Initialize the profile result index bitmap
1863 * @hw: pointer to hardware structure
1865 void ice_init_prof_result_bm(struct ice_hw *hw)
1867 struct ice_pkg_enum state;
1868 struct ice_seg *ice_seg;
1871 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1881 fv = (struct ice_fv *)
1882 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1883 &off, ice_sw_fv_handler);
1888 ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
1891 /* Determine empty field vector indices, these can be
1892 * used for recipe results. Skip index 0, since it is
1893 * always used for Switch ID.
1895 for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1896 if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1897 fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1899 hw->switch_info->prof_res_bm[off]);
1905 * @hw: pointer to the HW structure
1906 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1908 * Frees a package buffer
1910 void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1916 * ice_pkg_buf_reserve_section
1917 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1918 * @count: the number of sections to reserve
1920 * Reserves one or more section table entries in a package buffer. This routine
1921 * can be called multiple times as long as all calls are made before calling
1922 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
1923 * has been called, the number of sections that can be allocated can no longer
1924 * be increased; not using all reserved sections is fine, but this will
1925 * result in some wasted space in the buffer.
1926 * Note: all package contents must be in Little Endian form.
1928 static enum ice_status
1929 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1931 struct ice_buf_hdr *buf;
1936 return ICE_ERR_PARAM;
1938 buf = (struct ice_buf_hdr *)&bld->buf;
1940 /* already an active section, can't increase table size */
1941 section_count = LE16_TO_CPU(buf->section_count);
1942 if (section_count > 0)
1945 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1947 bld->reserved_section_table_entries += count;
1949 data_end = LE16_TO_CPU(buf->data_end) +
1950 FLEX_ARRAY_SIZE(buf, section_entry, count);
1951 buf->data_end = CPU_TO_LE16(data_end);
1957 * ice_pkg_buf_alloc_section
1958 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1959 * @type: the section type value
1960 * @size: the size of the section to reserve (in bytes)
1962 * Reserves memory in the buffer for a section's content and updates the
1963 * buffer's status accordingly. This routine returns a pointer to the first
1964 * byte of the section start within the buffer, which is used to fill in the
1966 * Note: all package contents must be in Little Endian form.
1969 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1971 struct ice_buf_hdr *buf;
1975 if (!bld || !type || !size)
1978 buf = (struct ice_buf_hdr *)&bld->buf;
1980 /* check for enough space left in buffer */
1981 data_end = LE16_TO_CPU(buf->data_end);
1983 /* section start must align on 4 byte boundary */
1984 data_end = ICE_ALIGN(data_end, 4);
1986 if ((data_end + size) > ICE_MAX_S_DATA_END)
1989 /* check for more available section table entries */
1990 sect_count = LE16_TO_CPU(buf->section_count);
1991 if (sect_count < bld->reserved_section_table_entries) {
1992 void *section_ptr = ((u8 *)buf) + data_end;
1994 buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
1995 buf->section_entry[sect_count].size = CPU_TO_LE16(size);
1996 buf->section_entry[sect_count].type = CPU_TO_LE32(type);
1999 buf->data_end = CPU_TO_LE16(data_end);
2001 buf->section_count = CPU_TO_LE16(sect_count + 1);
2005 /* no free section table entries */
2010 * ice_pkg_buf_alloc_single_section
2011 * @hw: pointer to the HW structure
2012 * @type: the section type value
2013 * @size: the size of the section to reserve (in bytes)
2014 * @section: returns pointer to the section
2016 * Allocates a package buffer with a single section.
2017 * Note: all package contents must be in Little Endian form.
2019 struct ice_buf_build *
2020 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
2023 struct ice_buf_build *buf;
2028 buf = ice_pkg_buf_alloc(hw);
2032 if (ice_pkg_buf_reserve_section(buf, 1))
2033 goto ice_pkg_buf_alloc_single_section_err;
2035 *section = ice_pkg_buf_alloc_section(buf, type, size);
2037 goto ice_pkg_buf_alloc_single_section_err;
2041 ice_pkg_buf_alloc_single_section_err:
2042 ice_pkg_buf_free(hw, buf);
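/* Illustrative sketch (not part of the driver): build a one-section update
 * buffer, fill in the section, push it with ice_update_pkg() and free the
 * build afterwards.
 *
 *	struct ice_boost_tcam_section *sect;
 *	struct ice_buf_build *bld;
 *
 *	bld = ice_pkg_buf_alloc_single_section(hw, ICE_SID_RXPARSER_BOOST_TCAM,
 *					       ice_struct_size(sect, tcam, 1),
 *					       (void **)&sect);
 *	if (bld) {
 *		... fill sect->count and sect->tcam[0] ...
 *		ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 *		ice_pkg_buf_free(hw, bld);
 *	}
 */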
2047 * ice_pkg_buf_get_active_sections
2048 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2050 * Returns the number of active sections. Before using the package buffer
2051 * in an update package command, the caller should make sure that there is at
2052 * least one active section - otherwise, the buffer is not legal and should
2054 * Note: all package contents must be in Little Endian form.
2056 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
2058 struct ice_buf_hdr *buf;
2063 buf = (struct ice_buf_hdr *)&bld->buf;
2064 return LE16_TO_CPU(buf->section_count);
2069 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2071 * Return a pointer to the buffer's header
2073 struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
2082 * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
2083 * @hw: pointer to the HW structure
2084 * @port: port to search for
2085 * @index: optionally returns index
2087 * Returns whether a port is already in use as a tunnel, and optionally its
2090 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
2094 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2095 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2105 * ice_tunnel_port_in_use
2106 * @hw: pointer to the HW structure
2107 * @port: port to search for
2108 * @index: optionally returns index
2110 * Returns whether a port is already in use as a tunnel, and optionally its
2113 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
2117 ice_acquire_lock(&hw->tnl_lock);
2118 res = ice_tunnel_port_in_use_hlpr(hw, port, index);
2119 ice_release_lock(&hw->tnl_lock);
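/* Illustrative sketch (not part of the driver): check whether a UDP port is
 * already bound before trying to create a tunnel on it; 4789 is just an
 * example port.
 *
 *	u16 idx;
 *
 *	if (ice_tunnel_port_in_use(hw, 4789, &idx))
 *		... port already owned by hw->tnl.tbl[idx] ...
 */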
2125 * ice_tunnel_get_type
2126 * @hw: pointer to the HW structure
2127 * @port: port to search for
2128 * @type: returns tunnel type
2130 * For a given port number, this function will return the type of tunnel.
2133 ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
2138 ice_acquire_lock(&hw->tnl_lock);
2140 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2141 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2142 *type = hw->tnl.tbl[i].type;
2147 ice_release_lock(&hw->tnl_lock);
2153 * ice_find_free_tunnel_entry
2154 * @hw: pointer to the HW structure
2155 * @type: tunnel type
2156 * @index: optionally returns index
2158 * Returns whether there is a free tunnel entry, and optionally its index
2161 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
2166 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2167 if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
2168 hw->tnl.tbl[i].type == type) {
2178 * ice_get_open_tunnel_port - retrieve an open tunnel port
2179 * @hw: pointer to the HW structure
2180 * @type: tunnel type (TNL_ALL will return any open port)
2181 * @port: returns open port
2184 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
2190 ice_acquire_lock(&hw->tnl_lock);
2192 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2193 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2194 (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
2195 *port = hw->tnl.tbl[i].port;
2200 ice_release_lock(&hw->tnl_lock);
2207 * @hw: pointer to the HW structure
2208 * @type: type of tunnel
2209 * @port: port of tunnel to create
2211 * Create a tunnel by updating the parse graph in the parser. We do that by
2212 * creating a package buffer with the tunnel info and issuing an update package
2216 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
2218 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2219 enum ice_status status = ICE_ERR_MAX_LIMIT;
2220 struct ice_buf_build *bld;
2223 ice_acquire_lock(&hw->tnl_lock);
2225 if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
2226 hw->tnl.tbl[index].ref++;
2227 status = ICE_SUCCESS;
2228 goto ice_create_tunnel_end;
2231 if (!ice_find_free_tunnel_entry(hw, type, &index)) {
2232 status = ICE_ERR_OUT_OF_RANGE;
2233 goto ice_create_tunnel_end;
2236 bld = ice_pkg_buf_alloc(hw);
2238 status = ICE_ERR_NO_MEMORY;
2239 goto ice_create_tunnel_end;
2242 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2243 if (ice_pkg_buf_reserve_section(bld, 2))
2244 goto ice_create_tunnel_err;
2246 sect_rx = (struct ice_boost_tcam_section *)
2247 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2248 ice_struct_size(sect_rx, tcam, 1));
2250 goto ice_create_tunnel_err;
2251 sect_rx->count = CPU_TO_LE16(1);
2253 sect_tx = (struct ice_boost_tcam_section *)
2254 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2255 ice_struct_size(sect_tx, tcam, 1));
2257 goto ice_create_tunnel_err;
2258 sect_tx->count = CPU_TO_LE16(1);
2260 /* copy original boost entry to update package buffer */
2261 ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2262 sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
2264 /* over-write the never-match dest port key bits with the encoded port
2267 ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2268 (u8 *)&port, NULL, NULL, NULL,
2269 (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
2270 sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
2272 /* exact copy of entry to Tx section entry */
2273 ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
2274 ICE_NONDMA_TO_NONDMA);
2276 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2278 hw->tnl.tbl[index].port = port;
2279 hw->tnl.tbl[index].in_use = true;
2280 hw->tnl.tbl[index].ref = 1;
2283 ice_create_tunnel_err:
2284 ice_pkg_buf_free(hw, bld);
2286 ice_create_tunnel_end:
2287 ice_release_lock(&hw->tnl_lock);
2293 * ice_destroy_tunnel
2294 * @hw: pointer to the HW structure
2295 * @port: port of tunnel to destroy (ignored if the all parameter is true)
2296 * @all: flag that states to destroy all tunnels
2298 * Destroys a tunnel or all tunnels by creating an update package buffer
2299 * targeting the specific updates requested and then performing an update
2302 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
2304 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2305 enum ice_status status = ICE_ERR_MAX_LIMIT;
2306 struct ice_buf_build *bld;
2312 ice_acquire_lock(&hw->tnl_lock);
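/* a port with multiple references only needs its reference count dropped */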
2314 if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
2315 if (hw->tnl.tbl[index].ref > 1) {
2316 hw->tnl.tbl[index].ref--;
2317 status = ICE_SUCCESS;
2318 goto ice_destroy_tunnel_end;
2321 /* determine count */
2322 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2323 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2324 (all || hw->tnl.tbl[i].port == port))
2328 status = ICE_ERR_PARAM;
2329 goto ice_destroy_tunnel_end;
2332 /* size of section - there is at least one entry */
2333 size = ice_struct_size(sect_rx, tcam, count);
2335 bld = ice_pkg_buf_alloc(hw);
2337 status = ICE_ERR_NO_MEMORY;
2338 goto ice_destroy_tunnel_end;
2341 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2342 if (ice_pkg_buf_reserve_section(bld, 2))
2343 goto ice_destroy_tunnel_err;
2345 sect_rx = (struct ice_boost_tcam_section *)
2346 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2349 goto ice_destroy_tunnel_err;
2350 sect_rx->count = CPU_TO_LE16(count);
2352 sect_tx = (struct ice_boost_tcam_section *)
2353 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2356 goto ice_destroy_tunnel_err;
2357 sect_tx->count = CPU_TO_LE16(count);
2359 /* copy original boost entry to update package buffer, one copy to Rx
2360 * section, another copy to the Tx section
2362 for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2363 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2364 (all || hw->tnl.tbl[i].port == port)) {
2365 ice_memcpy(sect_rx->tcam + j,
2366 hw->tnl.tbl[i].boost_entry,
2367 sizeof(*sect_rx->tcam),
2368 ICE_NONDMA_TO_NONDMA);
2369 ice_memcpy(sect_tx->tcam + j,
2370 hw->tnl.tbl[i].boost_entry,
2371 sizeof(*sect_tx->tcam),
2372 ICE_NONDMA_TO_NONDMA);
2373 hw->tnl.tbl[i].marked = true;
2377 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
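/* on success, clear the software state of each entry marked for removal */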
2379 for (i = 0; i < hw->tnl.count &&
2380 i < ICE_TUNNEL_MAX_ENTRIES; i++)
2381 if (hw->tnl.tbl[i].marked) {
2382 hw->tnl.tbl[i].ref = 0;
2383 hw->tnl.tbl[i].port = 0;
2384 hw->tnl.tbl[i].in_use = false;
2385 hw->tnl.tbl[i].marked = false;
2388 ice_destroy_tunnel_err:
2389 ice_pkg_buf_free(hw, bld);
2391 ice_destroy_tunnel_end:
2392 ice_release_lock(&hw->tnl_lock);
2398 * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
2399 * @hw: pointer to the hardware structure
2400 * @blk: hardware block
2402 * @fv_idx: field vector word index
2403 * @prot: variable to receive the protocol ID
2404 * @off: variable to receive the protocol offset
2407 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
2410 struct ice_fv_word *fv_ext;
2412 if (prof >= hw->blk[blk].es.count)
2413 return ICE_ERR_PARAM;
2415 if (fv_idx >= hw->blk[blk].es.fvw)
2416 return ICE_ERR_PARAM;
2418 fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
2420 *prot = fv_ext[fv_idx].prot_id;
2421 *off = fv_ext[fv_idx].off;
2426 /* PTG Management */
2429 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
2430 * @hw: pointer to the hardware structure
2432 * @ptype: the ptype to search for
2433 * @ptg: pointer to variable that receives the PTG
2435 * This function will search the PTGs for a particular ptype, returning the
2436 * PTG ID that contains it through the PTG parameter, with the value of
2437 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
2439 static enum ice_status
2440 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2442 if (ptype >= ICE_XLT1_CNT || !ptg)
2443 return ICE_ERR_PARAM;
2445 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2450 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
2451 * @hw: pointer to the hardware structure
2453 * @ptg: the PTG to allocate
2455 * This function allocates a given packet type group ID specified by the PTG
2458 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2460 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2464 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
2465 * @hw: pointer to the hardware structure
2467 * @ptype: the ptype to remove
2468 * @ptg: the PTG to remove the ptype from
2470 * This function will remove the ptype from the specific PTG, and move it to
2471 * the default PTG (ICE_DEFAULT_PTG).
2473 static enum ice_status
2474 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2476 struct ice_ptg_ptype **ch;
2477 struct ice_ptg_ptype *p;
2479 if (ptype > ICE_XLT1_CNT - 1)
2480 return ICE_ERR_PARAM;
2482 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2483 return ICE_ERR_DOES_NOT_EXIST;
2485 /* Should not happen if .in_use is set, bad config */
2486 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2489 /* find the ptype within this PTG, and bypass the link over it */
2490 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2491 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2493 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2494 *ch = p->next_ptype;
2498 ch = &p->next_ptype;
2502 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2503 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2509 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
2510 * @hw: pointer to the hardware structure
2512 * @ptype: the ptype to add or move
2513 * @ptg: the PTG to add or move the ptype to
2515 * This function will either add or move a ptype to a particular PTG depending
2516 * on whether the ptype is already part of another group. Note that using
2517 * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
2520 static enum ice_status
2521 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2523 enum ice_status status;
2526 if (ptype > ICE_XLT1_CNT - 1)
2527 return ICE_ERR_PARAM;
2529 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2530 return ICE_ERR_DOES_NOT_EXIST;
2532 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2536 /* Is ptype already in the correct PTG? */
2537 if (original_ptg == ptg)
2540 /* Remove from original PTG and move back to the default PTG */
2541 if (original_ptg != ICE_DEFAULT_PTG)
2542 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2544 /* Moving to default PTG? Then we're done with this request */
2545 if (ptg == ICE_DEFAULT_PTG)
2548 /* Add ptype to PTG at beginning of list */
2549 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2550 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2551 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2552 &hw->blk[blk].xlt1.ptypes[ptype];
2554 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2555 hw->blk[blk].xlt1.t[ptype] = ptg;
2560 /* Block / table size info */
2561 struct ice_blk_size_details {
2562 u16 xlt1; /* # XLT1 entries */
2563 u16 xlt2; /* # XLT2 entries */
2564 u16 prof_tcam; /* # profile ID TCAM entries */
2565 u16 prof_id; /* # profile IDs */
2566 u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
2567 u16 prof_redir; /* # profile redirection entries */
2568 u16 es; /* # extraction sequence entries */
2569 u16 fvw; /* # field vector words */
2570 u8 overwrite; /* overwrite existing entries allowed */
2571 u8 reverse; /* reverse FV order */
2574 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2577 * XLT1 - Number of entries in XLT1 table
2578 * XLT2 - Number of entries in XLT2 table
2579 * TCAM - Number of entries in the Profile ID TCAM table
2580 * CDID - Control Domain ID of the hardware block
2581 * PRED - Number of entries in the Profile Redirection Table
2582 * FV - Number of entries in the Field Vector
2583 * FVW - Width (in WORDs) of the Field Vector
2584 * OVR - Overwrite existing table entries
2587 /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
2588 /* Overwrite , Reverse FV */
2589 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
2591 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
2593 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2595 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2597 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
2602 ICE_SID_XLT1_OFF = 0,
2605 ICE_SID_PR_REDIR_OFF,
2610 /* Characteristic handling */
2613 * ice_match_prop_lst - determine if properties of two lists match
2614 * @list1: first properties list
2615 * @list2: second properties list
2617 * Count, cookies and the order must match in order to be considered equivalent.
2620 ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
2622 struct ice_vsig_prof *tmp1;
2623 struct ice_vsig_prof *tmp2;
2627 /* compare counts */
2628 LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list)
2630 LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
2632 if (!count || count != chk_count)
2635 tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
2636 tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
2638 /* profile cookies must match, and be in the exact same order, to take
2639 * priority into account
2642 if (tmp2->profile_cookie != tmp1->profile_cookie)
2645 tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
2646 tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
2652 /* VSIG Management */
2655 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2656 * @hw: pointer to the hardware structure
2658 * @vsi: VSI of interest
2659 * @vsig: pointer to receive the VSI group
2661 * This function will look up the VSI entry in the XLT2 list and return
2662 * the VSI group it is associated with.
2665 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2667 if (!vsig || vsi >= ICE_MAX_VSI)
2668 return ICE_ERR_PARAM;
2670 /* As long as there's a default or valid VSIG associated with the input
2671 * VSI, the function returns success. Any handling of the VSIG will be
2672 * done by the following add, update or remove functions.
2674 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2680 * ice_vsig_alloc_val - allocate a new VSIG by value
2681 * @hw: pointer to the hardware structure
2683 * @vsig: the VSIG to allocate
2685 * This function will allocate a given VSIG specified by the VSIG parameter.
2687 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2689 u16 idx = vsig & ICE_VSIG_IDX_M;
2691 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2692 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2693 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2696 return ICE_VSIG_VALUE(idx, hw->pf_id);
2700 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2701 * @hw: pointer to the hardware structure
2704 * This function will iterate through the VSIG list and mark the first
2705 * unused entry as used for the new VSIG, returning that value.
2707 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2711 for (i = 1; i < ICE_MAX_VSIGS; i++)
2712 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2713 return ice_vsig_alloc_val(hw, blk, i);
2715 return ICE_DEFAULT_VSIG;
2719 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2720 * @hw: pointer to the hardware structure
2722 * @chs: characteristic list
2723 * @vsig: returns the VSIG with the matching profiles, if found
2725 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2726 * a group have the same characteristic set. To check if there exists a VSIG
2727 * which has the same characteristics as the input characteristics, this
2728 * function will iterate through the XLT2 list and return the VSIG that has a
2729 * matching configuration. In order to make sure that priorities are accounted
2730 * for, the list must match exactly, including the order in which the
2731 * characteristics are listed.
2733 static enum ice_status
2734 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2735 struct LIST_HEAD_TYPE *chs, u16 *vsig)
2737 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2740 for (i = 0; i < xlt2->count; i++)
2741 if (xlt2->vsig_tbl[i].in_use &&
2742 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2743 *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2747 return ICE_ERR_DOES_NOT_EXIST;
2751 * ice_vsig_free - free VSI group
2752 * @hw: pointer to the hardware structure
2754 * @vsig: VSIG to remove
2756 * The function will remove all VSIs associated with the input VSIG and move
2757 * them to the DEFAULT_VSIG and mark the VSIG as available.
2759 static enum ice_status
2760 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2762 struct ice_vsig_prof *dtmp, *del;
2763 struct ice_vsig_vsi *vsi_cur;
2766 idx = vsig & ICE_VSIG_IDX_M;
2767 if (idx >= ICE_MAX_VSIGS)
2768 return ICE_ERR_PARAM;
2770 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2771 return ICE_ERR_DOES_NOT_EXIST;
2773 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2775 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2776 /* If the VSIG has at least 1 VSI then iterate through the
2777 * list and remove the VSIs before deleting the group.
2780 /* remove all vsis associated with this VSIG XLT2 entry */
2782 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2784 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2785 vsi_cur->changed = 1;
2786 vsi_cur->next_vsi = NULL;
2790 /* NULL terminate head of VSI list */
2791 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2794 /* free characteristic list */
2795 LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
2796 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2797 ice_vsig_prof, list) {
2798 LIST_DEL(&del->list);
2802 /* if VSIG characteristic list was cleared for reset
2803 * re-initialize the list head
2805 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2811 * ice_vsig_remove_vsi - remove VSI from VSIG
2812 * @hw: pointer to the hardware structure
2814 * @vsi: VSI to remove
2815 * @vsig: VSI group to remove from
2817 * The function will remove the input VSI from its VSI group and move it
2818 * to the DEFAULT_VSIG.
2820 static enum ice_status
2821 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2823 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2826 idx = vsig & ICE_VSIG_IDX_M;
2828 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2829 return ICE_ERR_PARAM;
2831 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2832 return ICE_ERR_DOES_NOT_EXIST;
2834 /* entry already in default VSIG, don't have to remove */
2835 if (idx == ICE_DEFAULT_VSIG)
2838 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2842 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2843 vsi_cur = (*vsi_head);
2845 /* iterate the VSI list, skip over the entry to be removed */
2847 if (vsi_tgt == vsi_cur) {
2848 (*vsi_head) = vsi_cur->next_vsi;
2851 vsi_head = &vsi_cur->next_vsi;
2852 vsi_cur = vsi_cur->next_vsi;
2855 /* verify if VSI was removed from group list */
2857 return ICE_ERR_DOES_NOT_EXIST;
2859 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2860 vsi_cur->changed = 1;
2861 vsi_cur->next_vsi = NULL;
2867 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
2868 * @hw: pointer to the hardware structure
2871 * @vsig: destination VSI group
2873 * This function will move or add the input VSI to the target VSIG.
2874 * The function will find the original VSIG the VSI belongs to and
2875 * move the entry to the DEFAULT_VSIG, update the original VSIG and
2876 * then move the entry to the new VSIG.
2878 static enum ice_status
2879 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2881 struct ice_vsig_vsi *tmp;
2882 enum ice_status status;
2885 idx = vsig & ICE_VSIG_IDX_M;
2887 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2888 return ICE_ERR_PARAM;
2890 /* if VSIG not in use and VSIG is not default type this VSIG
2893 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2894 vsig != ICE_DEFAULT_VSIG)
2895 return ICE_ERR_DOES_NOT_EXIST;
2897 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2901 /* no update required if vsigs match */
2902 if (orig_vsig == vsig)
2905 if (orig_vsig != ICE_DEFAULT_VSIG) {
2906 /* remove entry from orig_vsig and add to default VSIG */
2907 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2912 if (idx == ICE_DEFAULT_VSIG)
2915 /* Create VSI entry and add VSIG and prop_mask values */
2916 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2917 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2919 /* Add new entry to the head of the VSIG list */
2920 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2921 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2922 &hw->blk[blk].xlt2.vsis[vsi];
2923 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2924 hw->blk[blk].xlt2.t[vsi] = vsig;
2930 * ice_prof_has_mask_idx - determine if profile index masking is identical
2931 * @hw: pointer to the hardware structure
2933 * @prof: profile to check
2934 * @idx: profile index to check
2935 * @mask: mask to match
2938 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
2941 bool expect_no_mask = false;
2946 /* If mask is 0x0000 or 0xffff, then there is no masking */
2947 if (mask == 0 || mask == 0xffff)
2948 expect_no_mask = true;
2950 /* Scan the enabled masks on this profile, for the specified idx */
2951 for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
2952 hw->blk[blk].masks.count; i++)
2953 if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
2954 if (hw->blk[blk].masks.masks[i].in_use &&
2955 hw->blk[blk].masks.masks[i].idx == idx) {
2957 if (hw->blk[blk].masks.masks[i].mask == mask)
2962 if (expect_no_mask) {
2974 * ice_prof_has_mask - determine if profile masking is identical
2975 * @hw: pointer to the hardware structure
2977 * @prof: profile to check
2978 * @masks: masks to match
2981 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
2985 /* es->mask_ena[prof] will have the mask */
2986 for (i = 0; i < hw->blk[blk].es.fvw; i++)
2987 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
2994 * ice_find_prof_id_with_mask - find profile ID for a given field vector
2995 * @hw: pointer to the hardware structure
2997 * @fv: field vector to search for
2998 * @masks: masks for fv
2999 * @prof_id: receives the profile ID
3001 static enum ice_status
3002 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
3003 struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
3005 struct ice_es *es = &hw->blk[blk].es;
3008 /* For FD and RSS, we don't want to reuse an existing profile with the
3009 * same field vector and mask, as this will cause rule interference.
3011 if (blk == ICE_BLK_FD || blk == ICE_BLK_RSS)
3012 return ICE_ERR_DOES_NOT_EXIST;
3014 for (i = 0; i < (u8)es->count; i++) {
3015 u16 off = i * es->fvw;
3017 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
3020 /* check if mask settings are the same for this profile */
3021 if (masks && !ice_prof_has_mask(hw, blk, i, masks))
3028 return ICE_ERR_DOES_NOT_EXIST;
3032 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
3033 * @blk: the block type
3034 * @rsrc_type: pointer to variable to receive the resource type
3036 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3040 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID;
3043 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID;
3046 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
3049 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
3052 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
3061 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
3062 * @blk: the block type
3063 * @rsrc_type: pointer to variable to receive the resource type
3065 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3069 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM;
3072 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM;
3075 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
3078 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
3081 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
3090 * ice_alloc_tcam_ent - allocate hardware TCAM entry
3091 * @hw: pointer to the HW struct
3092 * @blk: the block to allocate the TCAM for
3093 * @btm: true to allocate from bottom of table, false to allocate from top
3094 * @tcam_idx: pointer to variable to receive the TCAM entry
3096 * This function allocates a new entry in a Profile ID TCAM for a specific
3099 static enum ice_status
3100 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
3105 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3106 return ICE_ERR_PARAM;
3108 return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
3112 * ice_free_tcam_ent - free hardware TCAM entry
3113 * @hw: pointer to the HW struct
3114 * @blk: the block from which to free the TCAM entry
3115 * @tcam_idx: the TCAM entry to free
3117 * This function frees an entry in a Profile ID TCAM for a specific block.
3119 static enum ice_status
3120 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
3124 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3125 return ICE_ERR_PARAM;
3127 return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
3131 * ice_alloc_prof_id - allocate profile ID
3132 * @hw: pointer to the HW struct
3133 * @blk: the block to allocate the profile ID for
3134 * @prof_id: pointer to variable to receive the profile ID
3136 * This function allocates a new profile ID, which also corresponds to a Field
3137 * Vector (Extraction Sequence) entry.
3139 static enum ice_status
3140 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
3142 enum ice_status status;
3146 if (!ice_prof_id_rsrc_type(blk, &res_type))
3147 return ICE_ERR_PARAM;
3149 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
3151 *prof_id = (u8)get_prof;
3157 * ice_free_prof_id - free profile ID
3158 * @hw: pointer to the HW struct
3159 * @blk: the block from which to free the profile ID
3160 * @prof_id: the profile ID to free
3162 * This function frees a profile ID, which also corresponds to a Field Vector.
3164 static enum ice_status
3165 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3167 u16 tmp_prof_id = (u16)prof_id;
3170 if (!ice_prof_id_rsrc_type(blk, &res_type))
3171 return ICE_ERR_PARAM;
3173 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
3177 * ice_prof_inc_ref - increment reference count for profile
3178 * @hw: pointer to the HW struct
3179 * @blk: the block in which the profile resides
3180 * @prof_id: the profile ID for which to increment the reference count
3182 static enum ice_status
3183 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3185 if (prof_id > hw->blk[blk].es.count)
3186 return ICE_ERR_PARAM;
3188 hw->blk[blk].es.ref_count[prof_id]++;
3194 * ice_write_prof_mask_reg - write profile mask register
3195 * @hw: pointer to the HW struct
3196 * @blk: hardware block
3197 * @mask_idx: mask index
3198 * @idx: index of the FV which will use the mask
3199 * @mask: the 16-bit mask
3202 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
3210 offset = GLQF_HMASK(mask_idx);
3211 val = (idx << GLQF_HMASK_MSK_INDEX_S) &
3212 GLQF_HMASK_MSK_INDEX_M;
3213 val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
3216 offset = GLQF_FDMASK(mask_idx);
3217 val = (idx << GLQF_FDMASK_MSK_INDEX_S) &
3218 GLQF_FDMASK_MSK_INDEX_M;
3219 val |= (mask << GLQF_FDMASK_MASK_S) &
3223 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
3228 wr32(hw, offset, val);
3229 ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
3230 blk, idx, offset, val);
3234 * ice_write_prof_mask_enable_res - write profile mask enable register
3235 * @hw: pointer to the HW struct
3236 * @blk: hardware block
3237 * @prof_id: profile ID
3238 * @enable_mask: enable mask
3241 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
3242 u16 prof_id, u32 enable_mask)
3248 offset = GLQF_HMASK_SEL(prof_id);
3251 offset = GLQF_FDMASK_SEL(prof_id);
3254 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
3259 wr32(hw, offset, enable_mask);
3260 ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
3261 blk, prof_id, offset, enable_mask);
3265 * ice_init_prof_masks - initialize the profile masks for a block
3266 * @hw: pointer to the HW struct
3267 * @blk: hardware block
3269 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
3274 ice_init_lock(&hw->blk[blk].masks.lock);
3276 per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
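/* each PF owns an equal, contiguous slice of the global mask registers */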
3278 hw->blk[blk].masks.count = per_pf;
3279 hw->blk[blk].masks.first = hw->pf_id * per_pf;
3281 ice_memset(hw->blk[blk].masks.masks, 0,
3282 sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM);
3284 for (i = hw->blk[blk].masks.first;
3285 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3286 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3290 * ice_init_all_prof_masks - initialize the profile masks for all blocks
3291 * @hw: pointer to the HW struct
3293 void ice_init_all_prof_masks(struct ice_hw *hw)
3295 ice_init_prof_masks(hw, ICE_BLK_RSS);
3296 ice_init_prof_masks(hw, ICE_BLK_FD);
3300 * ice_alloc_prof_mask - allocate profile mask
3301 * @hw: pointer to the HW struct
3302 * @blk: hardware block
3303 * @idx: index of FV which will use the mask
3304 * @mask: the 16-bit mask
3305 * @mask_idx: variable to receive the mask index
3307 static enum ice_status
3308 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
3311 bool found_unused = false, found_copy = false;
3312 enum ice_status status = ICE_ERR_MAX_LIMIT;
3313 u16 unused_idx = 0, copy_idx = 0;
3316 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3317 return ICE_ERR_PARAM;
3319 ice_acquire_lock(&hw->blk[blk].masks.lock);
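/* reuse an exact duplicate if one exists, else take the first unused slot */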
3321 for (i = hw->blk[blk].masks.first;
3322 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3323 if (hw->blk[blk].masks.masks[i].in_use) {
3324 /* if mask is in use and it exactly duplicates the
3325 * desired mask and index, then it can be reused
3327 if (hw->blk[blk].masks.masks[i].mask == mask &&
3328 hw->blk[blk].masks.masks[i].idx == idx) {
3334 /* save off unused index, but keep searching in case
3335 * there is an exact match later on
3337 if (!found_unused) {
3338 found_unused = true;
3345 else if (found_unused)
3348 goto err_ice_alloc_prof_mask;
3350 /* update mask for a new entry */
3352 hw->blk[blk].masks.masks[i].in_use = true;
3353 hw->blk[blk].masks.masks[i].mask = mask;
3354 hw->blk[blk].masks.masks[i].idx = idx;
3355 hw->blk[blk].masks.masks[i].ref = 0;
3356 ice_write_prof_mask_reg(hw, blk, i, idx, mask);
3359 hw->blk[blk].masks.masks[i].ref++;
3361 status = ICE_SUCCESS;
3363 err_ice_alloc_prof_mask:
3364 ice_release_lock(&hw->blk[blk].masks.lock);
3370 * ice_free_prof_mask - free profile mask
3371 * @hw: pointer to the HW struct
3372 * @blk: hardware block
3373 * @mask_idx: index of mask
3375 static enum ice_status
3376 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
3378 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3379 return ICE_ERR_PARAM;
3381 if (!(mask_idx >= hw->blk[blk].masks.first &&
3382 mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
3383 return ICE_ERR_DOES_NOT_EXIST;
3385 ice_acquire_lock(&hw->blk[blk].masks.lock);
3387 if (!hw->blk[blk].masks.masks[mask_idx].in_use)
3388 goto exit_ice_free_prof_mask;
3390 if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
3391 hw->blk[blk].masks.masks[mask_idx].ref--;
3392 goto exit_ice_free_prof_mask;
3396 hw->blk[blk].masks.masks[mask_idx].in_use = false;
3397 hw->blk[blk].masks.masks[mask_idx].mask = 0;
3398 hw->blk[blk].masks.masks[mask_idx].idx = 0;
3400 /* update mask as unused entry */
3401 ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
3403 ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
3405 exit_ice_free_prof_mask:
3406 ice_release_lock(&hw->blk[blk].masks.lock);
3412 * ice_free_prof_masks - free all profile masks for a profile
3413 * @hw: pointer to the HW struct
3414 * @blk: hardware block
3415 * @prof_id: profile ID
3417 static enum ice_status
3418 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
3423 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3424 return ICE_ERR_PARAM;
3426 mask_bm = hw->blk[blk].es.mask_ena[prof_id];
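/* free every mask whose enable bit is set for this profile */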
3427 for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
3428 if (mask_bm & BIT(i))
3429 ice_free_prof_mask(hw, blk, i);
3435 * ice_shutdown_prof_masks - clear the masks and release the mask lock
3436 * @hw: pointer to the HW struct
3437 * @blk: hardware block
3439 * This should be called before unloading the driver
3441 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
3445 ice_acquire_lock(&hw->blk[blk].masks.lock);
3447 for (i = hw->blk[blk].masks.first;
3448 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
3449 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3451 hw->blk[blk].masks.masks[i].in_use = false;
3452 hw->blk[blk].masks.masks[i].idx = 0;
3453 hw->blk[blk].masks.masks[i].mask = 0;
3456 ice_release_lock(&hw->blk[blk].masks.lock);
3457 ice_destroy_lock(&hw->blk[blk].masks.lock);
3461 * ice_shutdown_all_prof_masks - clear the masks and release all mask locks
3462 * @hw: pointer to the HW struct
3464 * This should be called before unloading the driver
3466 void ice_shutdown_all_prof_masks(struct ice_hw *hw)
3468 ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
3469 ice_shutdown_prof_masks(hw, ICE_BLK_FD);
3473 * ice_update_prof_masking - set registers according to masking
3474 * @hw: pointer to the HW struct
3475 * @blk: hardware block
3476 * @prof_id: profile ID
3479 static enum ice_status
3480 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
3488 /* Only support FD and RSS masking, otherwise nothing to be done */
3489 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3492 for (i = 0; i < hw->blk[blk].es.fvw; i++)
3493 if (masks[i] && masks[i] != 0xFFFF) {
3494 if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
3495 ena_mask |= BIT(idx);
3497 /* not enough free mask slots */
3504 /* free any masks we have allocated */
3505 for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
3506 if (ena_mask & BIT(i))
3507 ice_free_prof_mask(hw, blk, i);
3509 return ICE_ERR_OUT_OF_RANGE;
3512 /* enable the masks for this profile */
3513 ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
3515 /* store enabled masks with profile so that they can be freed later */
3516 hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
3522 * ice_write_es - write an extraction sequence to hardware
3523 * @hw: pointer to the HW struct
3524 * @blk: the block in which to write the extraction sequence
3525 * @prof_id: the profile ID to write
3526 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
3529 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3530 struct ice_fv_word *fv)
3534 off = prof_id * hw->blk[blk].es.fvw;
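/* a NULL field vector means the extraction sequence is being cleared */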
3536 ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
3537 sizeof(*fv), ICE_NONDMA_MEM);
3538 hw->blk[blk].es.written[prof_id] = false;
3540 ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
3541 sizeof(*fv), ICE_NONDMA_TO_NONDMA);
3546 * ice_prof_dec_ref - decrement reference count for profile
3547 * @hw: pointer to the HW struct
3548 * @blk: the block from which to free the profile ID
3549 * @prof_id: the profile ID for which to decrement the reference count
3551 static enum ice_status
3552 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3554 if (prof_id > hw->blk[blk].es.count)
3555 return ICE_ERR_PARAM;
3557 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3558 if (!--hw->blk[blk].es.ref_count[prof_id]) {
3559 ice_write_es(hw, blk, prof_id, NULL);
3560 ice_free_prof_masks(hw, blk, prof_id);
3561 return ice_free_prof_id(hw, blk, prof_id);
3568 /* Block / table section IDs */
3569 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3573 ICE_SID_PROFID_TCAM_SW,
3574 ICE_SID_PROFID_REDIR_SW,
3581 ICE_SID_PROFID_TCAM_ACL,
3582 ICE_SID_PROFID_REDIR_ACL,
3589 ICE_SID_PROFID_TCAM_FD,
3590 ICE_SID_PROFID_REDIR_FD,
3597 ICE_SID_PROFID_TCAM_RSS,
3598 ICE_SID_PROFID_REDIR_RSS,
3605 ICE_SID_PROFID_TCAM_PE,
3606 ICE_SID_PROFID_REDIR_PE,
3612 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3613 * @hw: pointer to the hardware structure
3614 * @blk: the HW block to initialize
3616 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3620 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3623 ptg = hw->blk[blk].xlt1.t[pt];
3624 if (ptg != ICE_DEFAULT_PTG) {
3625 ice_ptg_alloc_val(hw, blk, ptg);
3626 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3632 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3633 * @hw: pointer to the hardware structure
3634 * @blk: the HW block to initialize
3636 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3640 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3643 vsig = hw->blk[blk].xlt2.t[vsi];
3645 ice_vsig_alloc_val(hw, blk, vsig);
3646 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3647 /* no changes at this time, since this has been
3648 * initialized from the original package
3650 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3656 * ice_init_sw_db - init software database from HW tables
3657 * @hw: pointer to the hardware structure
3659 static void ice_init_sw_db(struct ice_hw *hw)
3663 for (i = 0; i < ICE_BLK_COUNT; i++) {
3664 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3665 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3670 * ice_fill_tbl - Reads content of a single table type into database
3671 * @hw: pointer to the hardware structure
3672 * @block_id: Block ID of the table to copy
3673 * @sid: Section ID of the table to copy
3675 * Will attempt to read the entire content of a given table of a single block
3676 * into the driver database. We assume that the buffer will always
3677 * be as large or larger than the data contained in the package. If
3678 * this condition is not met, there is most likely an error in the package
3681 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3683 u32 dst_len, sect_len, offset = 0;
3684 struct ice_prof_redir_section *pr;
3685 struct ice_prof_id_section *pid;
3686 struct ice_xlt1_section *xlt1;
3687 struct ice_xlt2_section *xlt2;
3688 struct ice_sw_fv_section *es;
3689 struct ice_pkg_enum state;
3693 /* if the HW segment pointer is null then the first iteration of
3694 * ice_pkg_enum_section() will fail. In this case the HW tables will
3695 * not be filled and the function will return success.
3698 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3702 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
3704 sect = ice_pkg_enum_section(hw->seg, &state, sid);
3708 case ICE_SID_XLT1_SW:
3709 case ICE_SID_XLT1_FD:
3710 case ICE_SID_XLT1_RSS:
3711 case ICE_SID_XLT1_ACL:
3712 case ICE_SID_XLT1_PE:
3713 xlt1 = (struct ice_xlt1_section *)sect;
3715 sect_len = LE16_TO_CPU(xlt1->count) *
3716 sizeof(*hw->blk[block_id].xlt1.t);
3717 dst = hw->blk[block_id].xlt1.t;
3718 dst_len = hw->blk[block_id].xlt1.count *
3719 sizeof(*hw->blk[block_id].xlt1.t);
3721 case ICE_SID_XLT2_SW:
3722 case ICE_SID_XLT2_FD:
3723 case ICE_SID_XLT2_RSS:
3724 case ICE_SID_XLT2_ACL:
3725 case ICE_SID_XLT2_PE:
3726 xlt2 = (struct ice_xlt2_section *)sect;
3727 src = (_FORCE_ u8 *)xlt2->value;
3728 sect_len = LE16_TO_CPU(xlt2->count) *
3729 sizeof(*hw->blk[block_id].xlt2.t);
3730 dst = (u8 *)hw->blk[block_id].xlt2.t;
3731 dst_len = hw->blk[block_id].xlt2.count *
3732 sizeof(*hw->blk[block_id].xlt2.t);
3734 case ICE_SID_PROFID_TCAM_SW:
3735 case ICE_SID_PROFID_TCAM_FD:
3736 case ICE_SID_PROFID_TCAM_RSS:
3737 case ICE_SID_PROFID_TCAM_ACL:
3738 case ICE_SID_PROFID_TCAM_PE:
3739 pid = (struct ice_prof_id_section *)sect;
3740 src = (u8 *)pid->entry;
3741 sect_len = LE16_TO_CPU(pid->count) *
3742 sizeof(*hw->blk[block_id].prof.t);
3743 dst = (u8 *)hw->blk[block_id].prof.t;
3744 dst_len = hw->blk[block_id].prof.count *
3745 sizeof(*hw->blk[block_id].prof.t);
3747 case ICE_SID_PROFID_REDIR_SW:
3748 case ICE_SID_PROFID_REDIR_FD:
3749 case ICE_SID_PROFID_REDIR_RSS:
3750 case ICE_SID_PROFID_REDIR_ACL:
3751 case ICE_SID_PROFID_REDIR_PE:
3752 pr = (struct ice_prof_redir_section *)sect;
3753 src = pr->redir_value;
3754 sect_len = LE16_TO_CPU(pr->count) *
3755 sizeof(*hw->blk[block_id].prof_redir.t);
3756 dst = hw->blk[block_id].prof_redir.t;
3757 dst_len = hw->blk[block_id].prof_redir.count *
3758 sizeof(*hw->blk[block_id].prof_redir.t);
3760 case ICE_SID_FLD_VEC_SW:
3761 case ICE_SID_FLD_VEC_FD:
3762 case ICE_SID_FLD_VEC_RSS:
3763 case ICE_SID_FLD_VEC_ACL:
3764 case ICE_SID_FLD_VEC_PE:
3765 es = (struct ice_sw_fv_section *)sect;
3767 sect_len = (u32)(LE16_TO_CPU(es->count) *
3768 hw->blk[block_id].es.fvw) *
3769 sizeof(*hw->blk[block_id].es.t);
3770 dst = (u8 *)hw->blk[block_id].es.t;
3771 dst_len = (u32)(hw->blk[block_id].es.count *
3772 hw->blk[block_id].es.fvw) *
3773 sizeof(*hw->blk[block_id].es.t);
3779 /* if the section offset exceeds destination length, terminate
3782 if (offset > dst_len)
3785 /* if the sum of section size and offset exceeds the destination size
3786 * then we are out of bounds of the HW table size for that PF.
3787 * Change the section length to fill only the remaining table space
3790 if ((offset + sect_len) > dst_len)
3791 sect_len = dst_len - offset;
3793 ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
3795 sect = ice_pkg_enum_section(NULL, &state, sid);
3800 * ice_fill_blk_tbls - Read package context for tables
3801 * @hw: pointer to the hardware structure
3803 * Reads the current package contents and populates the driver
3804 * database with the data iteratively for all advanced feature
3805 * blocks. Assumes that the HW tables have been allocated.
3807 void ice_fill_blk_tbls(struct ice_hw *hw)
3811 for (i = 0; i < ICE_BLK_COUNT; i++) {
3812 enum ice_block blk_id = (enum ice_block)i;
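/* fill each software table of this block from its package section */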
3814 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3815 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3816 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3817 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3818 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
3825 * ice_free_prof_map - free profile map
3826 * @hw: pointer to the hardware structure
3827 * @blk_idx: HW block index
3829 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3831 struct ice_es *es = &hw->blk[blk_idx].es;
3832 struct ice_prof_map *del, *tmp;
3834 ice_acquire_lock(&es->prof_map_lock);
3835 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
3836 ice_prof_map, list) {
3837 LIST_DEL(&del->list);
3840 INIT_LIST_HEAD(&es->prof_map);
3841 ice_release_lock(&es->prof_map_lock);
3845 * ice_free_flow_profs - free flow profile entries
3846 * @hw: pointer to the hardware structure
3847 * @blk_idx: HW block index
3849 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3851 struct ice_flow_prof *p, *tmp;
3853 ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
3854 LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
3855 ice_flow_prof, l_entry) {
3856 struct ice_flow_entry *e, *t;
3858 LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
3859 ice_flow_entry, l_entry)
3860 ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
3861 ICE_FLOW_ENTRY_HNDL(e));
3863 LIST_DEL(&p->l_entry);
3865 ice_free(hw, p->acts);
3867 ice_destroy_lock(&p->entries_lock);
3870 ice_release_lock(&hw->fl_profs_locks[blk_idx]);
3872 /* if driver is in reset and tables are being cleared
3873 * re-initialize the flow profile list heads
3875 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3879 * ice_free_vsig_tbl - free complete VSIG table entries
3880 * @hw: pointer to the hardware structure
3881 * @blk: the HW block on which to free the VSIG table entries
3883 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3887 if (!hw->blk[blk].xlt2.vsig_tbl)
3890 for (i = 1; i < ICE_MAX_VSIGS; i++)
3891 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3892 ice_vsig_free(hw, blk, i);
3896 * ice_free_hw_tbls - free hardware table memory
3897 * @hw: pointer to the hardware structure
3899 void ice_free_hw_tbls(struct ice_hw *hw)
3901 struct ice_rss_cfg *r, *rt;
3904 for (i = 0; i < ICE_BLK_COUNT; i++) {
3905 if (hw->blk[i].is_list_init) {
3906 struct ice_es *es = &hw->blk[i].es;
3908 ice_free_prof_map(hw, i);
3909 ice_destroy_lock(&es->prof_map_lock);
3910 ice_free_flow_profs(hw, i);
3911 ice_destroy_lock(&hw->fl_profs_locks[i]);
3913 hw->blk[i].is_list_init = false;
3915 ice_free_vsig_tbl(hw, (enum ice_block)i);
3916 ice_free(hw, hw->blk[i].xlt1.ptypes);
3917 ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
3918 ice_free(hw, hw->blk[i].xlt1.t);
3919 ice_free(hw, hw->blk[i].xlt2.t);
3920 ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
3921 ice_free(hw, hw->blk[i].xlt2.vsis);
3922 ice_free(hw, hw->blk[i].prof.t);
3923 ice_free(hw, hw->blk[i].prof_redir.t);
3924 ice_free(hw, hw->blk[i].es.t);
3925 ice_free(hw, hw->blk[i].es.ref_count);
3926 ice_free(hw, hw->blk[i].es.written);
3927 ice_free(hw, hw->blk[i].es.mask_ena);
3930 LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
3931 ice_rss_cfg, l_entry) {
3932 LIST_DEL(&r->l_entry);
3935 ice_destroy_lock(&hw->rss_locks);
3936 if (!hw->dcf_enabled)
3937 ice_shutdown_all_prof_masks(hw);
3938 ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
3942 * ice_init_flow_profs - init flow profile locks and list heads
3943 * @hw: pointer to the hardware structure
3944 * @blk_idx: HW block index
3946 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3948 ice_init_lock(&hw->fl_profs_locks[blk_idx]);
3949 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3953 * ice_clear_hw_tbls - clear HW tables and flow profiles
3954 * @hw: pointer to the hardware structure
3956 void ice_clear_hw_tbls(struct ice_hw *hw)
3960 for (i = 0; i < ICE_BLK_COUNT; i++) {
3961 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3962 struct ice_prof_tcam *prof = &hw->blk[i].prof;
3963 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3964 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3965 struct ice_es *es = &hw->blk[i].es;
3967 if (hw->blk[i].is_list_init) {
3968 ice_free_prof_map(hw, i);
3969 ice_free_flow_profs(hw, i);
3972 ice_free_vsig_tbl(hw, (enum ice_block)i);
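/* zero the tables in place; the allocations are kept for reuse after reset */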
3974 ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
3976 ice_memset(xlt1->ptg_tbl, 0,
3977 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
3979 ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
3982 ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
3984 ice_memset(xlt2->vsig_tbl, 0,
3985 xlt2->count * sizeof(*xlt2->vsig_tbl),
3987 ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
3990 ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
3992 ice_memset(prof_redir->t, 0,
3993 prof_redir->count * sizeof(*prof_redir->t),
3996 ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
3998 ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
4000 ice_memset(es->written, 0, es->count * sizeof(*es->written),
4002 ice_memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena),
4008 * ice_init_hw_tbls - init hardware table memory
4009 * @hw: pointer to the hardware structure
4011 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
4015 ice_init_lock(&hw->rss_locks);
4016 INIT_LIST_HEAD(&hw->rss_list_head);
4017 if (!hw->dcf_enabled)
4018 ice_init_all_prof_masks(hw);
4019 for (i = 0; i < ICE_BLK_COUNT; i++) {
4020 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
4021 struct ice_prof_tcam *prof = &hw->blk[i].prof;
4022 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
4023 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
4024 struct ice_es *es = &hw->blk[i].es;
4027 if (hw->blk[i].is_list_init)
4030 ice_init_flow_profs(hw, i);
4031 ice_init_lock(&es->prof_map_lock);
4032 INIT_LIST_HEAD(&es->prof_map);
4033 hw->blk[i].is_list_init = true;
4035 hw->blk[i].overwrite = blk_sizes[i].overwrite;
4036 es->reverse = blk_sizes[i].reverse;
4038 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
4039 xlt1->count = blk_sizes[i].xlt1;
4041 xlt1->ptypes = (struct ice_ptg_ptype *)
4042 ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
4047 xlt1->ptg_tbl = (struct ice_ptg_entry *)
4048 ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
4053 xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
4057 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
4058 xlt2->count = blk_sizes[i].xlt2;
4060 xlt2->vsis = (struct ice_vsig_vsi *)
4061 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
4066 xlt2->vsig_tbl = (struct ice_vsig_entry *)
4067 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
4068 if (!xlt2->vsig_tbl)
4071 for (j = 0; j < xlt2->count; j++)
4072 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
4074 xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
4078 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
4079 prof->count = blk_sizes[i].prof_tcam;
4080 prof->max_prof_id = blk_sizes[i].prof_id;
4081 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
4082 prof->t = (struct ice_prof_tcam_entry *)
4083 ice_calloc(hw, prof->count, sizeof(*prof->t));
4088 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
4089 prof_redir->count = blk_sizes[i].prof_redir;
4090 prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
4091 sizeof(*prof_redir->t));
4096 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
4097 es->count = blk_sizes[i].es;
4098 es->fvw = blk_sizes[i].fvw;
4099 es->t = (struct ice_fv_word *)
4100 ice_calloc(hw, (u32)(es->count * es->fvw),
4105 es->ref_count = (u16 *)
4106 ice_calloc(hw, es->count, sizeof(*es->ref_count));
4111 es->written = (u8 *)
4112 ice_calloc(hw, es->count, sizeof(*es->written));
4117 es->mask_ena = (u32 *)
4118 ice_calloc(hw, es->count, sizeof(*es->mask_ena));
4126 ice_free_hw_tbls(hw);
4127 return ICE_ERR_NO_MEMORY;
4131 * ice_prof_gen_key - generate profile ID key
4132 * @hw: pointer to the HW struct
4133 * @blk: the block in which to write profile ID to
4134 * @ptg: packet type group (PTG) portion of key
4135 * @vsig: VSIG portion of key
4136 * @cdid: CDID portion of key
4137 * @flags: flag portion of key
4138 * @vl_msk: valid mask
4139 * @dc_msk: don't care mask
4140 * @nm_msk: never match mask
4141 * @key: output of profile ID key
4143 static enum ice_status
4144 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
4145 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4146 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
4147 u8 key[ICE_TCAM_KEY_SZ])
4149 struct ice_prof_id_key inkey;
4152 inkey.xlt2_cdid = CPU_TO_LE16(vsig);
4153 inkey.flags = CPU_TO_LE16(flags);
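/* place the one-hot CDID bit in the upper bits of the XLT2/CDID field */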
4155 switch (hw->blk[blk].prof.cdid_bits) {
4159 #define ICE_CD_2_M 0xC000U
4160 #define ICE_CD_2_S 14
4161 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
4162 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
4165 #define ICE_CD_4_M 0xF000U
4166 #define ICE_CD_4_S 12
4167 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
4168 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
4171 #define ICE_CD_8_M 0xFF00U
4172 #define ICE_CD_8_S 8
4173 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
4174 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
4177 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
4181 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
4182 nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
4186 * ice_tcam_write_entry - write TCAM entry
4187 * @hw: pointer to the HW struct
4188 * @blk: the block in which to write profile ID to
4189 * @idx: the entry index to write to
4190 * @prof_id: profile ID
4191 * @ptg: packet type group (PTG) portion of key
4192 * @vsig: VSIG portion of key
4193 * @cdid: CDID portion of key
4194 * @flags: flag portion of key
4195 * @vl_msk: valid mask
4196 * @dc_msk: don't care mask
4197 * @nm_msk: never match mask
4199 static enum ice_status
4200 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
4201 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
4202 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4203 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
4204 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
4206 struct ice_prof_tcam_entry;
4207 enum ice_status status;
4209 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
4210 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
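/* on success, mirror the entry into the driver's shadow copy of the TCAM */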
4212 hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
4213 hw->blk[blk].prof.t[idx].prof_id = prof_id;
4220 * ice_vsig_get_ref - returns the number of VSIs belonging to a VSIG
4221 * @hw: pointer to the hardware structure
4223 * @vsig: VSIG to query
4224 * @refs: pointer to variable to receive the reference count
4226 static enum ice_status
4227 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
4229 u16 idx = vsig & ICE_VSIG_IDX_M;
4230 struct ice_vsig_vsi *ptr;
4234 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
4235 return ICE_ERR_DOES_NOT_EXIST;
4237 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4240 ptr = ptr->next_vsi;
4247 * ice_has_prof_vsig - check to see if VSIG has a specific profile
4248 * @hw: pointer to the hardware structure
4250 * @vsig: VSIG to check against
4251 * @hdl: profile handle
4254 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
4256 u16 idx = vsig & ICE_VSIG_IDX_M;
4257 struct ice_vsig_prof *ent;
4259 LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4260 ice_vsig_prof, list)
4261 if (ent->profile_cookie == hdl)
4264 ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
4270 * ice_prof_bld_es - build profile ID extraction sequence changes
4271 * @hw: pointer to the HW struct
4272 * @blk: hardware block
4273 * @bld: the update package buffer build to add to
4274 * @chgs: the list of changes to make in hardware
4276 static enum ice_status
4277 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
4278 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4280 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
4281 struct ice_chs_chg *tmp;
4283 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4284 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
4285 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
4286 struct ice_pkg_es *p;
4289 id = ice_sect_id(blk, ICE_VEC_TBL);
4290 p = (struct ice_pkg_es *)
4291 ice_pkg_buf_alloc_section(bld, id,
4292 ice_struct_size(p, es,
4298 return ICE_ERR_MAX_LIMIT;
4300 p->count = CPU_TO_LE16(1);
4301 p->offset = CPU_TO_LE16(tmp->prof_id);
4303 ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
4304 ICE_NONDMA_TO_NONDMA);
4311 * ice_prof_bld_tcam - build profile ID TCAM changes
4312 * @hw: pointer to the HW struct
4313 * @blk: hardware block
4314 * @bld: the update package buffer build to add to
4315 * @chgs: the list of changes to make in hardware
4317 static enum ice_status
4318 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
4319 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4321 struct ice_chs_chg *tmp;
4323 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4324 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
4325 struct ice_prof_id_section *p;
4328 id = ice_sect_id(blk, ICE_PROF_TCAM);
4329 p = (struct ice_prof_id_section *)
4330 ice_pkg_buf_alloc_section(bld, id,
4336 return ICE_ERR_MAX_LIMIT;
4338 p->count = CPU_TO_LE16(1);
4339 p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
4340 p->entry[0].prof_id = tmp->prof_id;
4342 ice_memcpy(p->entry[0].key,
4343 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
4344 sizeof(hw->blk[blk].prof.t->key),
4345 ICE_NONDMA_TO_NONDMA);
4352 * ice_prof_bld_xlt1 - build XLT1 changes
4353 * @blk: hardware block
4354 * @bld: the update package buffer build to add to
4355 * @chgs: the list of changes to make in hardware
4357 static enum ice_status
4358 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
4359 struct LIST_HEAD_TYPE *chgs)
4361 struct ice_chs_chg *tmp;
4363 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4364 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
4365 struct ice_xlt1_section *p;
4368 id = ice_sect_id(blk, ICE_XLT1);
4369 p = (struct ice_xlt1_section *)
4370 ice_pkg_buf_alloc_section(bld, id,
4376 return ICE_ERR_MAX_LIMIT;
4378 p->count = CPU_TO_LE16(1);
4379 p->offset = CPU_TO_LE16(tmp->ptype);
4380 p->value[0] = tmp->ptg;
4387 * ice_prof_bld_xlt2 - build XLT2 changes
4388 * @blk: hardware block
4389 * @bld: the update package buffer build to add to
4390 * @chgs: the list of changes to make in hardware
4392 static enum ice_status
4393 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
4394 struct LIST_HEAD_TYPE *chgs)
4396 struct ice_chs_chg *tmp;
4398 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4399 struct ice_xlt2_section *p;
4402 switch (tmp->type) {
4406 id = ice_sect_id(blk, ICE_XLT2);
4407 p = (struct ice_xlt2_section *)
4408 ice_pkg_buf_alloc_section(bld, id,
4414 return ICE_ERR_MAX_LIMIT;
4416 p->count = CPU_TO_LE16(1);
4417 p->offset = CPU_TO_LE16(tmp->vsi);
4418 p->value[0] = CPU_TO_LE16(tmp->vsig);
4429 * ice_upd_prof_hw - update hardware using the change list
4430 * @hw: pointer to the HW struct
4431 * @blk: hardware block
4432 * @chgs: the list of changes to make in hardware
4434 static enum ice_status
4435 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
4436 struct LIST_HEAD_TYPE *chgs)
4438 struct ice_buf_build *b;
4439 struct ice_chs_chg *tmp;
4440 enum ice_status status;
4448 /* count number of sections we need */
4449 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4450 switch (tmp->type) {
4451 case ICE_PTG_ES_ADD:
4469 sects = xlt1 + xlt2 + tcam + es;
4474 /* Build update package buffer */
4475 b = ice_pkg_buf_alloc(hw);
4477 return ICE_ERR_NO_MEMORY;
4479 status = ice_pkg_buf_reserve_section(b, sects);
4483 /* Preserve order of table update: ES, TCAM, PTG, VSIG */
4485 status = ice_prof_bld_es(hw, blk, b, chgs);
4491 status = ice_prof_bld_tcam(hw, blk, b, chgs);
4497 status = ice_prof_bld_xlt1(blk, b, chgs);
4503 status = ice_prof_bld_xlt2(blk, b, chgs);
4508 /* After package buffer build check if the section count in buffer is
4509 * non-zero and matches the number of sections detected for package
4512 pkg_sects = ice_pkg_buf_get_active_sections(b);
4513 if (!pkg_sects || pkg_sects != sects) {
4514 status = ICE_ERR_INVAL_SIZE;
4518 /* update package */
4519 status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4520 if (status == ICE_ERR_AQ_ERROR)
4521 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
4524 ice_pkg_buf_free(hw, b);
4529 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
4530 * @hw: pointer to the HW struct
4531 * @prof_id: profile ID
4532 * @mask_sel: mask select
4534 * This function enables any of the masks selected by the mask select parameter
4535 * for the profile specified.
4537 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
4539 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
4541 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
4542 GLQF_FDMASK_SEL(prof_id), mask_sel);
4545 struct ice_fd_src_dst_pair {
4551 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
4552 /* These are defined in pairs */
4553 { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
4554 { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
4556 { ICE_PROT_IPV4_IL, 2, 12 },
4557 { ICE_PROT_IPV4_IL, 2, 16 },
4559 { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
4560 { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
4562 { ICE_PROT_IPV6_IL, 8, 8 },
4563 { ICE_PROT_IPV6_IL, 8, 24 },
4565 { ICE_PROT_TCP_IL, 1, 0 },
4566 { ICE_PROT_TCP_IL, 1, 2 },
4568 { ICE_PROT_UDP_OF, 1, 0 },
4569 { ICE_PROT_UDP_OF, 1, 2 },
4571 { ICE_PROT_UDP_IL_OR_S, 1, 0 },
4572 { ICE_PROT_UDP_IL_OR_S, 1, 2 },
4574 { ICE_PROT_SCTP_IL, 1, 0 },
4575 { ICE_PROT_SCTP_IL, 1, 2 }
4578 #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
4581 * ice_update_fd_swap - set register appropriately for a FD FV extraction
4582 * @hw: pointer to the HW struct
4583 * @prof_id: profile ID
4584 * @es: extraction sequence (length of array is determined by the block)
4586 static enum ice_status
4587 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
4589 ice_declare_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4590 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
4591 #define ICE_FD_FV_NOT_FOUND (-2)
4592 s8 first_free = ICE_FD_FV_NOT_FOUND;
4593 u8 used[ICE_MAX_FV_WORDS] = { 0 };
4598 ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4600 /* This code assumes that the Flow Director field vectors are assigned
4601 * from the end of the FV indexes working towards the zero index, that
4602 * only complete fields will be included and will be consecutive, and
4603 * that there are no gaps between valid indexes.
4606 /* Determine swap fields present */
4607 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
4608 /* Find the first free entry, assuming right to left population.
4609 * This is where we can start adding additional pairs if needed.
4611 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4615 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4616 if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4617 es[i].off == ice_fd_pairs[j].off) {
4618 ice_set_bit(j, pair_list);
4623 orig_free = first_free;
4625 /* determine missing swap fields that need to be added */
4626 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4627 u8 bit1 = ice_is_bit_set(pair_list, i + 1);
4628 u8 bit0 = ice_is_bit_set(pair_list, i);
4633 /* add the appropriate 'paired' entry */
4639 /* check for room */
4640 if (first_free + 1 < (s8)ice_fd_pairs[index].count)
4641 return ICE_ERR_MAX_LIMIT;
4643 /* place in extraction sequence */
4644 for (k = 0; k < ice_fd_pairs[index].count; k++) {
4645 es[first_free - k].prot_id =
4646 ice_fd_pairs[index].prot_id;
4647 es[first_free - k].off =
4648 ice_fd_pairs[index].off + (k * 2);
4651 return ICE_ERR_OUT_OF_RANGE;
4653 /* keep track of non-relevant fields */
4654 mask_sel |= BIT(first_free - k);
4657 pair_start[index] = first_free;
4658 first_free -= ice_fd_pairs[index].count;
4662 /* fill in the swap array */
4663 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4665 u8 indexes_used = 1;
4667 /* assume flat at this index */
4668 #define ICE_SWAP_VALID 0x80
4669 used[si] = si | ICE_SWAP_VALID;
4671 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4676 /* check for a swap location */
4677 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4678 if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4679 es[si].off == ice_fd_pairs[j].off) {
4682 /* determine the appropriate matching field */
4683 idx = j + ((j % 2) ? -1 : 1);
4685 indexes_used = ice_fd_pairs[idx].count;
4686 for (k = 0; k < indexes_used; k++) {
4687 used[si - k] = (pair_start[idx] - k) |
4697 /* for each set of 4 swap and 4 inset indexes, write the appropriate register */
4700 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4704 for (k = 0; k < 4; k++) {
4708 if (used[idx] && !(mask_sel & BIT(idx))) {
4709 raw_swap |= used[idx] << (k * BITS_PER_BYTE);
4710 #define ICE_INSET_DFLT 0x9f
4711 raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
4715 /* write the appropriate swap register set */
4716 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
4718 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4719 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
4721 /* write the appropriate inset register set */
4722 wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
4724 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4725 prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
4728 /* initially clear the mask select for this profile */
4729 ice_update_fd_mask(hw, prof_id, 0);
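
/* Worked example (hypothetical values): each GLQF_FDSWAP register covers
 * four FV word indexes, one byte lane per index. If words 44..47 are all
 * "flat" (no swap needed) and none of them is masked off, used[44..47]
 * hold (0x80 | 44) .. (0x80 | 47) and the register for that group is
 * written as
 *
 *	raw_swap = 0xAFAEADAC;	// (0x80|47) << 24 ... (0x80|44) << 0
 *
 * while each byte lane of the matching GLQF_FDINSET register receives
 * ICE_INSET_DFLT (0x9f).
 */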
4734 /* The entries here need to match the order of enum ice_ptype_attrib */
4735 static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
4736 { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
4737 { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
4738 { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
4739 { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
4743 * ice_get_ptype_attrib_info - get ptype attribute information
4744 * @type: attribute type
4745 * @info: pointer to variable to receive the attribute information
4748 ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
4749 struct ice_ptype_attrib_info *info)
4751 *info = ice_ptype_attributes[type];
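
/* For example, assuming enum ice_ptype_attrib_type starts at
 * ICE_GTP_PDU_EH, ice_get_ptype_attrib_info(ICE_GTP_DOWNLINK, &info)
 * copies { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK } out of the table
 * above, which is why the table order must match the enum.
 */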
4755 * ice_add_prof_attrib - add any PTG with attributes to profile
4756 * @prof: pointer to the profile to which PTG entries will be added
4757 * @ptg: PTG to be added
4758 * @ptype: PTYPE that needs to be looked up
4759 * @attr: array of attributes that will be considered
4760 * @attr_cnt: number of elements in the attribute array
4762 static enum ice_status
4763 ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
4764 const struct ice_ptype_attributes *attr, u16 attr_cnt)
4769 for (i = 0; i < attr_cnt; i++) {
4770 if (attr[i].ptype == ptype) {
4773 prof->ptg[prof->ptg_cnt] = ptg;
4774 ice_get_ptype_attrib_info(attr[i].attrib,
4775 &prof->attr[prof->ptg_cnt]);
4777 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4778 return ICE_ERR_MAX_LIMIT;
4783 return ICE_ERR_DOES_NOT_EXIST;
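
/* Note that a single PTYPE may appear several times in the attribute
 * array with different attributes; each match above appends another
 * PTG/attribute entry, so one ptype can consume multiple of the
 * ICE_MAX_PTG_PER_PROFILE slots in the profile map.
 */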
4789 * ice_add_prof - add profile
4790 * @hw: pointer to the HW struct
4791 * @blk: hardware block
4792 * @id: profile tracking ID
4793 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4794 * @attr: array of attributes
4795 * @attr_cnt: number of elements in the attribute array
4796 * @es: extraction sequence (length of array is determined by the block)
4797 * @masks: mask for extraction sequence
4799 * This function registers a profile, which matches a set of PTYPES with a
4800 * particular extraction sequence. While the hardware profile is allocated
4801 * it will not be written until the first call to ice_add_flow that specifies
4802 * the ID value used here.
4805 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4806 const struct ice_ptype_attributes *attr, u16 attr_cnt,
4807 struct ice_fv_word *es, u16 *masks)
4809 u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4810 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
4811 struct ice_prof_map *prof;
4812 enum ice_status status;
4816 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
4818 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4820 /* search for existing profile */
4821 status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
4823 /* allocate profile ID */
4824 status = ice_alloc_prof_id(hw, blk, &prof_id);
4826 goto err_ice_add_prof;
4827 if (blk == ICE_BLK_FD) {
4828 /* For Flow Director block, the extraction sequence may
4829 * need to be altered in the case where there are paired
4830 * fields that have no match. This is necessary because
4831 * for Flow Director, src and dest fields need to be paired
4832 * for filter programming and these values are swapped during Tx.
4835 status = ice_update_fd_swap(hw, prof_id, es);
4837 goto err_ice_add_prof;
4839 status = ice_update_prof_masking(hw, blk, prof_id, masks);
4841 goto err_ice_add_prof;
4843 /* and write new es */
4844 ice_write_es(hw, blk, prof_id, es);
4847 ice_prof_inc_ref(hw, blk, prof_id);
4849 /* add profile info */
4851 prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
4853 goto err_ice_add_prof;
4855 prof->profile_cookie = id;
4856 prof->prof_id = prof_id;
4860 /* build list of ptgs */
4861 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4864 if (!ptypes[byte]) {
4870 /* Examine 8 bits per byte */
4871 ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
4876 ptype = byte * BITS_PER_BYTE + bit;
4878 /* The package should place all ptypes in a non-zero
4879 * PTG, so the following call should never fail.
4881 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4884 /* If PTG is already added, skip and continue */
4885 if (ice_is_bit_set(ptgs_used, ptg))
4888 ice_set_bit(ptg, ptgs_used);
4889 /* Check to see if there are any attributes for this
4890 * ptype, and add them if found.
4892 status = ice_add_prof_attrib(prof, ptg, ptype, attr,
4894 if (status == ICE_ERR_MAX_LIMIT)
4897 /* This is simply a ptype/PTG with no attribute */
4900 prof->ptg[prof->ptg_cnt] = ptg;
4901 prof->attr[prof->ptg_cnt].flags = 0;
4902 prof->attr[prof->ptg_cnt].mask = 0;
4904 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4913 LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
4914 status = ICE_SUCCESS;
4917 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
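
/* Usage sketch (hypothetical caller, cookie and block): register a
 * profile once, then bind it to a VSI using the same tracking ID.
 *
 *	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)];
 *
 *	// set the ptype bits and build es[]/masks[] for the flow first
 *	status = ice_add_prof(hw, ICE_BLK_RSS, 0x1234ULL, ptypes,
 *			      NULL, 0, es, masks);
 *	if (!status)
 *		status = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi,
 *					      0x1234ULL);
 */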
4922 * ice_search_prof_id - Search for a profile tracking ID
4923 * @hw: pointer to the HW struct
4924 * @blk: hardware block
4925 * @id: profile tracking ID
4927 * This will search for a profile tracking ID which was previously added.
4928 * The profile map lock should be held before calling this function.
4930 struct ice_prof_map *
4931 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
4933 struct ice_prof_map *entry = NULL;
4934 struct ice_prof_map *map;
4936 LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
4937 if (map->profile_cookie == id) {
4946 * ice_vsig_prof_id_count - count profiles in a VSIG
4947 * @hw: pointer to the HW struct
4948 * @blk: hardware block
4949 * @vsig: VSIG whose profiles are to be counted
4952 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
4954 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
4955 struct ice_vsig_prof *p;
4957 LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4958 ice_vsig_prof, list)
4965 * ice_rel_tcam_idx - release a TCAM index
4966 * @hw: pointer to the HW struct
4967 * @blk: hardware block
4968 * @idx: the index to release
4970 static enum ice_status
4971 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
4973 /* Masks to invoke a never match entry */
4974 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4975 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
4976 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4977 enum ice_status status;
4979 /* write the TCAM entry */
4980 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
4985 /* release the TCAM entry */
4986 status = ice_free_tcam_ent(hw, blk, idx);
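
/* The masks above should make the entry unmatchable: key bit 0 is
 * excluded from the don't-care mask (0xFE) and set in the never-match
 * mask (0x01), so no incoming key can hit the entry while it remains in
 * the table waiting to be freed.
 */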
4992 * ice_rem_prof_id - remove one profile from a VSIG
4993 * @hw: pointer to the HW struct
4994 * @blk: hardware block
4995 * @prof: pointer to profile structure to remove
4997 static enum ice_status
4998 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
4999 struct ice_vsig_prof *prof)
5001 enum ice_status status;
5004 for (i = 0; i < prof->tcam_count; i++)
5005 if (prof->tcam[i].in_use) {
5006 prof->tcam[i].in_use = false;
5007 status = ice_rel_tcam_idx(hw, blk,
5008 prof->tcam[i].tcam_idx);
5010 return ICE_ERR_HW_TABLE;
5017 * ice_rem_vsig - remove VSIG
5018 * @hw: pointer to the HW struct
5019 * @blk: hardware block
5020 * @vsig: the VSIG to remove
5021 * @chg: the change list
5023 static enum ice_status
5024 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5025 struct LIST_HEAD_TYPE *chg)
5027 u16 idx = vsig & ICE_VSIG_IDX_M;
5028 struct ice_vsig_vsi *vsi_cur;
5029 struct ice_vsig_prof *d, *t;
5030 enum ice_status status;
5032 /* remove TCAM entries */
5033 LIST_FOR_EACH_ENTRY_SAFE(d, t,
5034 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5035 ice_vsig_prof, list) {
5036 status = ice_rem_prof_id(hw, blk, d);
5044 /* Move all VSIs associated with this VSIG to the default VSIG */
5045 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
5046 /* If the VSIG has at least 1 VSI then iterate through the list
5047 * and remove the VSIs before deleting the group.
5051 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
5052 struct ice_chs_chg *p;
5054 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5056 return ICE_ERR_NO_MEMORY;
5058 p->type = ICE_VSIG_REM;
5059 p->orig_vsig = vsig;
5060 p->vsig = ICE_DEFAULT_VSIG;
5061 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
5063 LIST_ADD(&p->list_entry, chg);
5068 return ice_vsig_free(hw, blk, vsig);
5072 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
5073 * @hw: pointer to the HW struct
5074 * @blk: hardware block
5075 * @vsig: VSIG to remove the profile from
5076 * @hdl: profile handle indicating which profile to remove
5077 * @chg: list to receive a record of changes
5079 static enum ice_status
5080 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5081 struct LIST_HEAD_TYPE *chg)
5083 u16 idx = vsig & ICE_VSIG_IDX_M;
5084 struct ice_vsig_prof *p, *t;
5085 enum ice_status status;
5087 LIST_FOR_EACH_ENTRY_SAFE(p, t,
5088 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5089 ice_vsig_prof, list)
5090 if (p->profile_cookie == hdl) {
5091 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
5092 /* this is the last profile, remove the VSIG */
5093 return ice_rem_vsig(hw, blk, vsig, chg);
5095 status = ice_rem_prof_id(hw, blk, p);
5103 return ICE_ERR_DOES_NOT_EXIST;
5107 * ice_rem_flow_all - remove all flows with a particular profile
5108 * @hw: pointer to the HW struct
5109 * @blk: hardware block
5110 * @id: profile tracking ID
5112 static enum ice_status
5113 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
5115 struct ice_chs_chg *del, *tmp;
5116 struct LIST_HEAD_TYPE chg;
5117 enum ice_status status;
5120 INIT_LIST_HEAD(&chg);
5122 for (i = 1; i < ICE_MAX_VSIGS; i++)
5123 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
5124 if (ice_has_prof_vsig(hw, blk, i, id)) {
5125 status = ice_rem_prof_id_vsig(hw, blk, i, id,
5128 goto err_ice_rem_flow_all;
5132 status = ice_upd_prof_hw(hw, blk, &chg);
5134 err_ice_rem_flow_all:
5135 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5136 LIST_DEL(&del->list_entry);
5144 * ice_rem_prof - remove profile
5145 * @hw: pointer to the HW struct
5146 * @blk: hardware block
5147 * @id: profile tracking ID
5149 * This will remove the profile specified by the ID parameter, which was
5150 * previously created through ice_add_prof. If any existing entries
5151 * are associated with this profile, they will be removed as well.
5153 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
5155 struct ice_prof_map *pmap;
5156 enum ice_status status;
5158 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5160 pmap = ice_search_prof_id(hw, blk, id);
5162 status = ICE_ERR_DOES_NOT_EXIST;
5163 goto err_ice_rem_prof;
5166 /* remove all flows with this profile */
5167 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
5169 goto err_ice_rem_prof;
5171 /* dereference profile, and possibly remove */
5172 ice_prof_dec_ref(hw, blk, pmap->prof_id);
5174 LIST_DEL(&pmap->list);
5178 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
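
/* Usage sketch (hypothetical cookie value, matching the ice_add_prof()
 * sketch earlier): remove the profile and any flows still using it.
 *
 *	status = ice_rem_prof(hw, ICE_BLK_RSS, 0x1234ULL);
 */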
5183 * ice_get_prof - get profile
5184 * @hw: pointer to the HW struct
5185 * @blk: hardware block
5186 * @hdl: profile handle
5189 static enum ice_status
5190 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
5191 struct LIST_HEAD_TYPE *chg)
5193 enum ice_status status = ICE_SUCCESS;
5194 struct ice_prof_map *map;
5195 struct ice_chs_chg *p;
5198 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5199 /* Get the details on the profile specified by the handle ID */
5200 map = ice_search_prof_id(hw, blk, hdl);
5202 status = ICE_ERR_DOES_NOT_EXIST;
5203 goto err_ice_get_prof;
5206 for (i = 0; i < map->ptg_cnt; i++)
5207 if (!hw->blk[blk].es.written[map->prof_id]) {
5208 /* add ES to change list */
5209 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5211 status = ICE_ERR_NO_MEMORY;
5212 goto err_ice_get_prof;
5215 p->type = ICE_PTG_ES_ADD;
5217 p->ptg = map->ptg[i];
5218 p->attr = map->attr[i];
5222 p->prof_id = map->prof_id;
5224 hw->blk[blk].es.written[map->prof_id] = true;
5226 LIST_ADD(&p->list_entry, chg);
5230 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5231 /* let caller clean up the change list */
5236 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
5237 * @hw: pointer to the HW struct
5238 * @blk: hardware block
5239 * @vsig: VSIG from which to copy the list
5242 * This routine makes a copy of the list of profiles in the specified VSIG.
5244 static enum ice_status
5245 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5246 struct LIST_HEAD_TYPE *lst)
5248 struct ice_vsig_prof *ent1, *ent2;
5249 u16 idx = vsig & ICE_VSIG_IDX_M;
5251 LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5252 ice_vsig_prof, list) {
5253 struct ice_vsig_prof *p;
5255 /* copy this entry to the supplied list */
5256 p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
5257 ICE_NONDMA_TO_NONDMA);
5259 goto err_ice_get_profs_vsig;
5261 LIST_ADD_TAIL(&p->list, lst);
5266 err_ice_get_profs_vsig:
5267 LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
5268 LIST_DEL(&ent1->list);
5272 return ICE_ERR_NO_MEMORY;
5276 * ice_add_prof_to_lst - add profile entry to a list
5277 * @hw: pointer to the HW struct
5278 * @blk: hardware block
5279 * @lst: the list to be added to
5280 * @hdl: profile handle of entry to add
5282 static enum ice_status
5283 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
5284 struct LIST_HEAD_TYPE *lst, u64 hdl)
5286 enum ice_status status = ICE_SUCCESS;
5287 struct ice_prof_map *map;
5288 struct ice_vsig_prof *p;
5291 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5292 map = ice_search_prof_id(hw, blk, hdl);
5294 status = ICE_ERR_DOES_NOT_EXIST;
5295 goto err_ice_add_prof_to_lst;
5298 p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
5300 status = ICE_ERR_NO_MEMORY;
5301 goto err_ice_add_prof_to_lst;
5304 p->profile_cookie = map->profile_cookie;
5305 p->prof_id = map->prof_id;
5306 p->tcam_count = map->ptg_cnt;
5308 for (i = 0; i < map->ptg_cnt; i++) {
5309 p->tcam[i].prof_id = map->prof_id;
5310 p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
5311 p->tcam[i].ptg = map->ptg[i];
5312 p->tcam[i].attr = map->attr[i];
5315 LIST_ADD(&p->list, lst);
5317 err_ice_add_prof_to_lst:
5318 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5323 * ice_move_vsi - move VSI to another VSIG
5324 * @hw: pointer to the HW struct
5325 * @blk: hardware block
5326 * @vsi: the VSI to move
5327 * @vsig: the VSIG to move the VSI to
5328 * @chg: the change list
5330 static enum ice_status
5331 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
5332 struct LIST_HEAD_TYPE *chg)
5334 enum ice_status status;
5335 struct ice_chs_chg *p;
5338 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5340 return ICE_ERR_NO_MEMORY;
5342 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
5344 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
5351 p->type = ICE_VSI_MOVE;
5353 p->orig_vsig = orig_vsig;
5356 LIST_ADD(&p->list_entry, chg);
5362 * ice_set_tcam_flags - set TCAM flag don't care mask
5363 * @mask: mask for flags
5364 * @dc_mask: pointer to the don't care mask
5366 static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ])
5370 /* flags are lowest u16 */
5371 flag_word = (u16 *)dc_mask;
5376 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
5377 * @hw: pointer to the HW struct
5378 * @idx: the index of the TCAM entry to remove
5379 * @chg: the list of change structures to search
5382 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
5384 struct ice_chs_chg *pos, *tmp;
5386 LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry)
5387 if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
5388 LIST_DEL(&tmp->list_entry);
5394 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
5395 * @hw: pointer to the HW struct
5396 * @blk: hardware block
5397 * @enable: true to enable, false to disable
5398 * @vsig: the VSIG of the TCAM entry
5399 * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
5400 * @chg: the change list
5402 * This function appends an enable or disable TCAM change to the change list
5404 static enum ice_status
5405 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
5406 u16 vsig, struct ice_tcam_inf *tcam,
5407 struct LIST_HEAD_TYPE *chg)
5409 enum ice_status status;
5410 struct ice_chs_chg *p;
5412 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5413 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5414 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5416 /* if disabling, free the TCAM */
5418 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
5420 /* if we have already created a change for this TCAM entry, then
5421 * we need to remove that entry, in order to prevent writing to
5422 * a TCAM entry we will no longer own.
5424 ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
5430 /* for re-enabling, reallocate a TCAM */
5431 /* for entries with empty attribute masks, allocate entry from
5432 * the bottom of the TCAM table; otherwise, allocate from the
5433 * top of the table in order to give it higher priority
5435 status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
5440 /* add TCAM to change list */
5441 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5443 return ICE_ERR_NO_MEMORY;
5445 /* set don't care masks for TCAM flags */
5446 ice_set_tcam_flags(tcam->attr.mask, dc_msk);
5448 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
5449 tcam->ptg, vsig, 0, tcam->attr.flags,
5450 vl_msk, dc_msk, nm_msk);
5452 goto err_ice_prof_tcam_ena_dis;
5456 p->type = ICE_TCAM_ADD;
5457 p->add_tcam_idx = true;
5458 p->prof_id = tcam->prof_id;
5461 p->tcam_idx = tcam->tcam_idx;
5464 LIST_ADD(&p->list_entry, chg);
5468 err_ice_prof_tcam_ena_dis:
5474 * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use
5475 * @ptg_attr: pointer to the PTG and attribute pair to check
5476 * @ptgs_used: bitmap that denotes which PTGs are in use
5477 * @attr_used: array of PTG and attributes pairs already used
5478 * @attr_cnt: count of entries in the attr_used array
5481 ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, ice_bitmap_t *ptgs_used,
5482 struct ice_tcam_inf *attr_used[], u16 attr_cnt)
5486 if (!ice_is_bit_set(ptgs_used, ptg_attr->ptg))
5489 /* the PTG is used, so now look for correct attributes */
5490 for (i = 0; i < attr_cnt; i++)
5491 if (attr_used[i]->ptg == ptg_attr->ptg &&
5492 attr_used[i]->attr.flags == ptg_attr->attr.flags &&
5493 attr_used[i]->attr.mask == ptg_attr->attr.mask)
5500 * ice_adj_prof_priorities - adjust profile based on priorities
5501 * @hw: pointer to the HW struct
5502 * @blk: hardware block
5503 * @vsig: the VSIG for which to adjust profile priorities
5504 * @chg: the change list
5506 static enum ice_status
5507 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5508 struct LIST_HEAD_TYPE *chg)
5510 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
5511 struct ice_tcam_inf **attr_used;
5512 enum ice_status status = ICE_SUCCESS;
5513 struct ice_vsig_prof *t;
5514 u16 attr_used_cnt = 0;
5517 #define ICE_MAX_PTG_ATTRS 1024
5518 attr_used = (struct ice_tcam_inf **)ice_calloc(hw, ICE_MAX_PTG_ATTRS,
5519 sizeof(*attr_used));
5521 return ICE_ERR_NO_MEMORY;
5523 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
5524 idx = vsig & ICE_VSIG_IDX_M;
5526 /* Priority is based on the order in which the profiles are added. The
5527 * newest added profile has highest priority and the oldest added
5528 * profile has the lowest priority. Since the profile property list for
5529 * a VSIG is sorted from newest to oldest, this code traverses the list
5530 * in order and enables the first of each PTG that it finds (that is not
5531 * already enabled); it also disables any duplicate PTGs that it finds
5532 * in the older profiles (that are currently enabled).
5535 LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5536 ice_vsig_prof, list) {
5539 for (i = 0; i < t->tcam_count; i++) {
5542 /* Scan the priorities from newest to oldest.
5543 * Make sure that the newest profiles take priority.
5545 used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used,
5546 attr_used, attr_used_cnt);
5548 if (used && t->tcam[i].in_use) {
5549 /* need to mark this PTG as never match, as it
5550 * was already in use and is therefore a duplicate
5551 * (and lower priority)
5553 status = ice_prof_tcam_ena_dis(hw, blk, false,
5558 goto err_ice_adj_prof_priorities;
5559 } else if (!used && !t->tcam[i].in_use) {
5560 /* need to enable this PTG, as it is not in use
5561 * and not enabled (highest priority)
5563 status = ice_prof_tcam_ena_dis(hw, blk, true,
5568 goto err_ice_adj_prof_priorities;
5571 /* keep track of used ptgs */
5572 ice_set_bit(t->tcam[i].ptg, ptgs_used);
5573 if (attr_used_cnt < ICE_MAX_PTG_ATTRS)
5574 attr_used[attr_used_cnt++] = &t->tcam[i];
5576 ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n");
5580 err_ice_adj_prof_priorities:
5581 ice_free(hw, attr_used);
5586 * ice_add_prof_id_vsig - add profile to VSIG
5587 * @hw: pointer to the HW struct
5588 * @blk: hardware block
5589 * @vsig: the VSIG to which this profile is to be added
5590 * @hdl: the profile handle indicating the profile to add
5591 * @rev: true to add entries to the end of the list
5592 * @chg: the change list
5594 static enum ice_status
5595 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5596 bool rev, struct LIST_HEAD_TYPE *chg)
5598 /* Masks that ignore flags */
5599 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5600 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5601 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5602 enum ice_status status = ICE_SUCCESS;
5603 struct ice_prof_map *map;
5604 struct ice_vsig_prof *t;
5605 struct ice_chs_chg *p;
5608 /* Error, if this VSIG already has this profile */
5609 if (ice_has_prof_vsig(hw, blk, vsig, hdl))
5610 return ICE_ERR_ALREADY_EXISTS;
5612 /* new VSIG profile structure */
5613 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5615 return ICE_ERR_NO_MEMORY;
5617 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5618 /* Get the details on the profile specified by the handle ID */
5619 map = ice_search_prof_id(hw, blk, hdl);
5621 status = ICE_ERR_DOES_NOT_EXIST;
5622 goto err_ice_add_prof_id_vsig;
5625 t->profile_cookie = map->profile_cookie;
5626 t->prof_id = map->prof_id;
5627 t->tcam_count = map->ptg_cnt;
5629 /* create TCAM entries */
5630 for (i = 0; i < map->ptg_cnt; i++) {
5633 /* add TCAM to change list */
5634 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5636 status = ICE_ERR_NO_MEMORY;
5637 goto err_ice_add_prof_id_vsig;
5640 /* allocate the TCAM entry index */
5641 /* for entries with empty attribute masks, allocate entry from
5642 * the bottom of the TCAM table; otherwise, allocate from the
5643 * top of the table in order to give it higher priority
5645 status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
5649 goto err_ice_add_prof_id_vsig;
5652 t->tcam[i].ptg = map->ptg[i];
5653 t->tcam[i].prof_id = map->prof_id;
5654 t->tcam[i].tcam_idx = tcam_idx;
5655 t->tcam[i].attr = map->attr[i];
5656 t->tcam[i].in_use = true;
5658 p->type = ICE_TCAM_ADD;
5659 p->add_tcam_idx = true;
5660 p->prof_id = t->tcam[i].prof_id;
5661 p->ptg = t->tcam[i].ptg;
5663 p->tcam_idx = t->tcam[i].tcam_idx;
5665 /* set don't care masks for TCAM flags */
5666 ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk);
5668 /* write the TCAM entry */
5669 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
5671 t->tcam[i].ptg, vsig, 0,
5672 t->tcam[i].attr.flags, vl_msk,
5676 goto err_ice_add_prof_id_vsig;
5680 LIST_ADD(&p->list_entry, chg);
5683 /* add profile to VSIG */
5684 vsig_idx = vsig & ICE_VSIG_IDX_M;
5686 LIST_ADD_TAIL(&t->list,
5687 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5690 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5692 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5695 err_ice_add_prof_id_vsig:
5696 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5697 /* let caller clean up the change list */
5703 * ice_create_prof_id_vsig - add a new VSIG with a single profile
5704 * @hw: pointer to the HW struct
5705 * @blk: hardware block
5706 * @vsi: the initial VSI that will be in VSIG
5707 * @hdl: the profile handle of the profile that will be added to the VSIG
5708 * @chg: the change list
5710 static enum ice_status
5711 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
5712 struct LIST_HEAD_TYPE *chg)
5714 enum ice_status status;
5715 struct ice_chs_chg *p;
5718 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5720 return ICE_ERR_NO_MEMORY;
5722 new_vsig = ice_vsig_alloc(hw, blk);
5724 status = ICE_ERR_HW_TABLE;
5725 goto err_ice_create_prof_id_vsig;
5728 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
5730 goto err_ice_create_prof_id_vsig;
5732 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
5734 goto err_ice_create_prof_id_vsig;
5736 p->type = ICE_VSIG_ADD;
5738 p->orig_vsig = ICE_DEFAULT_VSIG;
5741 LIST_ADD(&p->list_entry, chg);
5745 err_ice_create_prof_id_vsig:
5746 /* let caller clean up the change list */
5752 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
5753 * @hw: pointer to the HW struct
5754 * @blk: hardware block
5755 * @vsi: the initial VSI that will be in VSIG
5756 * @lst: the list of profiles that will be added to the VSIG
5757 * @new_vsig: where to return the ID of the new VSIG
5758 * @chg: the change list
5760 static enum ice_status
5761 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
5762 struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
5763 struct LIST_HEAD_TYPE *chg)
5765 struct ice_vsig_prof *t;
5766 enum ice_status status;
5769 vsig = ice_vsig_alloc(hw, blk);
5771 return ICE_ERR_HW_TABLE;
5773 status = ice_move_vsi(hw, blk, vsi, vsig, chg);
5777 LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
5778 /* Reverse the order here since we are copying the list */
5779 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
5791 * ice_find_prof_vsig - find a VSIG with a specific profile handle
5792 * @hw: pointer to the HW struct
5793 * @blk: hardware block
5794 * @hdl: the profile handle of the profile to search for
5795 * @vsig: returns the VSIG with the matching profile
5798 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
5800 struct ice_vsig_prof *t;
5801 struct LIST_HEAD_TYPE lst;
5802 enum ice_status status;
5804 INIT_LIST_HEAD(&lst);
5806 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5810 t->profile_cookie = hdl;
5811 LIST_ADD(&t->list, &lst);
5813 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
5818 return status == ICE_SUCCESS;
5822 * ice_add_vsi_flow - add VSI flow
5823 * @hw: pointer to the HW struct
5824 * @blk: hardware block
5826 * @vsig: target VSIG to include the input VSI
5828 * Calling this function will add the VSI to a given VSIG and
5829 * update the HW tables accordingly. This call can be used to
5830 * add multiple VSIs to a VSIG if we know beforehand that those
5831 * VSIs have the same characteristics as the VSIG. This saves the
5832 * time of generating a new VSIG and TCAM entries only to roll them
5833 * back once a matching VSIG is eventually found.
5836 ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
5838 struct ice_chs_chg *tmp, *del;
5839 struct LIST_HEAD_TYPE chg;
5840 enum ice_status status;
5842 /* if the target VSIG is the default VSIG, the move is invalid */
5843 if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
5844 return ICE_ERR_PARAM;
5846 INIT_LIST_HEAD(&chg);
5848 /* move VSI to the VSIG that matches */
5849 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5850 /* update hardware if successful */
5852 status = ice_upd_prof_hw(hw, blk, &chg);
5854 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5855 LIST_DEL(&del->list_entry);
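
/* Usage sketch (hypothetical VSIs): once one VSI has been fully set up,
 * a second VSI with identical characteristics can join its VSIG
 * directly, skipping the create-then-rollback path.
 *
 *	u16 vsig;
 *
 *	if (!ice_vsig_find_vsi(hw, ICE_BLK_RSS, first_vsi, &vsig) && vsig)
 *		status = ice_add_vsi_flow(hw, ICE_BLK_RSS, second_vsi,
 *					  vsig);
 */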
5863 * ice_add_prof_id_flow - add profile flow
5864 * @hw: pointer to the HW struct
5865 * @blk: hardware block
5866 * @vsi: the VSI to enable with the profile specified by ID
5867 * @hdl: profile handle
5869 * Calling this function will update the hardware tables to enable the
5870 * profile indicated by the ID parameter for the VSI specified by the
5871 * vsi parameter. Once successfully called, the flow will be enabled.
5874 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5876 struct ice_vsig_prof *tmp1, *del1;
5877 struct LIST_HEAD_TYPE union_lst;
5878 struct ice_chs_chg *tmp, *del;
5879 struct LIST_HEAD_TYPE chg;
5880 enum ice_status status;
5883 INIT_LIST_HEAD(&union_lst);
5884 INIT_LIST_HEAD(&chg);
5887 status = ice_get_prof(hw, blk, hdl, &chg);
5891 /* determine if VSI is already part of a VSIG */
5892 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5893 if (!status && vsig) {
5901 /* make sure that there is no overlap/conflict between the new
5902 * characteristics and the existing ones; we don't support that scenario.
5905 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
5906 status = ICE_ERR_ALREADY_EXISTS;
5907 goto err_ice_add_prof_id_flow;
5910 /* last VSI in the VSIG? */
5911 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5913 goto err_ice_add_prof_id_flow;
5914 only_vsi = (ref == 1);
5916 /* create a union of the current profiles and the one being added */
5919 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
5921 goto err_ice_add_prof_id_flow;
5923 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
5925 goto err_ice_add_prof_id_flow;
5927 /* search for an existing VSIG with an exact characteristic match */
5928 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
5930 /* move VSI to the VSIG that matches */
5931 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5933 goto err_ice_add_prof_id_flow;
5935 /* VSI has been moved out of or_vsig. If the or_vsig had
5936 * only that VSI it is now empty and can be removed.
5939 status = ice_rem_vsig(hw, blk, or_vsig, &chg);
5941 goto err_ice_add_prof_id_flow;
5943 } else if (only_vsi) {
5944 /* If the original VSIG only contains one VSI, then it
5945 * will be the requesting VSI. In this case the VSI is
5946 * not sharing entries and we can simply add the new
5947 * profile to the VSIG.
5949 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
5952 goto err_ice_add_prof_id_flow;
5954 /* Adjust priorities */
5955 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5957 goto err_ice_add_prof_id_flow;
5959 /* No match, so we need a new VSIG */
5960 status = ice_create_vsig_from_lst(hw, blk, vsi,
5964 goto err_ice_add_prof_id_flow;
5966 /* Adjust priorities */
5967 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5969 goto err_ice_add_prof_id_flow;
5972 /* need to find or add a VSIG */
5973 /* search for an existing VSIG with an exact characteristic match */
5974 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
5975 /* found an exact match */
5976 /* add or move VSI to the VSIG that matches */
5977 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5979 goto err_ice_add_prof_id_flow;
5981 /* we did not find an exact match */
5982 /* we need to add a VSIG */
5983 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
5986 goto err_ice_add_prof_id_flow;
5990 /* update hardware */
5992 status = ice_upd_prof_hw(hw, blk, &chg);
5994 err_ice_add_prof_id_flow:
5995 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5996 LIST_DEL(&del->list_entry);
6000 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
6001 LIST_DEL(&del1->list);
6009 * ice_rem_prof_from_list - remove a profile from list
6010 * @hw: pointer to the HW struct
6011 * @lst: list to remove the profile from
6012 * @hdl: the profile handle indicating the profile to remove
6014 static enum ice_status
6015 ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
6017 struct ice_vsig_prof *ent, *tmp;
6019 LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
6020 if (ent->profile_cookie == hdl) {
6021 LIST_DEL(&ent->list);
6026 return ICE_ERR_DOES_NOT_EXIST;
6030 * ice_rem_prof_id_flow - remove flow
6031 * @hw: pointer to the HW struct
6032 * @blk: hardware block
6033 * @vsi: the VSI from which to remove the profile specified by ID
6034 * @hdl: profile tracking handle
6036 * Calling this function will update the hardware tables to remove the
6037 * profile indicated by the ID parameter for the VSI specified by the
6038 * vsi parameter. Once successfully called, the flow will be disabled.
6041 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
6043 struct ice_vsig_prof *tmp1, *del1;
6044 struct LIST_HEAD_TYPE chg, copy;
6045 struct ice_chs_chg *tmp, *del;
6046 enum ice_status status;
6049 INIT_LIST_HEAD(&copy);
6050 INIT_LIST_HEAD(&chg);
6052 /* determine if VSI is already part of a VSIG */
6053 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
6054 if (!status && vsig) {
6060 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
6061 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
6063 goto err_ice_rem_prof_id_flow;
6064 only_vsi = (ref == 1);
6067 /* If the original VSIG only contains one reference,
6068 * which will be the requesting VSI, then the VSI is not
6069 * sharing entries and we can simply remove the specific
6070 * characteristics from the VSIG.
6074 /* If there are no profiles left for this VSIG,
6075 * then simply remove the VSIG.
6077 status = ice_rem_vsig(hw, blk, vsig, &chg);
6079 goto err_ice_rem_prof_id_flow;
6081 status = ice_rem_prof_id_vsig(hw, blk, vsig,
6084 goto err_ice_rem_prof_id_flow;
6086 /* Adjust priorities */
6087 status = ice_adj_prof_priorities(hw, blk, vsig,
6090 goto err_ice_rem_prof_id_flow;
6094 /* Make a copy of the VSIG's list of Profiles */
6095 status = ice_get_profs_vsig(hw, blk, vsig, &copy);
6097 goto err_ice_rem_prof_id_flow;
6099 /* Remove specified profile entry from the list */
6100 status = ice_rem_prof_from_list(hw, &copy, hdl);
6102 goto err_ice_rem_prof_id_flow;
6104 if (LIST_EMPTY(&copy)) {
6105 status = ice_move_vsi(hw, blk, vsi,
6106 ICE_DEFAULT_VSIG, &chg);
6108 goto err_ice_rem_prof_id_flow;
6110 } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
6112 /* found a VSIG with a matching profile list; move the VSI to it */
6119 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
6121 goto err_ice_rem_prof_id_flow;
6123 /* since no existing VSIG supports this
6124 * characteristic pattern, we need to create a
6125 * new VSIG and TCAM entries
6127 status = ice_create_vsig_from_lst(hw, blk, vsi,
6131 goto err_ice_rem_prof_id_flow;
6133 /* Adjust priorities */
6134 status = ice_adj_prof_priorities(hw, blk, vsig,
6137 goto err_ice_rem_prof_id_flow;
6141 status = ICE_ERR_DOES_NOT_EXIST;
6144 /* update hardware tables */
6146 status = ice_upd_prof_hw(hw, blk, &chg);
6148 err_ice_rem_prof_id_flow:
6149 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
6150 LIST_DEL(&del->list_entry);
6154 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
6155 LIST_DEL(&del1->list);
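
/* Usage sketch (hypothetical cookie value): detach the profile from one
 * VSI; the profile itself stays registered until ice_rem_prof().
 *
 *	status = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, 0x1234ULL);
 */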