i40e_release_spinlock(&hw->aq.arq_spinlock);
return ret_code;
}
+#ifdef PF_DRIVER
+
+/**
+ * i40e_resume_aq - resume AQ processing from 0
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
+#endif /* PF_DRIVER */
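
Since the helper is now file-local to the PF build, a minimal usage sketch follows; the caller shown is hypothetical and only illustrates the intended sequencing after a PF reset has cleared the AQ registers:

/* Hypothetical caller (illustration only): after a PF reset zeroes the
 * AQ registers, rewind both rings and rewrite the ring registers before
 * the queues are used again.
 */
static void example_after_pf_reset(struct i40e_hw *hw)
{
	i40e_resume_aq(hw);
}
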
/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
- enum i40e_status_code ret_code;
#ifdef PF_DRIVER
- u16 eetrack_lo, eetrack_hi;
u16 cfg_ptr, oem_hi, oem_lo;
+ u16 eetrack_lo, eetrack_hi;
+#endif
+ enum i40e_status_code ret_code;
+#ifdef PF_DRIVER
int retry = 0;
#endif
+
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
    (hw->aq.num_asq_entries == 0) ||
    (hw->aq.arq_buf_size == 0) ||
    (hw->aq.asq_buf_size == 0)) {
	ret_code = I40E_ERR_CONFIG;
goto init_adminq_exit;
}
-
- /* initialize spin locks */
i40e_init_spinlock(&hw->aq.asq_spinlock);
i40e_init_spinlock(&hw->aq.arq_spinlock);
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
-
- /* destroy the spinlocks */
i40e_destroy_spinlock(&hw->aq.asq_spinlock);
i40e_destroy_spinlock(&hw->aq.arq_spinlock);
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
-
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
+#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
+#else
+STATIC bool i40e_asq_done(struct i40e_hw *hw)
+#endif
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
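	/* Queue is idle once the hardware head pointer has caught up with
	 * the driver's next_to_use index (body as in the upstream source).
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}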
if (i40e_asq_done(hw))
break;
- /* ugh! delay while spin_lock */
i40e_msec_delay(1);
total_delay++;
} while (total_delay < hw->aq.asq_cmd_timeout);
#ifdef PF_DRIVER
i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
-#endif
+#endif /* PF_DRIVER */
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
	*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
return ret_code;
}
-void i40e_resume_aq(struct i40e_hw *hw)
-{
- /* Registers are reset after PF reset */
- hw->aq.asq.next_to_use = 0;
- hw->aq.asq.next_to_clean = 0;
-
- i40e_config_asq_regs(hw);
-
- hw->aq.arq.next_to_use = 0;
- hw->aq.arq.next_to_clean = 0;
-
- i40e_config_arq_regs(hw);
-}
}
/* general information */
-#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
#ifdef I40E_ESS_SUPPORT
#define I40E_ASQ_CMD_TIMEOUT_ESS 50000 /* msecs */
#endif
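
For context, this timeout is consumed one millisecond per iteration by the i40e_asq_send_command() poll loop shown earlier; a hedged sketch of how the value is seeded, per the upstream init path:

/* Sketch: i40e_init_asq() seeds the per-command timeout from the define
 * above; the send path then polls i40e_asq_done() once per millisecond,
 * so 250 means a ~250 ms worst-case wait (50 s with I40E_ESS_SUPPORT).
 */
hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
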
u8 up_enable_bits;
u8 sched_reserved;
/* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress table */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
u8 cmd_reserved[8];
/* last 32 bytes are written by FW */
__le16 qs_handle[8];
u8 reserved1[28];
};
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+I40E_CHECK_STRUCT_LEN(0x40,
+ i40e_aqc_configure_switching_comp_ets_bw_limit_data);
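
I40E_CHECK_STRUCT_LEN asserts the stated size at compile time; for reference, a sketch of the usual upstream form, which fails the build via a divide-by-zero in a constant expression when the size is wrong:

#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
	{ i40e_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
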
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
__le16 element_count;
- __le16 element_id; /* Feature/field ID */
+ __le16 element_id; /* Feature/field ID */
__le16 element_id_msw; /* MSWord of field ID */
__le32 address_high;
__le32 address_low;
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
-#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD \
+	(1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
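
A hedged usage sketch for these flags; the cmd_flags variable is hypothetical and stands in for the flags word of the 0x0704/0x0705 command structs:

/* Hypothetical: select "immediate field" addressing (bit 1 set) rather
 * than the default feature addressing (I40E_AQ_ANVM_FEATURE, bit clear).
 */
u16 cmd_flags = I40E_AQ_ANVM_IMMEDIATE_FIELD;
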
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
/* OEM Post Update (indirect 0x0720)
* no command data struct used
*/
- struct i40e_aqc_nvm_oem_post_update {
+struct i40e_aqc_nvm_oem_post_update {
#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
u8 sel_data;
u8 reserved[7];
*/
struct i40e_aqc_lldp_stop_start_specific_agent {
#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
-#define I40E_AQC_START_SPECIFIC_AGENT_MASK (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
+ (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
u8 command;
u8 reserved[15];
};
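
A brief usage sketch; the enclosing helper is hypothetical:

static void example_start_specific_agent(
		struct i40e_aqc_lldp_stop_start_specific_agent *cmd)
{
	/* Bit 0 set requests "start" of the specific agent; clear = stop */
	cmd->command |= I40E_AQC_START_SPECIFIC_AGENT_MASK;
}
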
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
struct i40e_aqc_add_udp_tunnel_completion {
- __le16 udp_port;
+ __le16 udp_port;
u8 filter_entry_index;
u8 multiple_pfs;
#define I40E_AQC_SINGLE_PF 0x0
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- /* It can take upto 15 secs for GRST steady state */
- grst_del = grst_del * 20; /* bump it to 16 secs max to be safe */
+	/* It can take up to 15 secs for GRST steady state.
+	 * Bump it to 16 secs max to be safe.
+	 */
+	grst_del = grst_del * 20;
for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
}
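
The multiplier is easiest to sanity-check numerically; a worked example, assuming a typical programmed GRSTDEL value of 8 and the 100 ms sleep per poll iteration used by the surrounding upstream loop:

/* Assumed: GRSTDEL field reads 8; the poll loop sleeps 100 ms/iteration.
 *   iterations      = grst_del * 20 = 8 * 20 = 160
 *   worst-case wait = 160 * 100 ms  = 16 s
 * matching the "16 secs max" in the comment above.
 */
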
/**
- * i40_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
* @hw: pointer to the hw struct
* @flags: component flags
* @mac_seid: uplink seid (MAC SEID)
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
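+/* i40e_asq_done() is made STATIC in PF builds, so the prototype is
+ * visible to VF builds only.
+ */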
+#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw);
+#endif
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void i40e_idle_aq(struct i40e_hw *hw);
-void i40e_resume_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
#ifdef X722_SUPPORT