{
struct ice_ctl_q_info *cq = &hw->adminq;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
ICE_CQ_INIT_REGS(cq, PF_FW);
}
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
- * Configure base address and length registers for the receive (event q)
+ * Configure base address and length registers for the receive (event queue)
*/
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @cq: pointer to the specific Control queue
*
* This is the main initialization routine for the Control Send Queue
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_sq_entries
* - cq->sq_buf_size
{
enum ice_status ret_code;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
if (cq->sq.count > 0) {
/* queue already initialized */
ret_code = ICE_ERR_NOT_READY;
* @cq: pointer to the specific Control queue
*
* The main initialization routine for the Admin Receive (Event) Queue.
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_rq_entries
* - cq->rq_buf_size
{
enum ice_status ret_code;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
if (cq->rq.count > 0) {
/* queue already initialized */
ret_code = ICE_ERR_NOT_READY;
{
enum ice_status ret_code = ICE_SUCCESS;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
ice_acquire_lock(&cq->sq_lock);
if (!cq->sq.count) {
{
enum ice_status ret_code = ICE_SUCCESS;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
ice_acquire_lock(&cq->rq_lock);
if (!cq->rq.count) {
struct ice_ctl_q_info *cq = &hw->adminq;
enum ice_status status;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
status = ice_aq_get_fw_ver(hw, NULL);
if (status)
return ICE_SUCCESS;
init_ctrlq_free_rq:
- if (cq->rq.count) {
- ice_shutdown_rq(hw, cq);
- ice_destroy_lock(&cq->rq_lock);
- }
- if (cq->sq.count) {
- ice_shutdown_sq(hw, cq);
- ice_destroy_lock(&cq->sq_lock);
- }
+ ice_shutdown_rq(hw, cq);
+ ice_shutdown_sq(hw, cq);
return status;
}
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
*
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
*/
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
struct ice_ctl_q_info *cq;
enum ice_status ret_code;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
switch (q_type) {
case ICE_CTL_Q_ADMIN:
ice_adminq_init_regs(hw);
!cq->rq_buf_size || !cq->sq_buf_size) {
return ICE_ERR_CFG;
}
- ice_init_lock(&cq->sq_lock);
- ice_init_lock(&cq->rq_lock);
/* setup SQ command write back timeout */
cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
/* allocate the ATQ */
ret_code = ice_init_sq(hw, cq);
if (ret_code)
- goto init_ctrlq_destroy_locks;
+ return ret_code;
/* allocate the ARQ */
ret_code = ice_init_rq(hw, cq);
init_ctrlq_free_sq:
ice_shutdown_sq(hw, cq);
-init_ctrlq_destroy_locks:
- ice_destroy_lock(&cq->sq_lock);
- ice_destroy_lock(&cq->rq_lock);
return ret_code;
}
* ice_init_all_ctrlq - main initialization routine for all control queues
* @hw: pointer to the hardware structure
*
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure for all control queues:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
*/
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
enum ice_status ret_code;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
/* Init FW admin queue */
ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
+/**
+ * ice_init_ctrlq_locks - Initialize locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Initializes the send and receive queue locks for a given control queue.
+ * Called once per control queue from ice_create_all_ctrlq during driver
+ * load; the matching teardown routine is ice_destroy_ctrlq_locks.
+ */
+static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ ice_init_lock(&cq->sq_lock);
+ ice_init_lock(&cq->rq_lock);
+}
+
+/**
+ * ice_create_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * This function creates all the control queue locks and then calls
+ * ice_init_all_ctrlq. It should be called once during driver load. If the
+ * driver needs to re-initialize control queues at run time it should call
+ * ice_init_all_ctrlq instead.
+ *
+ * Returns the result of ice_init_all_ctrlq. The matching teardown routine
+ * is ice_destroy_all_ctrlq.
+ */
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+{
+ ice_init_ctrlq_locks(&hw->adminq);
+ ice_init_ctrlq_locks(&hw->mailboxq);
+
+ return ice_init_all_ctrlq(hw);
+}
+
/**
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
+ *
+ * NOTE: this function does not destroy the control queue locks.
*/
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
struct ice_ctl_q_info *cq;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
switch (q_type) {
case ICE_CTL_Q_ADMIN:
cq = &hw->adminq;
return;
}
- if (cq->sq.count) {
- ice_shutdown_sq(hw, cq);
- ice_destroy_lock(&cq->sq_lock);
- }
- if (cq->rq.count) {
- ice_shutdown_rq(hw, cq);
- ice_destroy_lock(&cq->rq_lock);
- }
+ ice_shutdown_sq(hw, cq);
+ ice_shutdown_rq(hw, cq);
}
/**
* ice_shutdown_all_ctrlq - shutdown routine for all control queues
* @hw: pointer to the hardware structure
+ *
+ * NOTE: this function does not destroy the control queue locks. The driver
+ * may call this at runtime to shutdown and later restart control queues, such
+ * as in response to a reset event.
*/
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
/* Shutdown PF-VF Mailbox */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
+/**
+ * ice_destroy_ctrlq_locks - Destroy locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Destroys the send and receive queue locks for a given control queue.
+ * Called from ice_destroy_all_ctrlq after the queues themselves have been
+ * shut down; counterpart of ice_init_ctrlq_locks.
+ */
+static void
+ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ ice_destroy_lock(&cq->sq_lock);
+ ice_destroy_lock(&cq->rq_lock);
+}
+
+/**
+ * ice_destroy_all_ctrlq - exit routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * This function shuts down all the control queues and then destroys the
+ * control queue locks. It should be called once during driver unload. The
+ * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
+ * reinitialize control queues, such as in response to a reset event.
+ *
+ * Counterpart of ice_create_all_ctrlq.
+ */
+void ice_destroy_all_ctrlq(struct ice_hw *hw)
+{
+ /* shut down all the control queues first */
+ ice_shutdown_all_ctrlq(hw);
+
+ ice_destroy_ctrlq_locks(&hw->adminq);
+ ice_destroy_ctrlq_locks(&hw->mailboxq);
+}
+
/**
* ice_clean_sq - cleans Admin send queue (ATQ)
* @hw: pointer to the hardware structure
struct ice_ctl_q_ring *sq = &cq->sq;
u16 ntc = sq->next_to_clean;
struct ice_sq_cd *details;
-#if 0
- struct ice_aq_desc desc_cb;
-#endif
struct ice_aq_desc *desc;
desc = ICE_CTL_Q_DESC(*sq, ntc);
while (rd32(hw, cq->sq.head) != ntc) {
ice_debug(hw, ICE_DBG_AQ_MSG,
"ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
-#if 0
- if (details->callback) {
- ICE_CTL_Q_CALLBACK cb_func =
- (ICE_CTL_Q_CALLBACK)details->callback;
- ice_memcpy(&desc_cb, desc, sizeof(desc_cb),
- ICE_DMA_TO_DMA);
- cb_func(hw, &desc_cb);
- }
-#endif
ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
ntc++;
details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
if (cd)
*details = *cd;
-#if 0
- /* FIXME: if/when this block gets enabled (when the #if 0
- * is removed), add braces to both branches of the surrounding
- * conditional expression. The braces have been removed to
- * prevent checkpatch complaining.
- */
-
- /* If the command details are defined copy the cookie. The
- * CPU_TO_LE32 is not needed here because the data is ignored
- * by the FW, only used by the driver
- */
- if (details->cookie) {
- desc->cookie_high =
- CPU_TO_LE32(ICE_HI_DWORD(details->cookie));
- desc->cookie_low =
- CPU_TO_LE32(ICE_LO_DWORD(details->cookie));
- }
-#endif
else
ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
-#if 0
- /* clear requested flags and then set additional flags if defined */
- desc->flags &= ~CPU_TO_LE16(details->flags_dis);
- desc->flags |= CPU_TO_LE16(details->flags_ena);
-
- if (details->postpone && !details->async) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Async flag not set along with postpone flag\n");
- status = ICE_ERR_PARAM;
- goto sq_send_command_error;
- }
-#endif
/* Call clean and check queue available function to reclaim the
* descriptors that were processed by FW/MBX; the function returns the
(cq->sq.next_to_use)++;
if (cq->sq.next_to_use == cq->sq.count)
cq->sq.next_to_use = 0;
-#if 0
- /* FIXME - handle this case? */
- if (!details->postpone)
-#endif
wr32(hw, cq->sq.tail, cq->sq.next_to_use);
-#if 0
- /* if command details are not defined or async flag is not set,
- * we need to wait for desc write back
- */
- if (!details->async && !details->postpone) {
- /* FIXME - handle this case? */
- }
-#endif
do {
if (ice_sq_done(hw, cq))
break;
/* update the error if time out occurred */
if (!cmd_completed) {
-#if 0
- (!details->async && !details->postpone)) {
-#endif
ice_debug(hw, ICE_DBG_AQ_MSG,
"Control Send Queue Writeback timeout.\n");
status = ICE_ERR_AQ_TIMEOUT;
cq->rq.next_to_clean = ntc;
cq->rq.next_to_use = ntu;
-#if 0
- ice_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
-#endif
clean_rq_elem_out:
/* Set pending if needed, unlock and return */
if (pending) {