/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2019
 */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)
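
/* For illustration (not part of the driver): ICE_CQ_INIT_REGS(cq, PF_FW)
 * pastes the prefix onto each register name, so the expansion begins
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->sq.tail = PF_FW_ATQT;
 *	...
 *
 * letting one macro map the same ice_ctl_q_info layout onto either the
 * AdminQ (PF_FW) or the PF-VF Mailbox (PF_MBX) register block.
 */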
/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_FW);
}
/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}
/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}
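
/* Worked example (queue size illustrative): if the SQ was programmed with
 * num_sq_entries = 64, the masked read above compares
 *
 *	rd32(ATQLEN) & (ATQLEN_M | ATQENABLE_M)  ==  64 | ATQENABLE_M
 *
 * i.e. the queue is considered alive only while both the programmed length
 * and the enable bit survive; a firmware reset clears the register and the
 * test fails.
 */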
/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;

	cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
				    sizeof(struct ice_sq_cd));
	if (!cq->sq.cmd_buf) {
		ice_free_dma_mem(hw, &cq->sq.desc_buf);
		return ICE_ERR_NO_MEMORY;
	}

	return ICE_SUCCESS;
}
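
/* Sizing sketch (entry count illustrative): struct ice_aq_desc is a fixed
 * 32-byte descriptor, so a 256-entry SQ needs one physically contiguous
 * 256 * 32 = 8192 byte DMA ring, plus the host-only cmd_buf array of
 * ice_sq_cd entries allocated above.
 */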
/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	return ICE_SUCCESS;
}
/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	ice_free_dma_mem(hw, &ring->desc_buf);
}
/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
				     sizeof(cq->rq.desc_buf));
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
		desc->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return ICE_SUCCESS;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
	ice_free(hw, cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}
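
/* Flag sketch (buffer size illustrative): with a common 4096-byte
 * rq_buf_size, 4096 > ICE_AQ_LG_BUF (512 bytes), so every pre-posted
 * descriptor carries
 *
 *	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_LB);
 *
 * telling firmware that a buffer is attached and that it is a "large" one.
 */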
/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
				     sizeof(cq->sq.desc_buf));
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
	}
	return ICE_SUCCESS;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	ice_free(hw, cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}
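
/**
 * ice_cfg_cq_regs - setup the Control Queue registers
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific Control queue ring
 * @num_entries: number of queue entries
 *
 * Helper shared by ice_cfg_sq_regs and ice_cfg_rq_regs below: programs the
 * base address and length registers for one control queue ring.
 */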
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return ICE_SUCCESS;
}
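
/* Address-split example (address illustrative): a descriptor ring base of
 * 0x0000000123458000 is programmed as two 32-bit writes,
 *
 *	wr32(hw, ring->bal, 0x23458000);	// ICE_LO_DWORD
 *	wr32(hw, ring->bah, 0x00000001);	// ICE_HI_DWORD
 *
 * and reading BAL back is a cheap sanity check that the device accepted
 * the configuration.
 */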
/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return ICE_SUCCESS;
}
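
/* Example (size illustrative): with num_rq_entries = 32, tail is written as
 * 31, the index of the last pre-posted buffer; firmware delivers events into
 * entries between head and tail, and ice_clean_rq_elem below re-arms each
 * cleaned entry by advancing tail.
 */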
/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}
/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	int i;								\
	/* free descriptors */						\
	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
		if ((qi)->ring.r.ring##_bi[i].pa)			\
			ice_free_dma_mem((hw),				\
					 &(qi)->ring.r.ring##_bi[i]);	\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		ice_free(hw, (qi)->ring.cmd_buf);			\
	/* free DMA head */						\
	ice_free(hw, (qi)->ring.dma_head);				\
} while (0)
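
/* As with ICE_CQ_INIT_REGS, the ring argument is pasted into member names:
 * ICE_FREE_CQ_BUFS(hw, cq, rq), for example, iterates over
 * (qi)->num_rq_entries and frees each (qi)->rq.r.rq_bi[i] buffer before
 * releasing cmd_buf and dma_head; the sq/rq naming symmetry in
 * ice_ctl_q_info is what makes the single macro work for both rings.
 */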
/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}
/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}
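
/* Worked example (expected versions are illustrative): suppose
 * EXP_FW_API_VER_MAJOR = 1 and EXP_FW_API_VER_MINOR = 5. Firmware reporting
 * 2.x refuses to load; 1.8 logs the "newer than expected" notice since
 * 8 > 5 + 2; 1.2 logs the "older than expected" notice since 2 + 2 < 5;
 * and 1.3 through 1.7 load silently, giving roughly a +/-2 minor-version
 * compatibility window around the expected API.
 */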
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}
/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return ICE_SUCCESS;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}
/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return ICE_SUCCESS;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}
/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue */
	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (ret_code)
		return ret_code;

	ret_code = ice_init_check_adminq(hw);
	if (ret_code)
		return ret_code;
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}
/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
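
/* Lifecycle sketch (callers and timing are illustrative, not driver code):
 *
 *	probe:   ice_create_all_ctrlq(hw)	locks + queues
 *	reset:   ice_shutdown_all_ctrlq(hw)	queues only; locks persist
 *	         ice_init_all_ctrlq(hw)		re-init over existing locks
 *	remove:  ice_destroy_all_ctrlq(hw)	shutdown, then destroy locks
 */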
/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}
/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void
ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}
/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}
/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}
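
/* Ring arithmetic sketch: ICE_CTL_Q_DESC_UNUSED (ice_controlq.h) computes
 * the free descriptor count as
 *
 *	((ntc > ntu) ? 0 : count) + ntc - ntu - 1
 *
 * With count = 64, next_to_clean = 10 and next_to_use = 12 (values
 * illustrative), that is 64 + 10 - 12 - 1 = 61; one slot always stays
 * unused so a full ring is distinguishable from an empty one.
 */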
/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 datalen, flags;

	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	datalen = LE16_TO_CPU(cq_desc->datalen);
	flags = LE16_TO_CPU(cq_desc->flags);

	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
		  LE16_TO_CPU(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->cookie_high),
		  LE32_TO_CPU(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.param0),
		  LE32_TO_CPU(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
	/* Dump buffer iff 1) one exists and 2) is either a response indicated
	 * by the DD and/or CMP flag set or a command with the RD flag set.
	 */
	if (buf && cq_desc->datalen != 0 &&
	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
	     flags & ICE_AQ_FLAG_RD)) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
				min(buf_len, datalen));
	}
}
/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}
/**
 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
static enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		ice_memcpy(details->wb_desc, desc_on_ring,
			   sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	return status;
}
/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_SUCCESS;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	ice_acquire_lock(&cq->sq_lock);
	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
	ice_release_lock(&cq->sq_lock);

	return status;
}
/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}
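
/* Usage sketch (a minimal direct command; error handling elided): callers
 * pair this helper with ice_sq_send_cmd, as the get-version path used by
 * ice_init_check_adminq above does:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *
 * A NULL buf with buf_size 0 marks the command as direct; indirect commands
 * pass a buffer and let the send path add ICE_AQ_FLAG_BUF/LB.
 */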
/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = ICE_SUCCESS;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode),
			  cq->rq_last_status);
	}
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
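
/* Pending-count example (sizes illustrative): with cq->rq.count = 32, if
 * ntc has advanced to 30 while the re-read head (ntu) has wrapped to 2,
 * then ntc > ntu and *pending = 32 + (2 - 30) = 4 events remain; without a
 * wrap (ntc = 4, ntu = 9) it is simply 9 - 4 = 5.
 */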