/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2019
 */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)					\
do {									\
	(qinfo)->sq.head = prefix##_ATQH;				\
	(qinfo)->sq.tail = prefix##_ATQT;				\
	(qinfo)->sq.len = prefix##_ATQLEN;				\
	(qinfo)->sq.bah = prefix##_ATQBAH;				\
	(qinfo)->sq.bal = prefix##_ATQBAL;				\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;		\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;			\
	(qinfo)->rq.head = prefix##_ARQH;				\
	(qinfo)->rq.tail = prefix##_ARQT;				\
	(qinfo)->rq.len = prefix##_ARQLEN;				\
	(qinfo)->rq.bah = prefix##_ARQBAH;				\
	(qinfo)->rq.bal = prefix##_ARQBAL;				\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;		\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;			\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;

	cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
				    sizeof(struct ice_sq_cd));
	if (!cq->sq.cmd_buf) {
		ice_free_dma_mem(hw, &cq->sq.desc_buf);
		return ICE_ERR_NO_MEMORY;
	}

	return ICE_SUCCESS;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	return ICE_SUCCESS;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	ice_free_dma_mem(hw, &ring->desc_buf);
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
				     sizeof(cq->rq.desc_buf));
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
		desc->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return ICE_SUCCESS;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
	ice_free(hw, cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
				     sizeof(cq->sq.desc_buf));
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
	}
	return ICE_SUCCESS;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	ice_free(hw, cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_cfg_cq_regs - setup the base address and length registers for a ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of entries in the ring
 */
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return ICE_SUCCESS;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return ICE_SUCCESS;
}

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	int i;								\
	/* free descriptors */						\
	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
		if ((qi)->ring.r.ring##_bi[i].pa)			\
			ice_free_dma_mem((hw),				\
					 &(qi)->ring.r.ring##_bi[i]);	\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		ice_free(hw, (qi)->ring.cmd_buf);			\
	/* free DMA head */						\
	ice_free(hw, (qi)->ring.dma_head);				\
} while (0)

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}

	return true;
}

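/* Worked example (editorial note, not part of the driver): with the checks
 * above, firmware reporting the same major API version loads silently when
 * its minor version is within +/-2 of EXP_FW_API_VER_MINOR. Assuming, purely
 * for illustration, an expected API version of 1.5: firmware reporting 1.8
 * logs the "newer" message and 1.2 logs the "older" message, yet both still
 * load; only a reported major version above 1 makes the driver refuse to
 * load.
 */
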
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it is alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return ICE_SUCCESS;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return ICE_SUCCESS;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue */
	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (ret_code)
		return ret_code;

	ret_code = ice_init_check_adminq(hw);
	if (ret_code)
		return ret_code;
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

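/* Illustrative usage sketch (not part of this file): a PF driver typically
 * sizes both control queues and creates them once at probe time, roughly as
 * below. The entry counts, buffer sizes and the error label are placeholder
 * names for illustration, not values mandated by this code.
 *
 *	hw->adminq.num_sq_entries = ICE_AQ_LEN;
 *	hw->adminq.num_rq_entries = ICE_AQ_LEN;
 *	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
 *	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
 *	hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
 *	hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
 *	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
 *	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
 *
 *	if (ice_create_all_ctrlq(hw))
 *		goto err_probe;
 */
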
/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

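/* Illustrative reset-handling sketch (not part of this file): because the
 * control queue locks survive a shutdown, a reset path can quiesce and later
 * re-create the queues without going through ice_create_all_ctrlq /
 * ice_destroy_all_ctrlq:
 *
 *	ice_shutdown_all_ctrlq(hw);
 *	... trigger the reset and wait for the device to come back ...
 *	if (ice_init_all_ctrlq(hw))
 *		... recovery failed, handle the error ...
 */
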
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void
ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 datalen, flags;

	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	datalen = LE16_TO_CPU(cq_desc->datalen);
	flags = LE16_TO_CPU(cq_desc->flags);

	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
		  LE16_TO_CPU(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->cookie_high),
		  LE32_TO_CPU(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.param0),
		  LE32_TO_CPU(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
	/* Dump buffer iff 1) one exists and 2) is either a response indicated
	 * by the DD and/or CMP flag set or a command with the RD flag set.
	 */
	if (buf && cq_desc->datalen != 0 &&
	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
	     flags & ICE_AQ_FLAG_RD)) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
				min(buf_len, datalen));
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	ice_acquire_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_msec_delay(1, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command completed with error 0x%x\n",
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		ice_memcpy(details->wb_desc, desc_on_ring,
			   sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	ice_release_lock(&cq->sq_lock);
	return status;
}

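/* Illustrative caller sketch (not part of this file): a typical direct
 * (buffer-less) admin command fills a descriptor with
 * ice_fill_dflt_direct_cmd_desc() below and hands it to ice_sq_send_cmd()
 * with a NULL buffer; ice_aqc_opc_example is a hypothetical opcode used only
 * for illustration.
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_example);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *	if (status)
 *		... inspect hw->adminq.sq_last_status for the AQ return code ...
 */
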
/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}

/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = ICE_SUCCESS;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event received with error 0x%x\n",
			  cq->rq_last_status);
	}
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
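
/* Illustrative event-polling sketch (not part of this file): a service task
 * normally drains the ARQ by calling ice_clean_rq_elem() until it returns
 * ICE_ERR_AQ_NO_WORK, using 'pending' to decide whether to keep going. The
 * buffer handling below is a sketch only.
 *
 *	struct ice_rq_event_info event = { 0 };
 *	u16 pending = 0;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = ice_malloc(hw, event.buf_len);
 *	if (!event.msg_buf)
 *		return;
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		... dispatch on LE16_TO_CPU(event.desc.opcode) ...
 *	} while (pending);
 *	ice_free(hw, event.msg_buf);
 */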