/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */
5 #include "iavf_status.h"
7 #include "iavf_register.h"
8 #include "iavf_adminq.h"
9 #include "iavf_prototype.h"
12 * iavf_adminq_init_regs - Initialize AdminQ registers
13 * @hw: pointer to the hardware structure
15 * This assumes the alloc_asq and alloc_arq functions have already been called
17 STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
19 /* set head and tail registers in our local struct */
20 hw->aq.asq.tail = IAVF_VF_ATQT1;
21 hw->aq.asq.head = IAVF_VF_ATQH1;
22 hw->aq.asq.len = IAVF_VF_ATQLEN1;
23 hw->aq.asq.bal = IAVF_VF_ATQBAL1;
24 hw->aq.asq.bah = IAVF_VF_ATQBAH1;
25 hw->aq.arq.tail = IAVF_VF_ARQT1;
26 hw->aq.arq.head = IAVF_VF_ARQH1;
27 hw->aq.arq.len = IAVF_VF_ARQLEN1;
28 hw->aq.arq.bal = IAVF_VF_ARQBAL1;
29 hw->aq.arq.bah = IAVF_VF_ARQBAH1;
33 * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
34 * @hw: pointer to the hardware structure
36 enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
38 enum iavf_status ret_code;
40 ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
42 (hw->aq.num_asq_entries *
43 sizeof(struct iavf_aq_desc)),
44 IAVF_ADMINQ_DESC_ALIGNMENT);
48 ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
49 (hw->aq.num_asq_entries *
50 sizeof(struct iavf_asq_cmd_details)));
52 iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
60 * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
61 * @hw: pointer to the hardware structure
63 enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
65 enum iavf_status ret_code;
67 ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
69 (hw->aq.num_arq_entries *
70 sizeof(struct iavf_aq_desc)),
71 IAVF_ADMINQ_DESC_ALIGNMENT);
77 * iavf_free_adminq_asq - Free Admin Queue send rings
78 * @hw: pointer to the hardware structure
80 * This assumes the posted send buffers have already been cleaned
83 void iavf_free_adminq_asq(struct iavf_hw *hw)
85 iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
86 iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
90 * iavf_free_adminq_arq - Free Admin Queue receive rings
91 * @hw: pointer to the hardware structure
93 * This assumes the posted receive buffers have already been cleaned
96 void iavf_free_adminq_arq(struct iavf_hw *hw)
98 iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
102 * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
103 * @hw: pointer to the hardware structure
105 STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
107 enum iavf_status ret_code;
108 struct iavf_aq_desc *desc;
109 struct iavf_dma_mem *bi;
112 /* We'll be allocating the buffer info memory first, then we can
113 * allocate the mapped buffers for the event processing
116 /* buffer_info structures do not need alignment */
117 ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
118 (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
121 hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;
123 /* allocate the mapped buffers */
124 for (i = 0; i < hw->aq.num_arq_entries; i++) {
125 bi = &hw->aq.arq.r.arq_bi[i];
126 ret_code = iavf_allocate_dma_mem(hw, bi,
129 IAVF_ADMINQ_DESC_ALIGNMENT);
131 goto unwind_alloc_arq_bufs;
133 /* now configure the descriptors for use */
134 desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
136 desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
137 if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
138 desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
140 /* This is in accordance with Admin queue design, there is no
141 * register for buffer size configuration
143 desc->datalen = CPU_TO_LE16((u16)bi->size);
145 desc->cookie_high = 0;
146 desc->cookie_low = 0;
147 desc->params.external.addr_high =
148 CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
149 desc->params.external.addr_low =
150 CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
151 desc->params.external.param0 = 0;
152 desc->params.external.param1 = 0;
158 unwind_alloc_arq_bufs:
159 /* don't try to free the one that failed... */
162 iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
163 iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
169 * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
170 * @hw: pointer to the hardware structure
172 STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
174 enum iavf_status ret_code;
175 struct iavf_dma_mem *bi;
178 /* No mapped memory needed yet, just the buffer info structures */
179 ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
180 (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
183 hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
185 /* allocate the mapped buffers */
186 for (i = 0; i < hw->aq.num_asq_entries; i++) {
187 bi = &hw->aq.asq.r.asq_bi[i];
188 ret_code = iavf_allocate_dma_mem(hw, bi,
191 IAVF_ADMINQ_DESC_ALIGNMENT);
193 goto unwind_alloc_asq_bufs;
198 unwind_alloc_asq_bufs:
199 /* don't try to free the one that failed... */
202 iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
203 iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
209 * iavf_free_arq_bufs - Free receive queue buffer info elements
210 * @hw: pointer to the hardware structure
212 STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
216 /* free descriptors */
217 for (i = 0; i < hw->aq.num_arq_entries; i++)
218 iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
220 /* free the descriptor memory */
221 iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
223 /* free the dma header */
224 iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
228 * iavf_free_asq_bufs - Free send queue buffer info elements
229 * @hw: pointer to the hardware structure
231 STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
235 /* only unmap if the address is non-NULL */
236 for (i = 0; i < hw->aq.num_asq_entries; i++)
237 if (hw->aq.asq.r.asq_bi[i].pa)
238 iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
240 /* free the buffer info list */
241 iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
243 /* free the descriptor memory */
244 iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
246 /* free the dma header */
247 iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
251 * iavf_config_asq_regs - configure ASQ registers
252 * @hw: pointer to the hardware structure
254 * Configure base address and length registers for the transmit queue
256 STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
258 enum iavf_status ret_code = IAVF_SUCCESS;
261 /* Clear Head and Tail */
262 wr32(hw, hw->aq.asq.head, 0);
263 wr32(hw, hw->aq.asq.tail, 0);
265 /* set starting point */
266 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
267 IAVF_VF_ATQLEN1_ATQENABLE_MASK));
268 wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
269 wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));
271 /* Check one register to verify that config was applied */
272 reg = rd32(hw, hw->aq.asq.bal);
273 if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
274 ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
280 * iavf_config_arq_regs - ARQ register configuration
281 * @hw: pointer to the hardware structure
283 * Configure base address and length registers for the receive (event queue)
285 STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
287 enum iavf_status ret_code = IAVF_SUCCESS;
290 /* Clear Head and Tail */
291 wr32(hw, hw->aq.arq.head, 0);
292 wr32(hw, hw->aq.arq.tail, 0);
294 /* set starting point */
295 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
296 IAVF_VF_ARQLEN1_ARQENABLE_MASK));
297 wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
298 wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));
300 /* Update tail in the HW to post pre-allocated buffers */
301 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
303 /* Check one register to verify that config was applied */
304 reg = rd32(hw, hw->aq.arq.bal);
305 if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
306 ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
312 * iavf_init_asq - main initialization routine for ASQ
313 * @hw: pointer to the hardware structure
315 * This is the main initialization routine for the Admin Send Queue
316 * Prior to calling this function, drivers *MUST* set the following fields
317 * in the hw->aq structure:
318 * - hw->aq.num_asq_entries
319 * - hw->aq.arq_buf_size
321 * Do *NOT* hold the lock when calling this as the memory allocation routines
322 * called are not going to be atomic context safe
324 enum iavf_status iavf_init_asq(struct iavf_hw *hw)
326 enum iavf_status ret_code = IAVF_SUCCESS;
328 if (hw->aq.asq.count > 0) {
329 /* queue already initialized */
330 ret_code = IAVF_ERR_NOT_READY;
331 goto init_adminq_exit;
334 /* verify input for valid configuration */
335 if ((hw->aq.num_asq_entries == 0) ||
336 (hw->aq.asq_buf_size == 0)) {
337 ret_code = IAVF_ERR_CONFIG;
338 goto init_adminq_exit;
341 hw->aq.asq.next_to_use = 0;
342 hw->aq.asq.next_to_clean = 0;
344 /* allocate the ring memory */
345 ret_code = iavf_alloc_adminq_asq_ring(hw);
346 if (ret_code != IAVF_SUCCESS)
347 goto init_adminq_exit;
349 /* allocate buffers in the rings */
350 ret_code = iavf_alloc_asq_bufs(hw);
351 if (ret_code != IAVF_SUCCESS)
352 goto init_adminq_free_rings;
354 /* initialize base registers */
355 ret_code = iavf_config_asq_regs(hw);
356 if (ret_code != IAVF_SUCCESS)
357 goto init_config_regs;
360 hw->aq.asq.count = hw->aq.num_asq_entries;
361 goto init_adminq_exit;
363 init_adminq_free_rings:
364 iavf_free_adminq_asq(hw);
368 iavf_free_asq_bufs(hw);
375 * iavf_init_arq - initialize ARQ
376 * @hw: pointer to the hardware structure
378 * The main initialization routine for the Admin Receive (Event) Queue.
379 * Prior to calling this function, drivers *MUST* set the following fields
380 * in the hw->aq structure:
381 * - hw->aq.num_asq_entries
382 * - hw->aq.arq_buf_size
384 * Do *NOT* hold the lock when calling this as the memory allocation routines
385 * called are not going to be atomic context safe
387 enum iavf_status iavf_init_arq(struct iavf_hw *hw)
389 enum iavf_status ret_code = IAVF_SUCCESS;
391 if (hw->aq.arq.count > 0) {
392 /* queue already initialized */
393 ret_code = IAVF_ERR_NOT_READY;
394 goto init_adminq_exit;
397 /* verify input for valid configuration */
398 if ((hw->aq.num_arq_entries == 0) ||
399 (hw->aq.arq_buf_size == 0)) {
400 ret_code = IAVF_ERR_CONFIG;
401 goto init_adminq_exit;
404 hw->aq.arq.next_to_use = 0;
405 hw->aq.arq.next_to_clean = 0;
407 /* allocate the ring memory */
408 ret_code = iavf_alloc_adminq_arq_ring(hw);
409 if (ret_code != IAVF_SUCCESS)
410 goto init_adminq_exit;
412 /* allocate buffers in the rings */
413 ret_code = iavf_alloc_arq_bufs(hw);
414 if (ret_code != IAVF_SUCCESS)
415 goto init_adminq_free_rings;
417 /* initialize base registers */
418 ret_code = iavf_config_arq_regs(hw);
419 if (ret_code != IAVF_SUCCESS)
420 goto init_adminq_free_rings;
423 hw->aq.arq.count = hw->aq.num_arq_entries;
424 goto init_adminq_exit;
426 init_adminq_free_rings:
427 iavf_free_adminq_arq(hw);
434 * iavf_shutdown_asq - shutdown the ASQ
435 * @hw: pointer to the hardware structure
437 * The main shutdown routine for the Admin Send Queue
439 enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
441 enum iavf_status ret_code = IAVF_SUCCESS;
443 iavf_acquire_spinlock(&hw->aq.asq_spinlock);
445 if (hw->aq.asq.count == 0) {
446 ret_code = IAVF_ERR_NOT_READY;
447 goto shutdown_asq_out;
450 /* Stop firmware AdminQ processing */
451 wr32(hw, hw->aq.asq.head, 0);
452 wr32(hw, hw->aq.asq.tail, 0);
453 wr32(hw, hw->aq.asq.len, 0);
454 wr32(hw, hw->aq.asq.bal, 0);
455 wr32(hw, hw->aq.asq.bah, 0);
457 hw->aq.asq.count = 0; /* to indicate uninitialized queue */
459 /* free ring buffers */
460 iavf_free_asq_bufs(hw);
463 iavf_release_spinlock(&hw->aq.asq_spinlock);
468 * iavf_shutdown_arq - shutdown ARQ
469 * @hw: pointer to the hardware structure
471 * The main shutdown routine for the Admin Receive Queue
473 enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
475 enum iavf_status ret_code = IAVF_SUCCESS;
477 iavf_acquire_spinlock(&hw->aq.arq_spinlock);
479 if (hw->aq.arq.count == 0) {
480 ret_code = IAVF_ERR_NOT_READY;
481 goto shutdown_arq_out;
484 /* Stop firmware AdminQ processing */
485 wr32(hw, hw->aq.arq.head, 0);
486 wr32(hw, hw->aq.arq.tail, 0);
487 wr32(hw, hw->aq.arq.len, 0);
488 wr32(hw, hw->aq.arq.bal, 0);
489 wr32(hw, hw->aq.arq.bah, 0);
491 hw->aq.arq.count = 0; /* to indicate uninitialized queue */
493 /* free ring buffers */
494 iavf_free_arq_bufs(hw);
497 iavf_release_spinlock(&hw->aq.arq_spinlock);
502 * iavf_init_adminq - main initialization routine for Admin Queue
503 * @hw: pointer to the hardware structure
505 * Prior to calling this function, drivers *MUST* set the following fields
506 * in the hw->aq structure:
507 * - hw->aq.num_asq_entries
508 * - hw->aq.num_arq_entries
509 * - hw->aq.arq_buf_size
510 * - hw->aq.asq_buf_size
512 enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
514 enum iavf_status ret_code;
516 /* verify input for valid configuration */
517 if ((hw->aq.num_arq_entries == 0) ||
518 (hw->aq.num_asq_entries == 0) ||
519 (hw->aq.arq_buf_size == 0) ||
520 (hw->aq.asq_buf_size == 0)) {
521 ret_code = IAVF_ERR_CONFIG;
522 goto init_adminq_exit;
524 iavf_init_spinlock(&hw->aq.asq_spinlock);
525 iavf_init_spinlock(&hw->aq.arq_spinlock);
527 /* Set up register offsets */
528 iavf_adminq_init_regs(hw);
530 /* setup ASQ command write back timeout */
531 hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
533 /* allocate the ASQ */
534 ret_code = iavf_init_asq(hw);
535 if (ret_code != IAVF_SUCCESS)
536 goto init_adminq_destroy_spinlocks;
538 /* allocate the ARQ */
539 ret_code = iavf_init_arq(hw);
540 if (ret_code != IAVF_SUCCESS)
541 goto init_adminq_free_asq;
544 goto init_adminq_exit;
546 init_adminq_free_asq:
547 iavf_shutdown_asq(hw);
548 init_adminq_destroy_spinlocks:
549 iavf_destroy_spinlock(&hw->aq.asq_spinlock);
550 iavf_destroy_spinlock(&hw->aq.arq_spinlock);
557 * iavf_shutdown_adminq - shutdown routine for the Admin Queue
558 * @hw: pointer to the hardware structure
560 enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
562 enum iavf_status ret_code = IAVF_SUCCESS;
564 if (iavf_check_asq_alive(hw))
565 iavf_aq_queue_shutdown(hw, true);
567 iavf_shutdown_asq(hw);
568 iavf_shutdown_arq(hw);
569 iavf_destroy_spinlock(&hw->aq.asq_spinlock);
570 iavf_destroy_spinlock(&hw->aq.arq_spinlock);
576 * iavf_clean_asq - cleans Admin send queue
577 * @hw: pointer to the hardware structure
579 * returns the number of free desc
581 u16 iavf_clean_asq(struct iavf_hw *hw)
583 struct iavf_adminq_ring *asq = &(hw->aq.asq);
584 struct iavf_asq_cmd_details *details;
585 u16 ntc = asq->next_to_clean;
586 struct iavf_aq_desc desc_cb;
587 struct iavf_aq_desc *desc;
589 desc = IAVF_ADMINQ_DESC(*asq, ntc);
590 details = IAVF_ADMINQ_DETAILS(*asq, ntc);
591 while (rd32(hw, hw->aq.asq.head) != ntc) {
592 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
593 "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
595 if (details->callback) {
596 IAVF_ADMINQ_CALLBACK cb_func =
597 (IAVF_ADMINQ_CALLBACK)details->callback;
598 iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
600 cb_func(hw, &desc_cb);
602 iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
603 iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
605 if (ntc == asq->count)
607 desc = IAVF_ADMINQ_DESC(*asq, ntc);
608 details = IAVF_ADMINQ_DETAILS(*asq, ntc);
611 asq->next_to_clean = ntc;
613 return IAVF_DESC_UNUSED(asq);
617 * iavf_asq_done - check if FW has processed the Admin Send Queue
618 * @hw: pointer to the hw struct
620 * Returns true if the firmware has processed all descriptors on the
621 * admin send queue. Returns false if there are still requests pending.
623 bool iavf_asq_done(struct iavf_hw *hw)
625 /* AQ designers suggest use of head for better
626 * timing reliability than DD bit
628 return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
633 * iavf_asq_send_command - send command to Admin Queue
634 * @hw: pointer to the hw struct
635 * @desc: prefilled descriptor describing the command (non DMA mem)
636 * @buff: buffer to use for indirect commands
637 * @buff_size: size of buffer for indirect commands
638 * @cmd_details: pointer to command details structure
640 * This is the main send command driver routine for the Admin Queue send
641 * queue. It runs the queue, cleans the queue, etc
643 enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
644 struct iavf_aq_desc *desc,
645 void *buff, /* can be NULL */
647 struct iavf_asq_cmd_details *cmd_details)
649 enum iavf_status status = IAVF_SUCCESS;
650 struct iavf_dma_mem *dma_buff = NULL;
651 struct iavf_asq_cmd_details *details;
652 struct iavf_aq_desc *desc_on_ring;
653 bool cmd_completed = false;
657 iavf_acquire_spinlock(&hw->aq.asq_spinlock);
659 hw->aq.asq_last_status = IAVF_AQ_RC_OK;
661 if (hw->aq.asq.count == 0) {
662 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
663 "AQTX: Admin queue not initialized.\n");
664 status = IAVF_ERR_QUEUE_EMPTY;
665 goto asq_send_command_error;
668 val = rd32(hw, hw->aq.asq.head);
669 if (val >= hw->aq.num_asq_entries) {
670 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
671 "AQTX: head overrun at %d\n", val);
672 status = IAVF_ERR_QUEUE_EMPTY;
673 goto asq_send_command_error;
676 details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
680 sizeof(struct iavf_asq_cmd_details),
681 IAVF_NONDMA_TO_NONDMA);
683 /* If the cmd_details are defined copy the cookie. The
684 * CPU_TO_LE32 is not needed here because the data is ignored
685 * by the FW, only used by the driver
687 if (details->cookie) {
689 CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
691 CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
694 iavf_memset(details, 0,
695 sizeof(struct iavf_asq_cmd_details),
699 /* clear requested flags and then set additional flags if defined */
700 desc->flags &= ~CPU_TO_LE16(details->flags_dis);
701 desc->flags |= CPU_TO_LE16(details->flags_ena);
703 if (buff_size > hw->aq.asq_buf_size) {
705 IAVF_DEBUG_AQ_MESSAGE,
706 "AQTX: Invalid buffer size: %d.\n",
708 status = IAVF_ERR_INVALID_SIZE;
709 goto asq_send_command_error;
712 if (details->postpone && !details->async) {
714 IAVF_DEBUG_AQ_MESSAGE,
715 "AQTX: Async flag not set along with postpone flag");
716 status = IAVF_ERR_PARAM;
717 goto asq_send_command_error;
720 /* call clean and check queue available function to reclaim the
721 * descriptors that were processed by FW, the function returns the
722 * number of desc available
724 /* the clean function called here could be called in a separate thread
725 * in case of asynchronous completions
727 if (iavf_clean_asq(hw) == 0) {
729 IAVF_DEBUG_AQ_MESSAGE,
730 "AQTX: Error queue is full.\n");
731 status = IAVF_ERR_ADMIN_QUEUE_FULL;
732 goto asq_send_command_error;
735 /* initialize the temp desc pointer with the right desc */
736 desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
738 /* if the desc is available copy the temp desc to the right place */
739 iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
742 /* if buff is not NULL assume indirect command */
744 dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
745 /* copy the user buff into the respective DMA buff */
746 iavf_memcpy(dma_buff->va, buff, buff_size,
748 desc_on_ring->datalen = CPU_TO_LE16(buff_size);
750 /* Update the address values in the desc with the pa value
751 * for respective buffer
753 desc_on_ring->params.external.addr_high =
754 CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
755 desc_on_ring->params.external.addr_low =
756 CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
760 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
761 iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
763 (hw->aq.asq.next_to_use)++;
764 if (hw->aq.asq.next_to_use == hw->aq.asq.count)
765 hw->aq.asq.next_to_use = 0;
766 if (!details->postpone)
767 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
769 /* if cmd_details are not defined or async flag is not set,
770 * we need to wait for desc write back
772 if (!details->async && !details->postpone) {
776 /* AQ designers suggest use of head for better
777 * timing reliability than DD bit
779 if (iavf_asq_done(hw))
783 } while (total_delay < hw->aq.asq_cmd_timeout);
786 /* if ready, copy the desc back to temp */
787 if (iavf_asq_done(hw)) {
788 iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
791 iavf_memcpy(buff, dma_buff->va, buff_size,
793 retval = LE16_TO_CPU(desc->retval);
796 IAVF_DEBUG_AQ_MESSAGE,
797 "AQTX: Command completed with error 0x%X.\n",
800 /* strip off FW internal code */
803 cmd_completed = true;
804 if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
805 status = IAVF_SUCCESS;
806 else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
807 status = IAVF_ERR_NOT_READY;
809 status = IAVF_ERR_ADMIN_QUEUE_ERROR;
810 hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
813 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
814 "AQTX: desc and buffer writeback:\n");
815 iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
817 /* save writeback aq if requested */
818 if (details->wb_desc)
819 iavf_memcpy(details->wb_desc, desc_on_ring,
820 sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);
822 /* update the error if time out occurred */
823 if ((!cmd_completed) &&
824 (!details->async && !details->postpone)) {
825 if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
826 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
827 "AQTX: AQ Critical error.\n");
828 status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
830 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
831 "AQTX: Writeback timeout.\n");
832 status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
836 asq_send_command_error:
837 iavf_release_spinlock(&hw->aq.asq_spinlock);
842 * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
843 * @desc: pointer to the temp descriptor (non DMA mem)
844 * @opcode: the opcode can be used to decide which flags to turn off or on
846 * Fill the desc with default values
848 void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
851 /* zero out the desc */
852 iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
854 desc->opcode = CPU_TO_LE16(opcode);
855 desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
859 * iavf_clean_arq_element
860 * @hw: pointer to the hw struct
861 * @e: event info from the receive descriptor, includes any buffers
862 * @pending: number of events that could be left to process
864 * This function cleans one Admin Receive Queue element and returns
865 * the contents through e. It can also return how many events are
866 * left to process through 'pending'
868 enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
869 struct iavf_arq_event_info *e,
872 enum iavf_status ret_code = IAVF_SUCCESS;
873 u16 ntc = hw->aq.arq.next_to_clean;
874 struct iavf_aq_desc *desc;
875 struct iavf_dma_mem *bi;
881 /* pre-clean the event info */
882 iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);
884 /* take the lock before we start messing with the ring */
885 iavf_acquire_spinlock(&hw->aq.arq_spinlock);
887 if (hw->aq.arq.count == 0) {
888 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
889 "AQRX: Admin queue not initialized.\n");
890 ret_code = IAVF_ERR_QUEUE_EMPTY;
891 goto clean_arq_element_err;
894 /* set next_to_use to head */
895 ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
897 /* nothing to do - shouldn't need to update ring's values */
898 ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
899 goto clean_arq_element_out;
902 /* now clean the next descriptor */
903 desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
906 hw->aq.arq_last_status =
907 (enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
908 flags = LE16_TO_CPU(desc->flags);
909 if (flags & IAVF_AQ_FLAG_ERR) {
910 ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
912 IAVF_DEBUG_AQ_MESSAGE,
913 "AQRX: Event received with error 0x%X.\n",
914 hw->aq.arq_last_status);
917 iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
919 datalen = LE16_TO_CPU(desc->datalen);
920 e->msg_len = min(datalen, e->buf_len);
921 if (e->msg_buf != NULL && (e->msg_len != 0))
922 iavf_memcpy(e->msg_buf,
923 hw->aq.arq.r.arq_bi[desc_idx].va,
924 e->msg_len, IAVF_DMA_TO_NONDMA);
926 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
927 iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
928 hw->aq.arq_buf_size);
930 /* Restore the original datalen and buffer address in the desc,
931 * FW updates datalen to indicate the event message
934 bi = &hw->aq.arq.r.arq_bi[ntc];
935 iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);
937 desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
938 if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
939 desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
940 desc->datalen = CPU_TO_LE16((u16)bi->size);
941 desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
942 desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
944 /* set tail = the last cleaned desc index. */
945 wr32(hw, hw->aq.arq.tail, ntc);
946 /* ntc is updated to tail + 1 */
948 if (ntc == hw->aq.num_arq_entries)
950 hw->aq.arq.next_to_clean = ntc;
951 hw->aq.arq.next_to_use = ntu;
953 clean_arq_element_out:
954 /* Set pending if needed, unlock and return */
956 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
957 clean_arq_element_err:
958 iavf_release_spinlock(&hw->aq.arq_spinlock);