/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013 - 2015 Intel Corporation
 */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"
/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
	/* set head and tail registers in our local struct */
	hw->aq.asq.tail = IAVF_ATQT1;
	hw->aq.asq.head = IAVF_ATQH1;
	hw->aq.asq.len  = IAVF_ATQLEN1;
	hw->aq.asq.bal  = IAVF_ATQBAL1;
	hw->aq.asq.bah  = IAVF_ATQBAH1;
	hw->aq.arq.tail = IAVF_ARQT1;
	hw->aq.arq.head = IAVF_ARQH1;
	hw->aq.arq.len  = IAVF_ARQLEN1;
	hw->aq.arq.bal  = IAVF_ARQBAL1;
	hw->aq.arq.bah  = IAVF_ARQBAH1;
}
/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status_code iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 iavf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}
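
/* Sizing illustration (informational comment only): the descriptor ring above
 * is a single DMA allocation of num_asq_entries * sizeof(struct iavf_aq_desc)
 * bytes; assuming the usual 32-byte admin queue descriptor, a 32-entry ring
 * takes 1024 bytes, aligned to IAVF_ADMINQ_DESC_ALIGNMENT.  The cmd_buf area
 * is ordinary virtual memory holding one struct iavf_asq_cmd_details per
 * descriptor for driver-side bookkeeping.
 */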
/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status_code iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 iavf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}
/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status_code iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
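
/* Note: unlike the send queue, every receive descriptor is pre-posted here
 * with IAVF_AQ_FLAG_BUF and a buffer address so firmware always has somewhere
 * to write the next event; iavf_clean_arq_element() re-arms each descriptor
 * the same way once its event has been consumed.
 */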
/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status_code iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code;
	struct iavf_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}

alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}
/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum iavf_status_code iavf_config_asq_regs(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
#ifdef INTEGRATED_VF
	if (iavf_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  IAVF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  IAVF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
	wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event queue)
 **/
STATIC enum iavf_status_code iavf_config_arq_regs(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
#ifdef INTEGRATED_VF
	if (iavf_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  IAVF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  IAVF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
	wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status_code iavf_init_asq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_asq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_asq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_asq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}
/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status_code iavf_init_arq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_arq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_arq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_arq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}
/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum iavf_status_code iavf_shutdown_asq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_asq_bufs(hw);

shutdown_asq_out:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}
/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum iavf_status_code iavf_shutdown_arq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_arq_bufs(hw);

shutdown_arq_out:
	iavf_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}
/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status_code iavf_init_adminq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}
	iavf_init_spinlock(&hw->aq.asq_spinlock);
	iavf_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	iavf_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = iavf_init_asq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = iavf_init_arq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_asq;

	ret_code = IAVF_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
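
/* Usage sketch (illustrative only, not called from this file): a driver is
 * expected to fill in the sizing fields before calling iavf_init_adminq().
 * IAVF_AQ_LEN and IAVF_AQ_BUF_SZ below stand in for whatever ring depth and
 * buffer size the caller chooses.
 *
 *	hw->aq.num_asq_entries = IAVF_AQ_LEN;
 *	hw->aq.num_arq_entries = IAVF_AQ_LEN;
 *	hw->aq.asq_buf_size = IAVF_AQ_BUF_SZ;
 *	hw->aq.arq_buf_size = IAVF_AQ_BUF_SZ;
 *	if (iavf_init_adminq(hw) != IAVF_SUCCESS)
 *		goto err_init_adminq;
 */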
/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status_code iavf_shutdown_adminq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;

	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	iavf_shutdown_asq(hw);
	iavf_shutdown_arq(hw);
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		iavf_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}
/**
 *  iavf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &(hw->aq.asq);
	struct iavf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct iavf_aq_desc desc_cb;
	struct iavf_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			IAVF_ADMINQ_CALLBACK cb_func =
				(IAVF_ADMINQ_CALLBACK)details->callback;
			iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
				    IAVF_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
		iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}
/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
enum iavf_status_code iavf_asq_send_command(struct iavf_hw *hw,
				struct iavf_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct iavf_asq_cmd_details *cmd_details)
{
	enum iavf_status_code status = IAVF_SUCCESS;
	struct iavf_dma_mem *dma_buff = NULL;
	struct iavf_asq_cmd_details *details;
	struct iavf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = IAVF_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		iavf_memcpy(details,
			    cmd_details,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
		}
	} else {
		iavf_memset(details, 0,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = IAVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = IAVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (iavf_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = IAVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		iavf_memcpy(dma_buff->va, buff, buff_size,
			    IAVF_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			iavf_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
			    IAVF_DMA_TO_NONDMA);
		if (buff != NULL)
			iavf_memcpy(buff, dma_buff->va, buff_size,
				    IAVF_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
			status = IAVF_SUCCESS;
		else
			status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		iavf_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}
/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}
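
/* Usage sketch (illustrative only): a direct command carries everything in
 * the descriptor itself, while an indirect command also passes a buffer that
 * iavf_asq_send_command() copies into the pre-allocated DMA area.  The opcode
 * below is just one example of a direct command.
 *
 *	struct iavf_aq_desc desc;
 *	enum iavf_status_code status;
 *
 *	iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
 *	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
 */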
/**
 *  iavf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum iavf_status_code iavf_clean_arq_element(struct iavf_hw *hw,
					     struct iavf_arq_event_info *e,
					     u16 *pending)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = IAVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
#ifdef INTEGRATED_VF
	if (!iavf_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & IAVF_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & IAVF_ARQH1_ARQH_MASK;
#else
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_ARQH1_ARQH_MASK;
#endif /* INTEGRATED_VF */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & IAVF_AQ_FLAG_ERR) {
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
		    IAVF_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		iavf_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, IAVF_DMA_TO_NONDMA);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	iavf_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
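
/* Usage sketch (illustrative only): callers typically drain the ARQ until
 * IAVF_ERR_ADMIN_QUEUE_NO_WORK is returned.  The event buffer is provided by
 * the caller; msg_buf below stands in for a caller-allocated buffer of
 * arq_buf_size bytes.
 *
 *	struct iavf_arq_event_info event;
 *	u16 pending;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = msg_buf;
 *	do {
 *		if (iavf_clean_arq_element(hw, &event, &pending))
 *			break;
 *		handle event.desc, event.msg_buf and event.msg_len here
 *	} while (pending);
 */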