/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "avf_status.h"
#include "avf_type.h"
#include "avf_register.h"
#include "avf_adminq.h"
#include "avf_prototype.h"
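
/* This file implements the AVF Admin Queue (AQ): a send queue (ASQ, backed
 * by the ATQ* registers) that the driver uses to submit commands to
 * firmware, and a receive queue (ARQ) on which firmware posts events and
 * responses.  Both queues are DMA descriptor rings whose register offsets
 * are cached in hw->aq by avf_adminq_init_regs() below; all later MMIO
 * accesses go through those cached offsets via wr32()/rd32().
 */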

/**
 *  avf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void avf_adminq_init_regs(struct avf_hw *hw)
{
	/* set head and tail registers in our local struct */
	hw->aq.asq.tail = AVF_ATQT1;
	hw->aq.asq.head = AVF_ATQH1;
	hw->aq.asq.len  = AVF_ATQLEN1;
	hw->aq.asq.bal  = AVF_ATQBAL1;
	hw->aq.asq.bah  = AVF_ATQBAH1;
	hw->aq.arq.tail = AVF_ARQT1;
	hw->aq.arq.head = AVF_ARQH1;
	hw->aq.arq.len  = AVF_ARQLEN1;
	hw->aq.arq.bal  = AVF_ARQBAL1;
	hw->aq.arq.bah  = AVF_ARQBAH1;
}

/**
 *  avf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum avf_status_code avf_alloc_adminq_asq_ring(struct avf_hw *hw)
{
	enum avf_status_code ret_code;

	ret_code = avf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 avf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct avf_aq_desc)),
					 AVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = avf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct avf_asq_cmd_details)));
	if (ret_code) {
		avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  avf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum avf_status_code avf_alloc_adminq_arq_ring(struct avf_hw *hw)
{
	enum avf_status_code ret_code;

	ret_code = avf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 avf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct avf_aq_desc)),
					 AVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  avf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void avf_free_adminq_asq(struct avf_hw *hw)
{
	avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  avf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void avf_free_adminq_arq(struct avf_hw *hw)
{
	avf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  avf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum avf_status_code avf_alloc_arq_bufs(struct avf_hw *hw)
{
	enum avf_status_code ret_code;
	struct avf_aq_desc *desc;
	struct avf_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = avf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct avf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct avf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = avf_allocate_dma_mem(hw, bi,
						 avf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 AVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = AVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > AVF_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(AVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(AVF_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(AVF_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		avf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	avf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  avf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum avf_status_code avf_alloc_asq_bufs(struct avf_hw *hw)
{
	enum avf_status_code ret_code;
	struct avf_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = avf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct avf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct avf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = avf_allocate_dma_mem(hw, bi,
						 avf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 AVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}

alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		avf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	avf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  avf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void avf_free_arq_bufs(struct avf_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		avf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	avf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	avf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  avf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void avf_free_asq_bufs(struct avf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			avf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	avf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	avf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  avf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum avf_status_code avf_config_asq_regs(struct avf_hw *hw)
{
	enum avf_status_code ret_code = AVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
#ifdef INTEGRATED_VF
	if (avf_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  AVF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  AVF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
	wr32(hw, hw->aq.asq.bal, AVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, AVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != AVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  avf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event queue)
 **/
STATIC enum avf_status_code avf_config_arq_regs(struct avf_hw *hw)
{
	enum avf_status_code ret_code = AVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
#ifdef INTEGRATED_VF
	if (avf_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  AVF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  AVF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
	wr32(hw, hw->aq.arq.bal, AVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, AVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != AVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  avf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum avf_status_code avf_init_asq(struct avf_hw *hw)
{
	enum avf_status_code ret_code = AVF_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = AVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = AVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = avf_alloc_adminq_asq_ring(hw);
	if (ret_code != AVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = avf_alloc_asq_bufs(hw);
	if (ret_code != AVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = avf_config_asq_regs(hw);
	if (ret_code != AVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	avf_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  avf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum avf_status_code avf_init_arq(struct avf_hw *hw)
{
	enum avf_status_code ret_code = AVF_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = AVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = AVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = avf_alloc_adminq_arq_ring(hw);
	if (ret_code != AVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = avf_alloc_arq_bufs(hw);
	if (ret_code != AVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = avf_config_arq_regs(hw);
	if (ret_code != AVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	avf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  avf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum avf_status_code avf_shutdown_asq(struct avf_hw *hw)
{
	enum avf_status_code ret_code = AVF_SUCCESS;

	avf_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = AVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	avf_free_asq_bufs(hw);

shutdown_asq_out:
	avf_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 *  avf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum avf_status_code avf_shutdown_arq(struct avf_hw *hw)
{
	enum avf_status_code ret_code = AVF_SUCCESS;

	avf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = AVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	avf_free_arq_bufs(hw);

shutdown_arq_out:
	avf_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 *  avf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum avf_status_code avf_init_adminq(struct avf_hw *hw)
{
	enum avf_status_code ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = AVF_ERR_CONFIG;
		goto init_adminq_exit;
	}
	avf_init_spinlock(&hw->aq.asq_spinlock);
	avf_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	avf_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = AVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = avf_init_asq(hw);
	if (ret_code != AVF_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = avf_init_arq(hw);
	if (ret_code != AVF_SUCCESS)
		goto init_adminq_free_asq;

	ret_code = AVF_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	avf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	avf_destroy_spinlock(&hw->aq.asq_spinlock);
	avf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
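
/* Usage sketch: a minimal bring-up sequence for the Admin Queue.  The ring
 * and buffer sizes below are illustrative values chosen by the caller, not
 * mandated by this file:
 *
 *	hw->aq.num_asq_entries = 32;
 *	hw->aq.num_arq_entries = 32;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	if (avf_init_adminq(hw) != AVF_SUCCESS)
 *		return;		(failure paths unwind any partial setup)
 */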

/**
 *  avf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum avf_status_code avf_shutdown_adminq(struct avf_hw *hw)
{
	enum avf_status_code ret_code = AVF_SUCCESS;

	if (avf_check_asq_alive(hw))
		avf_aq_queue_shutdown(hw, true);

	avf_shutdown_asq(hw);
	avf_shutdown_arq(hw);
	avf_destroy_spinlock(&hw->aq.asq_spinlock);
	avf_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		avf_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  avf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 avf_clean_asq(struct avf_hw *hw)
{
	struct avf_adminq_ring *asq = &(hw->aq.asq);
	struct avf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct avf_aq_desc desc_cb;
	struct avf_aq_desc *desc;

	desc = AVF_ADMINQ_DESC(*asq, ntc);
	details = AVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
			  "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			AVF_ADMINQ_CALLBACK cb_func =
				(AVF_ADMINQ_CALLBACK)details->callback;
			avf_memcpy(&desc_cb, desc, sizeof(struct avf_aq_desc),
				   AVF_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		avf_memset(desc, 0, sizeof(*desc), AVF_DMA_MEM);
		avf_memset(details, 0, sizeof(*details), AVF_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = AVF_ADMINQ_DESC(*asq, ntc);
		details = AVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return AVF_DESC_UNUSED(asq);
}

/**
 *  avf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue.  Returns false if there are still requests pending.
 **/
bool avf_asq_done(struct avf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  avf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum avf_status_code avf_asq_send_command(struct avf_hw *hw,
				struct avf_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct avf_asq_cmd_details *cmd_details)
{
	enum avf_status_code status = AVF_SUCCESS;
	struct avf_dma_mem *dma_buff = NULL;
	struct avf_asq_cmd_details *details;
	struct avf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	avf_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = AVF_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = AVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = AVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = AVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		avf_memcpy(details,
			   cmd_details,
			   sizeof(struct avf_asq_cmd_details),
			   AVF_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(AVF_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(AVF_LO_DWORD(details->cookie));
		}
	} else {
		avf_memset(details, 0,
			   sizeof(struct avf_asq_cmd_details),
			   AVF_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		avf_debug(hw,
			   AVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = AVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		avf_debug(hw,
			   AVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = AVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (avf_clean_asq(hw) == 0) {
		avf_debug(hw,
			   AVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = AVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = AVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	avf_memcpy(desc_on_ring, desc, sizeof(struct avf_aq_desc),
		    AVF_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		avf_memcpy(dma_buff->va, buff, buff_size,
			    AVF_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(AVF_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(AVF_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	avf_debug(hw, AVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (avf_asq_done(hw))
				break;
			avf_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (avf_asq_done(hw)) {
		avf_memcpy(desc, desc_on_ring, sizeof(struct avf_aq_desc),
			    AVF_DMA_TO_NONDMA);
		if (buff != NULL)
			avf_memcpy(buff, dma_buff->va, buff_size,
				    AVF_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			avf_debug(hw,
				   AVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum avf_admin_queue_err)retval == AVF_AQ_RC_OK)
			status = AVF_SUCCESS;
		else
			status = AVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum avf_admin_queue_err)retval;
	}

	avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		avf_memcpy(details->wb_desc, desc_on_ring,
			   sizeof(struct avf_aq_desc), AVF_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & AVF_ATQLEN1_ATQCRIT_MASK) {
			avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = AVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = AVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	avf_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}
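
/* Usage sketch: issuing a direct (bufferless) command.  The opcode value is
 * hypothetical; real opcodes come from the adminq command definitions shared
 * with firmware:
 *
 *	struct avf_aq_desc desc;
 *
 *	avf_fill_default_direct_cmd_desc(&desc, 0x0801);
 *	if (avf_asq_send_command(hw, &desc, NULL, 0, NULL) != AVF_SUCCESS)
 *		avf_debug(hw, AVF_DEBUG_AQ_MESSAGE, "AQTX: send failed %d\n",
 *			  hw->aq.asq_last_status);
 */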

/**
 *  avf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc: pointer to the temp descriptor (non DMA mem)
 *  @opcode: the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void avf_fill_default_direct_cmd_desc(struct avf_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	avf_memset((void *)desc, 0, sizeof(struct avf_aq_desc),
		    AVF_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_SI);
}

/**
 *  avf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum avf_status_code avf_clean_arq_element(struct avf_hw *hw,
					     struct avf_arq_event_info *e,
					     u16 *pending)
{
	enum avf_status_code ret_code = AVF_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct avf_aq_desc *desc;
	struct avf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	avf_memset(&e->desc, 0, sizeof(e->desc), AVF_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	avf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = AVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
#ifdef INTEGRATED_VF
	if (!avf_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & AVF_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & AVF_ARQH1_ARQH_MASK;
#else
	ntu = rd32(hw, hw->aq.arq.head) & AVF_ARQH1_ARQH_MASK;
#endif /* INTEGRATED_VF */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = AVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = AVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum avf_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & AVF_AQ_FLAG_ERR) {
		ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;
		avf_debug(hw,
			   AVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	avf_memcpy(&e->desc, desc, sizeof(struct avf_aq_desc),
		    AVF_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		avf_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, AVF_DMA_TO_NONDMA);

	avf_debug(hw, AVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	avf_memset((void *)desc, 0, sizeof(struct avf_aq_desc), AVF_DMA_MEM);

	desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > AVF_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(AVF_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(AVF_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(AVF_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	avf_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
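
/* Usage sketch: draining pending ARQ events, assuming the caller supplies
 * its own event storage (caller_buf below is hypothetical):
 *
 *	struct avf_arq_event_info e;
 *	u16 pending = 0;
 *
 *	e.buf_len = hw->aq.arq_buf_size;
 *	e.msg_buf = caller_buf;
 *	do {
 *		if (avf_clean_arq_element(hw, &e, &pending) != AVF_SUCCESS)
 *			break;
 *		(handle e.desc and the e.msg_len bytes in e.msg_buf here)
 *	} while (pending);
 */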