/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013 - 2015 Intel Corporation
 */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 * iavf_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
	/* set head and tail registers in our local struct */
	hw->aq.asq.tail = IAVF_VF_ATQT1;
	hw->aq.asq.head = IAVF_VF_ATQH1;
	hw->aq.asq.len  = IAVF_VF_ATQLEN1;
	hw->aq.asq.bal  = IAVF_VF_ATQBAL1;
	hw->aq.asq.bah  = IAVF_VF_ATQBAH1;
	hw->aq.arq.tail = IAVF_VF_ARQT1;
	hw->aq.arq.head = IAVF_VF_ARQH1;
	hw->aq.arq.len  = IAVF_VF_ARQLEN1;
	hw->aq.arq.bal  = IAVF_VF_ARQBAL1;
	hw->aq.arq.bah  = IAVF_VF_ARQBAH1;
}

/**
 * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 iavf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 iavf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * iavf_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * iavf_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
	enum iavf_status ret_code;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
	enum iavf_status ret_code;
	struct iavf_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}

alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * iavf_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * iavf_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * iavf_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * iavf_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * iavf_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_asq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_asq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_asq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * iavf_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_arq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_arq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_arq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * iavf_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_asq_bufs(hw);

shutdown_asq_out:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 * iavf_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_arq_bufs(hw);

shutdown_arq_out:
	iavf_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 * iavf_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}
	iavf_init_spinlock(&hw->aq.asq_spinlock);
	iavf_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	iavf_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = iavf_init_asq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = iavf_init_arq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}

/**
 * iavf_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	iavf_shutdown_asq(hw);
	iavf_shutdown_arq(hw);
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

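/*
 * Usage sketch (illustrative only, not part of the driver): per the
 * kernel-doc above, a caller must size both rings and their buffers
 * before iavf_init_adminq() and tear everything down with
 * iavf_shutdown_adminq(). The entry counts and the 4096-byte buffer
 * size below are assumed example values, not mandated by this file.
 */
#if 0
static enum iavf_status example_adminq_lifecycle(struct iavf_hw *hw)
{
	enum iavf_status status;

	/* fields that *MUST* be set before iavf_init_adminq() */
	hw->aq.num_asq_entries = 32;
	hw->aq.num_arq_entries = 32;
	hw->aq.asq_buf_size = 4096;
	hw->aq.arq_buf_size = 4096;

	status = iavf_init_adminq(hw);
	if (status != IAVF_SUCCESS)
		return status;

	/* ... exchange commands and events with the PF ... */

	return iavf_shutdown_adminq(hw);
}
#endif
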
/**
 * iavf_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &(hw->aq.asq);
	struct iavf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct iavf_aq_desc desc_cb;
	struct iavf_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			IAVF_ADMINQ_CALLBACK cb_func =
				(IAVF_ADMINQ_CALLBACK)details->callback;
			iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
				    IAVF_DMA_TO_NONDMA);
			cb_func(hw, &desc_cb);
		}
		iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
		iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}

/**
 * iavf_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * iavf_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
				       struct iavf_aq_desc *desc,
				       void *buff, /* can be NULL */
				       u16 buff_size,
				       struct iavf_asq_cmd_details *cmd_details)
{
	enum iavf_status status = IAVF_SUCCESS;
	struct iavf_dma_mem *dma_buff = NULL;
	struct iavf_asq_cmd_details *details;
	struct iavf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;
	u32 val = 0;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = IAVF_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		iavf_memcpy(details,
			    cmd_details,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie. The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
		}
	} else {
		iavf_memset(details, 0,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = IAVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = IAVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (iavf_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = IAVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		iavf_memcpy(dma_buff->va, buff, buff_size,
			    IAVF_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			iavf_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
			    IAVF_DMA_TO_NONDMA);
		if (buff != NULL)
			iavf_memcpy(buff, dma_buff->va, buff_size,
				    IAVF_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
			status = IAVF_SUCCESS;
		else
			status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		iavf_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}

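/*
 * Usage sketch (illustrative only): sending a "direct" command, i.e. one
 * with no attached buffer. iavf_fill_default_direct_cmd_desc() zeroes the
 * descriptor and sets the opcode plus the SI flag; passing a NULL buffer
 * and a zero length to iavf_asq_send_command() marks the command as
 * direct. The opcode is whatever the caller needs; iavf_aq_queue_shutdown()
 * (called from iavf_shutdown_adminq() above) is a real instance of this
 * pattern.
 */
#if 0
static enum iavf_status example_send_direct_cmd(struct iavf_hw *hw,
						u16 opcode)
{
	struct iavf_aq_desc desc;

	iavf_fill_default_direct_cmd_desc(&desc, opcode);

	/* no buffer, no cmd_details: synchronous direct command */
	return iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
}
#endif
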
/**
 * iavf_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
					struct iavf_arq_event_info *e,
					u16 *pending)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = IAVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & IAVF_AQ_FLAG_ERR) {
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
		    IAVF_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		iavf_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, IAVF_DMA_TO_NONDMA);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	iavf_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

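/*
 * Usage sketch (illustrative only): draining pending ARQ events. The
 * caller supplies its own message buffer; each call to
 * iavf_clean_arq_element() copies out one event and reports how many
 * more are outstanding through 'pending'. The 4096-byte buffer is an
 * assumed example size matching the hw->aq.arq_buf_size chosen at init.
 */
#if 0
static void example_drain_arq(struct iavf_hw *hw)
{
	struct iavf_arq_event_info event;
	u8 msg_buf[4096];
	u16 pending = 0;

	event.buf_len = sizeof(msg_buf);
	event.msg_buf = msg_buf;

	do {
		if (iavf_clean_arq_element(hw, &event, &pending) !=
		    IAVF_SUCCESS)
			break;
		/* event.desc and event.msg_buf now hold one event */
	} while (pending != 0);
}
#endif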