/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013 - 2015 Intel Corporation
 */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 * iavf_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
	/* set head and tail registers in our local struct */
	hw->aq.asq.tail = IAVF_VF_ATQT1;
	hw->aq.asq.head = IAVF_VF_ATQH1;
	hw->aq.asq.len = IAVF_VF_ATQLEN1;
	hw->aq.asq.bal = IAVF_VF_ATQBAL1;
	hw->aq.asq.bah = IAVF_VF_ATQBAH1;
	hw->aq.arq.tail = IAVF_VF_ARQT1;
	hw->aq.arq.head = IAVF_VF_ARQH1;
	hw->aq.arq.len = IAVF_VF_ARQLEN1;
	hw->aq.arq.bal = IAVF_VF_ARQBAL1;
	hw->aq.arq.bah = IAVF_VF_ARQBAH1;
}

/**
 * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 iavf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 iavf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * iavf_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * iavf_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
	enum iavf_status ret_code;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

	goto alloc_arq_bufs;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

alloc_arq_bufs:
	return ret_code;
}
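
/* Note: on the receive side every descriptor is pre-posted above with an
 * attached DMA buffer so the firmware can write an event at any time; the
 * send side (below) only allocates per-entry buffers and attaches one to a
 * descriptor when an indirect command is actually submitted.
 */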

/**
 * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
	enum iavf_status ret_code;
	struct iavf_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}

	goto alloc_asq_bufs;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

alloc_asq_bufs:
	return ret_code;
}

/**
 * iavf_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * iavf_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * iavf_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
#ifdef INTEGRATED_VF
	if (iavf_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
	wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * iavf_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
#ifdef INTEGRATED_VF
	if (iavf_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
	wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * iavf_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_asq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_asq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_asq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * iavf_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_arq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_arq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_arq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * iavf_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_asq_bufs(hw);

shutdown_asq_out:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 * iavf_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_arq_bufs(hw);

shutdown_arq_out:
	iavf_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 * iavf_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 * - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}
	iavf_init_spinlock(&hw->aq.asq_spinlock);
	iavf_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	iavf_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = iavf_init_asq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = iavf_init_arq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
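
/* Usage sketch (illustrative only, not part of the driver): a caller is
 * expected to fill in the mandatory hw->aq fields listed above before
 * initializing the Admin Queue, and to shut it down on teardown. The entry
 * counts and buffer sizes below are hypothetical example values.
 *
 *	enum iavf_status status;
 *
 *	hw->aq.num_asq_entries = 32;
 *	hw->aq.num_arq_entries = 32;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *
 *	status = iavf_init_adminq(hw);
 *	if (status != IAVF_SUCCESS)
 *		return status;
 *	// ... use the queues ...
 *	iavf_shutdown_adminq(hw);
 */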

/**
 * iavf_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	iavf_shutdown_asq(hw);
	iavf_shutdown_arq(hw);
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

/**
 * iavf_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * Returns the number of free descriptors
 **/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &(hw->aq.asq);
	struct iavf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct iavf_aq_desc desc_cb;
	struct iavf_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			IAVF_ADMINQ_CALLBACK cb_func =
				(IAVF_ADMINQ_CALLBACK)details->callback;
			iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
				    IAVF_DMA_TO_NONDMA);
			cb_func(hw, &desc_cb);
		}
		iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
		iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}
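
/* Callback sketch (illustrative only): a caller that does not want to block
 * in iavf_asq_send_command() can request asynchronous completion and have
 * iavf_clean_asq() invoke a callback with a copy of the written-back
 * descriptor. The callback name is hypothetical, and the cast assumes the
 * callback field is stored as an opaque pointer, as the cast above suggests.
 *
 *	static void my_aq_done(struct iavf_hw *hw, struct iavf_aq_desc *desc)
 *	{
 *		// inspect desc->retval, desc->cookie_high/low, ...
 *	}
 *
 *	struct iavf_asq_cmd_details details = { 0 };
 *
 *	details.async = true;
 *	details.callback = (void *)my_aq_done;
 *	status = iavf_asq_send_command(hw, &desc, NULL, 0, &details);
 */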

/**
 * iavf_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * iavf_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc.
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
				       struct iavf_aq_desc *desc,
				       void *buff, /* can be NULL */
				       u16 buff_size,
				       struct iavf_asq_cmd_details *cmd_details)
{
	enum iavf_status status = IAVF_SUCCESS;
	struct iavf_dma_mem *dma_buff = NULL;
	struct iavf_asq_cmd_details *details;
	struct iavf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;
	u32 val = 0;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = IAVF_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		iavf_memcpy(details,
			    cmd_details,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie. The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
		}
	} else {
		iavf_memset(details, 0,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = IAVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = IAVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (iavf_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = IAVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		iavf_memcpy(dma_buff->va, buff, buff_size,
			    IAVF_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			iavf_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
			    IAVF_DMA_TO_NONDMA);
		if (buff != NULL)
			iavf_memcpy(buff, dma_buff->va, buff_size,
				    IAVF_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
			status = IAVF_SUCCESS;
		else
			status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		iavf_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}
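
/* Send sketch (illustrative only): an indirect command carries its payload
 * in the caller's buffer, which the routine above copies into the ring's
 * DMA buffer before bumping the tail; datalen and the buffer address are
 * filled in on the ring descriptor automatically. The opcode and payload
 * are hypothetical placeholders, and IAVF_AQ_FLAG_RD is assumed to be
 * defined alongside IAVF_AQ_FLAG_BUF in the adminq header.
 *
 *	struct iavf_aq_desc desc;
 *	u8 msg[64] = { 0 };	// hypothetical payload
 *	enum iavf_status status;
 *
 *	iavf_fill_default_direct_cmd_desc(&desc, hypothetical_opcode);
 *	desc.flags |= CPU_TO_LE16(IAVF_AQ_FLAG_BUF | IAVF_AQ_FLAG_RD);
 *	status = iavf_asq_send_command(hw, &desc, msg, sizeof(msg), NULL);
 *	if (status != IAVF_SUCCESS)
 *		// inspect hw->aq.asq_last_status for the FW return code
 */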

/**
 * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}
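
/* Direct-command sketch (illustrative only): a command with no attached
 * buffer is sent by filling a descriptor with defaults and passing a NULL
 * buffer. The opcode below is a hypothetical placeholder.
 *
 *	struct iavf_aq_desc desc;
 *	enum iavf_status status;
 *
 *	iavf_fill_default_direct_cmd_desc(&desc, hypothetical_opcode);
 *	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
 */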

/**
 * iavf_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
					struct iavf_arq_event_info *e,
					u16 *pending)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = IAVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
#ifdef INTEGRATED_VF
	if (!iavf_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & IAVF_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
#else
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
#endif /* INTEGRATED_VF */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & IAVF_AQ_FLAG_ERR) {
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
		    IAVF_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		iavf_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, IAVF_DMA_TO_NONDMA);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	iavf_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
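
/* Receive sketch (illustrative only): a caller typically polls the ARQ,
 * draining one event per call until IAVF_ERR_ADMIN_QUEUE_NO_WORK is
 * returned. The buffer size below is a hypothetical example value.
 *
 *	struct iavf_arq_event_info event;
 *	enum iavf_status status;
 *	u16 pending;
 *
 *	event.buf_len = 4096;
 *	event.msg_buf = some_buffer;	// caller-allocated, buf_len bytes
 *
 *	do {
 *		status = iavf_clean_arq_element(hw, &event, &pending);
 *		if (status == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
 *			break;
 *		// process event.desc and event.msg_len bytes of event.msg_buf
 *	} while (pending);
 */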