/*******************************************************************************

Copyright (c) 2013 - 2014, Intel Corporation

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
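
/*
 * This file implements the AdminQ: the pair of descriptor rings (the Admin
 * Send Queue and the Admin Receive Queue) through which the driver issues
 * commands to firmware and receives events back from it.
 */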

#ifndef VF_DRIVER
/**
 *  i40e_is_nvm_update_op - return true if this is an NVM update operation
 *  @desc: API request descriptor
 **/
STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
		desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}

#endif /* VF_DRIVER */
/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (hw->mac.type == I40E_MAC_VF) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event queue)
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
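	/* Tail is set to count - 1 rather than count: with head == tail
	 * meaning "empty", one descriptor must stay unposted so a completely
	 * full ring remains distinguishable from an empty one.
	 */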

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	/* make sure spinlock is available */
	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

	i40e_release_spinlock(&hw->aq.asq_spinlock);

	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	/* make sure spinlock is available */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
#ifndef VF_DRIVER
	u16 eetrack_lo, eetrack_hi;
	int retry = 0;
#endif

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize spin locks */
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
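	/* Note: I40E_ASQ_CMD_TIMEOUT is in milliseconds; the completion wait
	 * loop in i40e_asq_send_command() accumulates i40e_msec_delay() calls
	 * against this value.
	 */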

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

#ifndef VF_DRIVER
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->aq.nvm_busy = false;

	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
	ret_code = I40E_SUCCESS;

#endif /* VF_DRIVER */
	/* success! */
	goto init_adminq_exit;

#ifndef VF_DRIVER
init_adminq_free_arq:
	i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the spinlocks */
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
#ifdef DMA_SYNC_SUPPORT
	I40E_DMA_SYNC(&hw->aq.asq.desc_buf, I40E_SYNC_FORKERNEL);
#endif /* DMA_SYNC_SUPPORT */
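	/* Walk the ring from next_to_clean up to the firmware-reported head;
	 * everything before head has been consumed by firmware, so its slot
	 * can be invoked for callbacks, zeroed, and reused.
	 */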
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
			   rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc,
				    sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue.  Returns false if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

#ifndef VF_DRIVER
	if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
		status = I40E_ERR_NVM;
		goto asq_send_command_exit;
	}

#endif
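	/* A details struct is kept per ring slot so that completion context
	 * (callback pointer, async/postpone flags, cookie) survives until
	 * i40e_clean_asq() processes the write back for that slot.
	 */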
	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
#ifdef DMA_SYNC_SUPPORT
		I40E_DMA_SYNC(dma_buff, I40E_SYNC_FORDEVICE);
#endif /* DMA_SYNC_SUPPORT */
	}

	/* bump the tail */
#ifdef DMA_SYNC_SUPPORT
	I40E_DMA_SYNC(&hw->aq.asq.desc_buf, I40E_SYNC_FORDEVICE);
#endif /* DMA_SYNC_SUPPORT */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
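	/* Writing next_to_use to the tail register is the doorbell that hands
	 * the new descriptor to firmware; a postponed command leaves the tail
	 * untouched and is picked up when a later send bumps it.
	 */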
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;
		u32 delay_len = 10;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			i40e_msec_delay(delay_len);
			total_delay += delay_len;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
#ifdef DMA_SYNC_SUPPORT
		I40E_DMA_SYNC(&hw->aq.asq.desc_buf, I40E_SYNC_FORKERNEL);
#endif /* DMA_SYNC_SUPPORT */
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	if (LE16_TO_CPU(desc->datalen) == buff_size) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: desc and buffer writeback:\n");
		i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
	}

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

#ifndef VF_DRIVER
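	/* Mark the NVM resource busy for a successful erase/update command;
	 * i40e_clean_arq_element() clears the flag when the completion event
	 * for the operation arrives on the ARQ.
	 */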
	if (!status && i40e_is_nvm_update_op(desc))
		hw->aq.nvm_busy = true;

#endif /* VF_DRIVER */
asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
asq_send_command_exit:
	return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc: pointer to the temp descriptor (non DMA mem)
 *  @opcode: the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
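
/*
 * Typical direct-command usage of the helper above (illustrative sketch
 * only, not a call site in this file):
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 */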

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

#ifdef DMA_SYNC_SUPPORT
	I40E_DMA_SYNC(&hw->aq.arq.desc_buf, I40E_SYNC_FORKERNEL);
#endif /* DMA_SYNC_SUPPORT */

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Queue is empty.\n");
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;
#ifdef DMA_SYNC_SUPPORT
	I40E_DMA_SYNC(&hw->aq.arq.r.arq_bi[desc_idx], I40E_SYNC_FORKERNEL);
#endif /* DMA_SYNC_SUPPORT */

	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_size = min(datalen, e->msg_size);
	if (e->msg_buf != NULL && (e->msg_size != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_size, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
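	/* ntu mirrors the firmware head, so the modular distance from ntc to
	 * ntu is the number of received events still waiting on the ring.
	 */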
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	i40e_release_spinlock(&hw->aq.arq_spinlock);

#ifndef VF_DRIVER
	if (i40e_is_nvm_update_op(&e->desc)) {
		hw->aq.nvm_busy = false;
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = false;
		}
	}

#endif
	return ret_code;
}

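/**
 *  i40e_resume_aq - resume AQ processing from idle
 *  @hw: pointer to the hardware structure
 **/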
void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

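	/* i40e_config_asq_regs()/i40e_config_arq_regs() use the PF enable
	 * masks even when running as a VF; that is only correct if the PF
	 * and VF masks are identical, which the build-time check below
	 * enforces.
	 */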
#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
#endif
	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}