/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

#ifdef PF_DRIVER
/**
 * i40e_is_nvm_update_op - return true if this is an NVM update operation
 * @desc: API request descriptor
 **/
STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
		desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}

#endif /* PF_DRIVER */

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len = I40E_VF_ATQLEN1;
		hw->aq.asq.bal = I40E_VF_ATQBAL1;
		hw->aq.asq.bah = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len = I40E_VF_ARQLEN1;
		hw->aq.arq.bal = I40E_VF_ARQBAL1;
		hw->aq.arq.bah = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len = I40E_PF_ATQLEN;
		hw->aq.asq.bal = I40E_PF_ATQBAL;
		hw->aq.asq.bah = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len = I40E_PF_ARQLEN;
		hw->aq.arq.bal = I40E_PF_ARQBAL;
		hw->aq.arq.bah = I40E_PF_ARQBAH;
	}
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
#ifdef PF_DRIVER
	u16 eetrack_lo, eetrack_hi;
	u16 cfg_ptr, oem_hi, oem_lo;
	int retry = 0;
#endif

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize spin locks */
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
#endif
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->aq.nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
#endif /* PF_DRIVER */
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
	i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}

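/* Example usage (illustrative sketch only, not part of the shared code):
 * a typical AdminQ bring-up/tear-down sequence as a driver probe path
 * might perform it.  "example_adminq_setup" is a hypothetical caller and
 * the entry count is an arbitrary sample value.
 *
 *	enum i40e_status_code example_adminq_setup(struct i40e_hw *hw)
 *	{
 *		enum i40e_status_code status;
 *
 *		hw->aq.num_asq_entries = 128;
 *		hw->aq.num_arq_entries = 128;
 *		hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *		hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *
 *		status = i40e_init_adminq(hw);
 *		if (status != I40E_SUCCESS)
 *			return status;
 *
 *		... send commands and clean ARQ events here ...
 *
 *		return i40e_shutdown_adminq(hw);
 *	}
 */
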
/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the spinlocks */
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);

	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue.  It runs the queue, cleans the queue, etc.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16 buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;
	u32 val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

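/* Example usage (illustrative sketch only, not part of the shared code):
 * sending a simple direct command.  "example_query_fw_version" is a
 * hypothetical helper; the descriptor is built with
 * i40e_fill_default_direct_cmd_desc() below, and the opcode comes from
 * i40e_adminq_cmd.h.  An indirect command would instead pass a buffer and
 * its size, which i40e_asq_send_command() copies into the ring's mapped
 * DMA buffer before setting the descriptor's address fields.
 *
 *	enum i40e_status_code example_query_fw_version(struct i40e_hw *hw)
 *	{
 *		struct i40e_aq_desc desc;
 *
 *		i40e_fill_default_direct_cmd_desc(&desc,
 *						  i40e_aqc_opc_get_version);
 *
 *		return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	}
 */
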
/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}

/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	/* set next_to_use to head */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#else
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#else
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	i40e_release_spinlock(&hw->aq.arq_spinlock);

#ifdef PF_DRIVER
	if (i40e_is_nvm_update_op(&e->desc)) {
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = false;
		}

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;

		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;

		default:
			break;
		}
	}
#endif /* PF_DRIVER */

	return ret_code;
}

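/* Example usage (illustrative sketch only, not part of the shared code):
 * draining the receive queue, e.g. from an AdminQ interrupt handler.
 * "example_drain_arq" and "example_handle_event" are hypothetical helpers,
 * and the scratch buffer size is an arbitrary choice; a real driver sizes
 * msg_buf to hw->aq.arq_buf_size.
 *
 *	void example_drain_arq(struct i40e_hw *hw)
 *	{
 *		struct i40e_arq_event_info event;
 *		enum i40e_status_code ret;
 *		u8 msg[I40E_AQ_LARGE_BUF];
 *		u16 pending = 0;
 *
 *		event.buf_len = sizeof(msg);
 *		event.msg_buf = msg;
 *
 *		do {
 *			ret = i40e_clean_arq_element(hw, &event, &pending);
 *			if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
 *				break;
 *			example_handle_event(hw, &event);
 *		} while (pending);
 *	}
 */
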
/**
 * i40e_resume_aq - resume AQ processing from 0
 * @hw: pointer to the hardware structure
 **/
void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}