1 /*******************************************************************************
3 Copyright (c) 2013 - 2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "i40e_status.h"
35 #include "i40e_type.h"
36 #include "i40e_register.h"
37 #include "i40e_adminq.h"
38 #include "i40e_prototype.h"
/* NOTE(review): elided listing -- this function's braces and its enclosing
 * #ifdef (closed by the "#endif PF_DRIVER" just below) are not visible. */
42 * i40e_is_nvm_update_op - return true if this is an NVM update operation
43 * @desc: API request descriptor
45 STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
/* desc->opcode is stored little-endian, so compare against LE-converted
 * opcode constants instead of byte-swapping the descriptor field. */
47 return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
48 desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
51 #endif /* PF_DRIVER */
53 * i40e_adminq_init_regs - Initialize AdminQ registers
54 * @hw: pointer to the hardware structure
56 * This assumes the alloc_asq and alloc_arq functions have already been called
/* Records the register *offsets* (not values) for the send (ASQ) and
 * receive (ARQ) admin queues in hw->aq so later code can wr32/rd32 them. */
58 STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
60 /* set head and tail registers in our local struct */
/* VF register set (I40E_VF_*1 offsets).  NOTE(review): the #ifdef lines
 * selecting VF vs. PF builds are missing from this elided listing --
 * presumably these assignments are guarded by VF_DRIVER; confirm against
 * the full source. */
62 hw->aq.asq.tail = I40E_VF_ATQT1;
63 hw->aq.asq.head = I40E_VF_ATQH1;
64 hw->aq.asq.len = I40E_VF_ATQLEN1;
65 hw->aq.asq.bal = I40E_VF_ATQBAL1;
66 hw->aq.asq.bah = I40E_VF_ATQBAH1;
67 hw->aq.arq.tail = I40E_VF_ARQT1;
68 hw->aq.arq.head = I40E_VF_ARQH1;
69 hw->aq.arq.len = I40E_VF_ARQLEN1;
70 hw->aq.arq.bal = I40E_VF_ARQBAL1;
71 hw->aq.arq.bah = I40E_VF_ARQBAH1;
/* PF register set (I40E_PF_* offsets) -- presumably the PF_DRIVER branch. */
74 hw->aq.asq.tail = I40E_PF_ATQT;
75 hw->aq.asq.head = I40E_PF_ATQH;
76 hw->aq.asq.len = I40E_PF_ATQLEN;
77 hw->aq.asq.bal = I40E_PF_ATQBAL;
78 hw->aq.asq.bah = I40E_PF_ATQBAH;
79 hw->aq.arq.tail = I40E_PF_ARQT;
80 hw->aq.arq.head = I40E_PF_ARQH;
81 hw->aq.arq.len = I40E_PF_ARQLEN;
82 hw->aq.arq.bal = I40E_PF_ARQBAL;
83 hw->aq.arq.bah = I40E_PF_ARQBAH;
89 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
90 * @hw: pointer to the hardware structure
/* Allocates the DMA-able descriptor ring plus a parallel (virtual-memory)
 * array of per-command details.  Returns I40E_SUCCESS or an allocation
 * error code. */
92 enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
94 enum i40e_status_code ret_code;
/* Descriptor ring must be DMA memory: firmware reads it directly. */
96 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
98 (hw->aq.num_asq_entries *
99 sizeof(struct i40e_aq_desc)),
100 I40E_ADMINQ_DESC_ALIGNMENT);
/* cmd_buf holds driver-only i40e_asq_cmd_details; plain virt memory.
 * NOTE(review): the error checks between the two allocations are elided. */
104 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
105 (hw->aq.num_asq_entries *
106 sizeof(struct i40e_asq_cmd_details)));
/* On cmd_buf failure, release the already-allocated ring (no leak). */
108 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
116 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
117 * @hw: pointer to the hardware structure
/* Allocates only the ARQ descriptor ring; the receive data buffers are
 * allocated separately by i40e_alloc_arq_bufs().  Returns the allocation
 * status code. */
119 enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
121 enum i40e_status_code ret_code;
123 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
125 (hw->aq.num_arq_entries *
126 sizeof(struct i40e_aq_desc)),
127 I40E_ADMINQ_DESC_ALIGNMENT);
133 * i40e_free_adminq_asq - Free Admin Queue send rings
134 * @hw: pointer to the hardware structure
136 * This assumes the posted send buffers have already been cleaned
/* Counterpart of i40e_alloc_adminq_asq_ring(): releases the ASQ
 * descriptor ring DMA memory. */
139 void i40e_free_adminq_asq(struct i40e_hw *hw)
141 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
145 * i40e_free_adminq_arq - Free Admin Queue receive rings
146 * @hw: pointer to the hardware structure
148 * This assumes the posted receive buffers have already been cleaned
/* Counterpart of i40e_alloc_adminq_arq_ring(): releases the ARQ
 * descriptor ring DMA memory. */
151 void i40e_free_adminq_arq(struct i40e_hw *hw)
153 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
157 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
158 * @hw: pointer to the hardware structure
/* Allocates one DMA data buffer per ARQ descriptor and pre-fills each
 * descriptor with the buffer's address/size so firmware can DMA event
 * messages immediately.  Unwinds all allocations on failure. */
160 STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
162 enum i40e_status_code ret_code;
163 struct i40e_aq_desc *desc;
164 struct i40e_dma_mem *bi;
167 /* We'll be allocating the buffer info memory first, then we can
168 * allocate the mapped buffers for the event processing
171 /* buffer_info structures do not need alignment */
172 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
173 (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
176 hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
178 /* allocate the mapped buffers */
179 for (i = 0; i < hw->aq.num_arq_entries; i++) {
180 bi = &hw->aq.arq.r.arq_bi[i];
/* NOTE(review): the buffer-size argument line is elided here --
 * presumably hw->aq.arq_buf_size; confirm against the full source. */
181 ret_code = i40e_allocate_dma_mem(hw, bi,
184 I40E_ADMINQ_DESC_ALIGNMENT);
186 goto unwind_alloc_arq_bufs;
188 /* now configure the descriptors for use */
189 desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
/* BUF flag = indirect command buffer attached; LB flag marks a "large"
 * (> I40E_AQ_LARGE_BUF) buffer per the AQ descriptor format. */
191 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
192 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
193 desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
195 /* This is in accordance with Admin queue design, there is no
196 * register for buffer size configuration
198 desc->datalen = CPU_TO_LE16((u16)bi->size);
200 desc->cookie_high = 0;
201 desc->cookie_low = 0;
/* Split the 64-bit DMA address across the two 32-bit descriptor fields. */
202 desc->params.external.addr_high =
203 CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
204 desc->params.external.addr_low =
205 CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
206 desc->params.external.param0 = 0;
207 desc->params.external.param1 = 0;
213 unwind_alloc_arq_bufs:
214 /* don't try to free the one that failed... */
/* Frees buffers [0, i) already allocated, then the buffer-info array.
 * NOTE(review): the decrementing loop line itself is elided. */
217 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
218 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
224 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
225 * @hw: pointer to the hardware structure
/* Allocates one DMA buffer per ASQ entry for indirect-command payloads.
 * Unlike the ARQ path, descriptors are NOT pre-filled: the send path
 * fills them per command in i40e_asq_send_command(). */
227 STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
229 enum i40e_status_code ret_code;
230 struct i40e_dma_mem *bi;
233 /* No mapped memory needed yet, just the buffer info structures */
234 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
235 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
238 hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
240 /* allocate the mapped buffers */
241 for (i = 0; i < hw->aq.num_asq_entries; i++) {
242 bi = &hw->aq.asq.r.asq_bi[i];
/* NOTE(review): the buffer-size argument line is elided --
 * presumably hw->aq.asq_buf_size; confirm against the full source. */
243 ret_code = i40e_allocate_dma_mem(hw, bi,
246 I40E_ADMINQ_DESC_ALIGNMENT);
248 goto unwind_alloc_asq_bufs;
253 unwind_alloc_asq_bufs:
254 /* don't try to free the one that failed... */
/* Frees the buffers already allocated, then the buffer-info array. */
257 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
258 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
264 * i40e_free_arq_bufs - Free receive queue buffer info elements
265 * @hw: pointer to the hardware structure
/* Full ARQ teardown helper: data buffers, then descriptor ring, then the
 * buffer-info array -- reverse order of allocation. */
267 STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
271 /* free descriptors */
272 for (i = 0; i < hw->aq.num_arq_entries; i++)
273 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
275 /* free the descriptor memory */
276 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
278 /* free the dma header */
279 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
283 * i40e_free_asq_bufs - Free send queue buffer info elements
284 * @hw: pointer to the hardware structure
/* Full ASQ teardown helper.  ASQ buffers are lazily used, so a NULL/0
 * physical address means "never mapped" and is skipped. */
286 STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
290 /* only unmap if the address is non-NULL */
291 for (i = 0; i < hw->aq.num_asq_entries; i++)
292 if (hw->aq.asq.r.asq_bi[i].pa)
293 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
295 /* free the buffer info list */
296 i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
298 /* free the descriptor memory */
299 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
301 /* free the dma header */
302 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
306 * i40e_config_asq_regs - configure ASQ registers
307 * @hw: pointer to the hardware structure
309 * Configure base address and length registers for the transmit queue
/* Programs head/tail/len/base registers and enables the queue; returns
 * I40E_ERR_ADMIN_QUEUE_ERROR if the read-back of BAL does not match
 * (i.e. the device did not accept the config). */
311 STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
313 enum i40e_status_code ret_code = I40E_SUCCESS;
316 /* Clear Head and Tail */
317 wr32(hw, hw->aq.asq.head, 0);
318 wr32(hw, hw->aq.asq.tail, 0);
320 /* set starting point */
/* Length register also carries the queue-enable bit (ATQENABLE mask).
 * NOTE(review): the PF/VF/INTEGRATED_VF #if lines selecting among the
 * four writes below are elided; only one write executes per build. */
324 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
325 I40E_PF_ATQLEN_ATQENABLE_MASK));
327 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
328 I40E_PF_ATQLEN_ATQENABLE_MASK));
329 #endif /* INTEGRATED_VF */
330 #endif /* PF_DRIVER */
334 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
335 I40E_VF_ATQLEN1_ATQENABLE_MASK));
337 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
338 I40E_VF_ATQLEN1_ATQENABLE_MASK));
339 #endif /* INTEGRATED_VF */
340 #endif /* VF_DRIVER */
341 wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
342 wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
344 /* Check one register to verify that config was applied */
345 reg = rd32(hw, hw->aq.asq.bal);
346 if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
347 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
353 * i40e_config_arq_regs - ARQ register configuration
354 * @hw: pointer to the hardware structure
356 * Configure base address and length registers for the receive (event queue)
/* Same pattern as i40e_config_asq_regs(), plus a tail bump that hands all
 * pre-posted receive buffers to firmware. */
358 STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
360 enum i40e_status_code ret_code = I40E_SUCCESS;
363 /* Clear Head and Tail */
364 wr32(hw, hw->aq.arq.head, 0);
365 wr32(hw, hw->aq.arq.tail, 0);
367 /* set starting point */
/* Length register carries the ARQENABLE bit.  NOTE(review): the
 * PF/VF/INTEGRATED_VF #if lines selecting among the writes are elided. */
371 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
372 I40E_PF_ARQLEN_ARQENABLE_MASK));
374 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
375 I40E_PF_ARQLEN_ARQENABLE_MASK));
376 #endif /* INTEGRATED_VF */
377 #endif /* PF_DRIVER */
381 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
382 I40E_VF_ARQLEN1_ARQENABLE_MASK));
384 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
385 I40E_VF_ARQLEN1_ARQENABLE_MASK));
386 #endif /* INTEGRATED_VF */
387 #endif /* VF_DRIVER */
388 wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
389 wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
391 /* Update tail in the HW to post pre-allocated buffers */
392 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
394 /* Check one register to verify that config was applied */
395 reg = rd32(hw, hw->aq.arq.bal);
396 if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
397 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
403 * i40e_init_asq - main initialization routine for ASQ
404 * @hw: pointer to the hardware structure
406 * This is the main initialization routine for the Admin Send Queue
407 * Prior to calling this function, drivers *MUST* set the following fields
408 * in the hw->aq structure:
409 * - hw->aq.num_asq_entries
410 * - hw->aq.asq_buf_size
412 * Do *NOT* hold the lock when calling this as the memory allocation routines
413 * called are not going to be atomic context safe
415 enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
417 enum i40e_status_code ret_code = I40E_SUCCESS;
/* count > 0 is the "queue is live" marker (see i40e_shutdown_asq). */
419 if (hw->aq.asq.count > 0) {
420 /* queue already initialized */
421 ret_code = I40E_ERR_NOT_READY;
422 goto init_adminq_exit;
425 /* verify input for valid configuration */
426 if ((hw->aq.num_asq_entries == 0) ||
427 (hw->aq.asq_buf_size == 0)) {
428 ret_code = I40E_ERR_CONFIG;
429 goto init_adminq_exit;
432 hw->aq.asq.next_to_use = 0;
433 hw->aq.asq.next_to_clean = 0;
435 /* allocate the ring memory */
436 ret_code = i40e_alloc_adminq_asq_ring(hw);
437 if (ret_code != I40E_SUCCESS)
438 goto init_adminq_exit;
440 /* allocate buffers in the rings */
441 ret_code = i40e_alloc_asq_bufs(hw);
442 if (ret_code != I40E_SUCCESS)
443 goto init_adminq_free_rings;
445 /* initialize base registers */
446 ret_code = i40e_config_asq_regs(hw);
447 if (ret_code != I40E_SUCCESS)
448 goto init_adminq_free_rings;
/* Setting count last marks the queue initialized and usable. */
451 hw->aq.asq.count = hw->aq.num_asq_entries;
452 goto init_adminq_exit;
454 init_adminq_free_rings:
455 i40e_free_adminq_asq(hw);
462 * i40e_init_arq - initialize ARQ
463 * @hw: pointer to the hardware structure
465 * The main initialization routine for the Admin Receive (Event) Queue.
466 * Prior to calling this function, drivers *MUST* set the following fields
467 * in the hw->aq structure:
468 * - hw->aq.num_arq_entries
469 * - hw->aq.arq_buf_size
471 * Do *NOT* hold the lock when calling this as the memory allocation routines
472 * called are not going to be atomic context safe
474 enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
476 enum i40e_status_code ret_code = I40E_SUCCESS;
/* count > 0 is the "queue is live" marker (see i40e_shutdown_arq). */
478 if (hw->aq.arq.count > 0) {
479 /* queue already initialized */
480 ret_code = I40E_ERR_NOT_READY;
481 goto init_adminq_exit;
484 /* verify input for valid configuration */
485 if ((hw->aq.num_arq_entries == 0) ||
486 (hw->aq.arq_buf_size == 0)) {
487 ret_code = I40E_ERR_CONFIG;
488 goto init_adminq_exit;
491 hw->aq.arq.next_to_use = 0;
492 hw->aq.arq.next_to_clean = 0;
494 /* allocate the ring memory */
495 ret_code = i40e_alloc_adminq_arq_ring(hw);
496 if (ret_code != I40E_SUCCESS)
497 goto init_adminq_exit;
499 /* allocate buffers in the rings */
500 ret_code = i40e_alloc_arq_bufs(hw);
501 if (ret_code != I40E_SUCCESS)
502 goto init_adminq_free_rings;
504 /* initialize base registers */
505 ret_code = i40e_config_arq_regs(hw);
506 if (ret_code != I40E_SUCCESS)
507 goto init_adminq_free_rings;
/* Setting count last marks the queue initialized and usable. */
510 hw->aq.arq.count = hw->aq.num_arq_entries;
511 goto init_adminq_exit;
513 init_adminq_free_rings:
514 i40e_free_adminq_arq(hw);
521 * i40e_shutdown_asq - shutdown the ASQ
522 * @hw: pointer to the hardware structure
524 * The main shutdown routine for the Admin Send Queue
/* Disables the queue in hardware (len = 0 clears the enable bit), marks
 * it uninitialized, and frees all buffers.  Returns I40E_ERR_NOT_READY
 * if the queue was never initialized.  Takes asq_spinlock. */
526 enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
528 enum i40e_status_code ret_code = I40E_SUCCESS;
530 i40e_acquire_spinlock(&hw->aq.asq_spinlock);
532 if (hw->aq.asq.count == 0) {
533 ret_code = I40E_ERR_NOT_READY;
534 goto shutdown_asq_out;
537 /* Stop firmware AdminQ processing */
538 wr32(hw, hw->aq.asq.head, 0);
539 wr32(hw, hw->aq.asq.tail, 0);
540 wr32(hw, hw->aq.asq.len, 0);
541 wr32(hw, hw->aq.asq.bal, 0);
542 wr32(hw, hw->aq.asq.bah, 0);
544 hw->aq.asq.count = 0; /* to indicate uninitialized queue */
546 /* free ring buffers */
547 i40e_free_asq_bufs(hw);
550 i40e_release_spinlock(&hw->aq.asq_spinlock);
555 * i40e_shutdown_arq - shutdown ARQ
556 * @hw: pointer to the hardware structure
558 * The main shutdown routine for the Admin Receive Queue
/* Mirror of i40e_shutdown_asq() for the receive queue; takes
 * arq_spinlock and frees the pre-posted receive buffers. */
560 enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
562 enum i40e_status_code ret_code = I40E_SUCCESS;
564 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
566 if (hw->aq.arq.count == 0) {
567 ret_code = I40E_ERR_NOT_READY;
568 goto shutdown_arq_out;
571 /* Stop firmware AdminQ processing */
572 wr32(hw, hw->aq.arq.head, 0);
573 wr32(hw, hw->aq.arq.tail, 0);
574 wr32(hw, hw->aq.arq.len, 0);
575 wr32(hw, hw->aq.arq.bal, 0);
576 wr32(hw, hw->aq.arq.bah, 0);
578 hw->aq.arq.count = 0; /* to indicate uninitialized queue */
580 /* free ring buffers */
581 i40e_free_arq_bufs(hw);
584 i40e_release_spinlock(&hw->aq.arq_spinlock);
589 * i40e_init_adminq - main initialization routine for Admin Queue
590 * @hw: pointer to the hardware structure
592 * Prior to calling this function, drivers *MUST* set the following fields
593 * in the hw->aq structure:
594 * - hw->aq.num_asq_entries
595 * - hw->aq.num_arq_entries
596 * - hw->aq.arq_buf_size
597 * - hw->aq.asq_buf_size
599 enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
601 enum i40e_status_code ret_code;
603 u16 eetrack_lo, eetrack_hi;
604 u16 cfg_ptr, oem_hi, oem_lo;
607 /* verify input for valid configuration */
608 if ((hw->aq.num_arq_entries == 0) ||
609 (hw->aq.num_asq_entries == 0) ||
610 (hw->aq.arq_buf_size == 0) ||
611 (hw->aq.asq_buf_size == 0)) {
612 ret_code = I40E_ERR_CONFIG;
613 goto init_adminq_exit;
616 /* initialize spin locks */
617 i40e_init_spinlock(&hw->aq.asq_spinlock);
618 i40e_init_spinlock(&hw->aq.arq_spinlock);
620 /* Set up register offsets */
621 i40e_adminq_init_regs(hw);
623 /* setup ASQ command write back timeout */
624 hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
626 /* allocate the ASQ */
627 ret_code = i40e_init_asq(hw);
628 if (ret_code != I40E_SUCCESS)
629 goto init_adminq_destroy_spinlocks;
631 /* allocate the ARQ */
632 ret_code = i40e_init_arq(hw);
633 if (ret_code != I40E_SUCCESS)
634 goto init_adminq_free_asq;
/* Everything past this point is PF-only firmware/NVM probing;
 * NOTE(review): the guarding #if lines are elided from this listing. */
638 /* VF has no need of firmware */
640 goto init_adminq_exit;
642 /* There are some cases where the firmware may not be quite ready
643 * for AdminQ operations, so we retry the AdminQ setup a few times
644 * if we see timeouts in this first AQ call.
/* NOTE(review): the output-argument lines of this call (api/fw version
 * fields written into hw->aq) are elided from the listing. */
647 ret_code = i40e_aq_get_firmware_version(hw,
/* Only a timeout is retried; any other error aborts the retry loop. */
654 if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
657 i40e_msec_delay(100);
659 } while (retry < 10);
660 if (ret_code != I40E_SUCCESS)
661 goto init_adminq_free_arq;
663 /* get the NVM version info */
664 i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
666 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
667 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
668 hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
669 i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
670 i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
672 i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
674 hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
/* Newer-major firmware API than the driver was built for is rejected;
 * only same-or-older major versions are supported. */
676 if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
677 ret_code = I40E_ERR_FIRMWARE_API_VERSION;
678 goto init_adminq_free_arq;
681 /* pre-emptive resource lock release */
682 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
683 hw->aq.nvm_release_on_done = false;
684 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
686 #endif /* PF_DRIVER */
687 ret_code = I40E_SUCCESS;
690 goto init_adminq_exit;
/* Error unwind: tear down in reverse order of construction. */
693 init_adminq_free_arq:
694 i40e_shutdown_arq(hw);
696 init_adminq_free_asq:
697 i40e_shutdown_asq(hw);
698 init_adminq_destroy_spinlocks:
699 i40e_destroy_spinlock(&hw->aq.asq_spinlock);
700 i40e_destroy_spinlock(&hw->aq.arq_spinlock);
707 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
708 * @hw: pointer to the hardware structure
/* Politely tells firmware the queue is going down (if it is still alive),
 * then shuts down both queues and destroys the spinlocks.  Counterpart
 * of i40e_init_adminq(). */
710 enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
712 enum i40e_status_code ret_code = I40E_SUCCESS;
714 if (i40e_check_asq_alive(hw))
715 i40e_aq_queue_shutdown(hw, true);
717 i40e_shutdown_asq(hw);
718 i40e_shutdown_arq(hw);
720 /* destroy the spinlocks */
721 i40e_destroy_spinlock(&hw->aq.asq_spinlock);
722 i40e_destroy_spinlock(&hw->aq.arq_spinlock);
/* NOTE(review): presumably guarded by a PF/NVM-support #if in the full
 * source; the guard lines are elided here. */
725 i40e_free_virt_mem(hw, &hw->nvm_buff);
731 * i40e_clean_asq - cleans Admin send queue
732 * @hw: pointer to the hardware structure
734 * returns the number of free desc
/* Walks from next_to_clean up to the HW head pointer, invoking any
 * registered per-command completion callback and zeroing the consumed
 * descriptor + details slots.  Caller holds asq_spinlock (called from
 * i40e_asq_send_command()). */
736 u16 i40e_clean_asq(struct i40e_hw *hw)
738 struct i40e_adminq_ring *asq = &(hw->aq.asq);
739 struct i40e_asq_cmd_details *details;
740 u16 ntc = asq->next_to_clean;
741 struct i40e_aq_desc desc_cb;
742 struct i40e_aq_desc *desc;
744 desc = I40E_ADMINQ_DESC(*asq, ntc);
745 details = I40E_ADMINQ_DETAILS(*asq, ntc);
/* HW head == ntc means nothing left to reclaim. */
747 while (rd32(hw, hw->aq.asq.head) != ntc) {
748 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
749 "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
751 if (details->callback) {
752 I40E_ADMINQ_CALLBACK cb_func =
753 (I40E_ADMINQ_CALLBACK)details->callback;
/* Copy the descriptor out of DMA memory before invoking the
 * callback so the callback never touches live ring memory. */
754 i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
756 cb_func(hw, &desc_cb);
758 i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
759 i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
/* Ring wrap: NOTE(review) the increment of ntc and the wrap-to-0
 * assignment lines are elided from this listing. */
761 if (ntc == asq->count)
763 desc = I40E_ADMINQ_DESC(*asq, ntc);
764 details = I40E_ADMINQ_DETAILS(*asq, ntc);
767 asq->next_to_clean = ntc;
769 return I40E_DESC_UNUSED(asq);
773 * i40e_asq_done - check if FW has processed the Admin Send Queue
774 * @hw: pointer to the hw struct
776 * Returns true if the firmware has processed all descriptors on the
777 * admin send queue. Returns false if there are still requests pending.
779 bool i40e_asq_done(struct i40e_hw *hw)
781 /* AQ designers suggest use of head for better
782 * timing reliability than DD bit
/* Firmware advances the head register as it consumes descriptors; when
 * it catches up to the driver's next_to_use, everything is processed. */
784 return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
789 * i40e_asq_send_command - send command to Admin Queue
790 * @hw: pointer to the hw struct
791 * @desc: prefilled descriptor describing the command (non DMA mem)
792 * @buff: buffer to use for indirect commands
793 * @buff_size: size of buffer for indirect commands
794 * @cmd_details: pointer to command details structure
796 * This is the main send command driver routine for the Admin Queue send
797 * queue. It runs the queue, cleans the queue, etc
/* Synchronous unless cmd_details requests async/postpone.  On success the
 * firmware's writeback is copied back into *desc (and *buff for indirect
 * commands); hw->aq.asq_last_status carries the AQ return code.  Holds
 * asq_spinlock for the whole operation. */
799 enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
800 struct i40e_aq_desc *desc,
801 void *buff, /* can be NULL */
803 struct i40e_asq_cmd_details *cmd_details)
805 enum i40e_status_code status = I40E_SUCCESS;
806 struct i40e_dma_mem *dma_buff = NULL;
807 struct i40e_asq_cmd_details *details;
808 struct i40e_aq_desc *desc_on_ring;
809 bool cmd_completed = false;
813 i40e_acquire_spinlock(&hw->aq.asq_spinlock);
815 hw->aq.asq_last_status = I40E_AQ_RC_OK;
817 if (hw->aq.asq.count == 0) {
818 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
819 "AQTX: Admin queue not initialized.\n");
820 status = I40E_ERR_QUEUE_EMPTY;
821 goto asq_send_command_error;
/* Sanity-check the HW head: a value past the ring size means the device
 * state is corrupt (e.g. surprise reset) -- bail out. */
824 val = rd32(hw, hw->aq.asq.head);
825 if (val >= hw->aq.num_asq_entries) {
826 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
827 "AQTX: head overrun at %d\n", val);
828 status = I40E_ERR_QUEUE_EMPTY;
829 goto asq_send_command_error;
/* Stash caller's cmd_details (if any) into the per-slot details entry;
 * NOTE(review): the if/else wrapping the copy vs. memset below is
 * elided from this listing. */
832 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
836 sizeof(struct i40e_asq_cmd_details),
837 I40E_NONDMA_TO_NONDMA);
839 /* If the cmd_details are defined copy the cookie. The
840 * CPU_TO_LE32 is not needed here because the data is ignored
841 * by the FW, only used by the driver
843 if (details->cookie) {
845 CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
847 CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
850 i40e_memset(details, 0,
851 sizeof(struct i40e_asq_cmd_details),
855 /* clear requested flags and then set additional flags if defined */
856 desc->flags &= ~CPU_TO_LE16(details->flags_dis);
857 desc->flags |= CPU_TO_LE16(details->flags_ena);
859 if (buff_size > hw->aq.asq_buf_size) {
861 I40E_DEBUG_AQ_MESSAGE,
862 "AQTX: Invalid buffer size: %d.\n",
864 status = I40E_ERR_INVALID_SIZE;
865 goto asq_send_command_error;
/* postpone without async is a contradictory request -- reject it. */
868 if (details->postpone && !details->async) {
870 I40E_DEBUG_AQ_MESSAGE,
871 "AQTX: Async flag not set along with postpone flag");
872 status = I40E_ERR_PARAM;
873 goto asq_send_command_error;
876 /* call clean and check queue available function to reclaim the
877 * descriptors that were processed by FW, the function returns the
878 * number of desc available
880 /* the clean function called here could be called in a separate thread
881 * in case of asynchronous completions
883 if (i40e_clean_asq(hw) == 0) {
885 I40E_DEBUG_AQ_MESSAGE,
886 "AQTX: Error queue is full.\n");
887 status = I40E_ERR_ADMIN_QUEUE_FULL;
888 goto asq_send_command_error;
891 /* initialize the temp desc pointer with the right desc */
892 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
894 /* if the desc is available copy the temp desc to the right place */
895 i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
898 /* if buff is not NULL assume indirect command */
900 dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
901 /* copy the user buff into the respective DMA buff */
902 i40e_memcpy(dma_buff->va, buff, buff_size,
904 desc_on_ring->datalen = CPU_TO_LE16(buff_size);
906 /* Update the address values in the desc with the pa value
907 * for respective buffer
909 desc_on_ring->params.external.addr_high =
910 CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
911 desc_on_ring->params.external.addr_low =
912 CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
916 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
917 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
/* Bumping tail hands the descriptor to firmware (unless postponed). */
919 (hw->aq.asq.next_to_use)++;
920 if (hw->aq.asq.next_to_use == hw->aq.asq.count)
921 hw->aq.asq.next_to_use = 0;
922 if (!details->postpone)
923 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
925 /* if cmd_details are not defined or async flag is not set,
926 * we need to wait for desc write back
928 if (!details->async && !details->postpone) {
932 /* AQ designers suggest use of head for better
933 * timing reliability than DD bit
935 if (i40e_asq_done(hw))
937 /* ugh! delay while spin_lock */
/* NOTE(review): the per-iteration delay and total_delay increment
 * lines are elided; loop bounds are hw->aq.asq_cmd_timeout. */
940 } while (total_delay < hw->aq.asq_cmd_timeout);
943 /* if ready, copy the desc back to temp */
944 if (i40e_asq_done(hw)) {
945 i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
948 i40e_memcpy(buff, dma_buff->va, buff_size,
950 retval = LE16_TO_CPU(desc->retval);
953 I40E_DEBUG_AQ_MESSAGE,
954 "AQTX: Command completed with error 0x%X.\n",
957 /* strip off FW internal code */
960 cmd_completed = true;
961 if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
962 status = I40E_SUCCESS;
964 status = I40E_ERR_ADMIN_QUEUE_ERROR;
965 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
968 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
969 "AQTX: desc and buffer writeback:\n");
970 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
972 /* save writeback aq if requested */
973 if (details->wb_desc)
974 i40e_memcpy(details->wb_desc, desc_on_ring,
975 sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
977 /* update the error if time out occurred */
978 if ((!cmd_completed) &&
979 (!details->async && !details->postpone)) {
981 I40E_DEBUG_AQ_MESSAGE,
982 "AQTX: Writeback timeout.\n");
983 status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
986 asq_send_command_error:
987 i40e_release_spinlock(&hw->aq.asq_spinlock);
992 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
993 * @desc: pointer to the temp descriptor (non DMA mem)
994 * @opcode: the opcode can be used to decide which flags to turn off or on
996 * Fill the desc with default values
/* Zeroes the descriptor, stores the opcode (little-endian) and sets the
 * SI (status-interrupt) flag as the default for direct commands. */
998 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
1001 /* zero out the desc */
1002 i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
1004 desc->opcode = CPU_TO_LE16(opcode);
1005 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
1009 * i40e_clean_arq_element
1010 * @hw: pointer to the hw struct
1011 * @e: event info from the receive descriptor, includes any buffers
1012 * @pending: number of events that could be left to process
1014 * This function cleans one Admin Receive Queue element and returns
1015 * the contents through e. It can also return how many events are
1016 * left to process through 'pending'
/* Returns I40E_ERR_ADMIN_QUEUE_NO_WORK when firmware's head has not moved
 * past next_to_clean.  After consuming the event the descriptor is
 * re-armed with its original buffer and handed back via the tail write. */
1018 enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
1019 struct i40e_arq_event_info *e,
1022 enum i40e_status_code ret_code = I40E_SUCCESS;
1023 u16 ntc = hw->aq.arq.next_to_clean;
1024 struct i40e_aq_desc *desc;
1025 struct i40e_dma_mem *bi;
1031 /* pre-clean the event info */
1032 i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
1034 /* take the lock before we start messing with the ring */
1035 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
1037 if (hw->aq.arq.count == 0) {
1038 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1039 "AQRX: Admin queue not initialized.\n");
1040 ret_code = I40E_ERR_QUEUE_EMPTY;
1041 goto clean_arq_element_err;
1044 /* set next_to_use to head */
/* PF vs VF read of the ARQ head register -- selected by #if branches
 * (several guard lines are elided from this listing). */
1046 #ifdef INTEGRATED_VF
1047 if (!i40e_is_vf(hw))
1048 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
1050 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
1051 #endif /* INTEGRATED_VF */
1052 #endif /* PF_DRIVER */
1054 #ifdef INTEGRATED_VF
1056 ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
1058 ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
1059 #endif /* INTEGRATED_VF */
1060 #endif /* VF_DRIVER */
/* NOTE(review): the "if (ntu == ntc)" guard line is elided here. */
1062 /* nothing to do - shouldn't need to update ring's values */
1063 ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
1064 goto clean_arq_element_out;
1067 /* now clean the next descriptor */
1068 desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
/* Firmware sets the ERR flag and puts the AQ return code in retval. */
1071 flags = LE16_TO_CPU(desc->flags);
1072 if (flags & I40E_AQ_FLAG_ERR) {
1073 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
1074 hw->aq.arq_last_status =
1075 (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
1077 I40E_DEBUG_AQ_MESSAGE,
1078 "AQRX: Event received with error 0x%X.\n",
1079 hw->aq.arq_last_status);
/* Copy descriptor + (truncated) message payload out to the caller. */
1082 i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
1083 I40E_DMA_TO_NONDMA);
1084 datalen = LE16_TO_CPU(desc->datalen);
1085 e->msg_len = min(datalen, e->buf_len);
1086 if (e->msg_buf != NULL && (e->msg_len != 0))
1087 i40e_memcpy(e->msg_buf,
1088 hw->aq.arq.r.arq_bi[desc_idx].va,
1089 e->msg_len, I40E_DMA_TO_NONDMA);
1091 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
1092 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
1093 hw->aq.arq_buf_size);
1095 /* Restore the original datalen and buffer address in the desc,
1096 * FW updates datalen to indicate the event message
/* Re-arm the slot exactly as i40e_alloc_arq_bufs() originally did. */
1099 bi = &hw->aq.arq.r.arq_bi[ntc];
1100 i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
1102 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
1103 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
1104 desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
1105 desc->datalen = CPU_TO_LE16((u16)bi->size);
1106 desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
1107 desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
1109 /* set tail = the last cleaned desc index. */
1110 wr32(hw, hw->aq.arq.tail, ntc);
1111 /* ntc is updated to tail + 1 */
/* NOTE(review): the ntc increment line is elided before this wrap check. */
1113 if (ntc == hw->aq.num_arq_entries)
1115 hw->aq.arq.next_to_clean = ntc;
1116 hw->aq.arq.next_to_use = ntu;
/* PF-only NVM-update bookkeeping: release the NVM resource and advance
 * the nvmupd state machine when an NVM op completes. */
1119 if (i40e_is_nvm_update_op(&e->desc)) {
1120 if (hw->aq.nvm_release_on_done) {
1121 i40e_release_nvm(hw);
1122 hw->aq.nvm_release_on_done = false;
1125 switch (hw->nvmupd_state) {
1126 case I40E_NVMUPD_STATE_INIT_WAIT:
1127 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1130 case I40E_NVMUPD_STATE_WRITE_WAIT:
1131 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1140 clean_arq_element_out:
1141 /* Set pending if needed, unlock and return */
/* Pending = ring distance from ntc to ntu, accounting for wrap. */
1142 if (pending != NULL)
1143 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
1144 clean_arq_element_err:
1145 i40e_release_spinlock(&hw->aq.arq_spinlock);
/* i40e_resume_aq - re-program AQ registers after a PF reset.
 * @hw: pointer to the hardware structure
 * Resets both rings' software indices and rewrites the base/len/enable
 * registers, which the hardware cleared during the reset.  Ring memory
 * and buffers are reused as-is. */
1150 void i40e_resume_aq(struct i40e_hw *hw)
1152 /* Registers are reset after PF reset */
1153 hw->aq.asq.next_to_use = 0;
1154 hw->aq.asq.next_to_clean = 0;
1156 i40e_config_asq_regs(hw);
1158 hw->aq.arq.next_to_use = 0;
1159 hw->aq.arq.next_to_clean = 0;
1161 i40e_config_arq_regs(hw);