/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len = I40E_VF_ATQLEN1;
		hw->aq.asq.bal = I40E_VF_ATQBAL1;
		hw->aq.asq.bah = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len = I40E_VF_ARQLEN1;
		hw->aq.arq.bal = I40E_VF_ARQBAL1;
		hw->aq.arq.bah = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len = I40E_PF_ATQLEN;
		hw->aq.asq.bal = I40E_PF_ATQBAL;
		hw->aq.asq.bah = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len = I40E_PF_ARQLEN;
		hw->aq.arq.bal = I40E_PF_ARQBAL;
		hw->aq.arq.bah = I40E_PF_ARQBAH;
	}
}
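
/* Illustrative note (not original text): caching the MMIO offsets in hw->aq
 * lets the rest of this file stay PF/VF agnostic. A hypothetical caller
 * reading the current send-queue head would simply do
 *
 *	u32 head = rd32(hw, hw->aq.asq.head);
 *
 * and the same line works whichever register block the offsets above
 * resolved to.
 */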
/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}
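
/* Sizing note (illustrative, not original text): AQ descriptors are 32 bytes
 * each, so a hypothetical num_asq_entries of 32 makes the DMA ring above
 * 32 * sizeof(struct i40e_aq_desc) = 1024 bytes, placed on an
 * I40E_ADMINQ_DESC_ALIGNMENT boundary.
 */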
/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}
/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
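
/* For reference (assumption about the macro, hypothetical helper name):
 * I40E_ADMINQ_DESC(ring, i) is plain pointer arithmetic over the DMA ring
 * memory, roughly equivalent to
 *
 *	static inline struct i40e_aq_desc *
 *	arq_desc_at(struct i40e_adminq_ring *ring, u16 i)
 *	{
 *		return &((struct i40e_aq_desc *)ring->desc_buf.va)[i];
 *	}
 */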
/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}

alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}
/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
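
/* Illustrative note (assumption, values hypothetical): the length register
 * doubles as the enable switch, so one write both sizes and arms the queue.
 * With 32 PF send-queue entries the value written above is
 * (32 | I40E_PF_ATQLEN_ATQENABLE_MASK): entry count in the low bits, enable
 * bit OR'ed in.
 */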
/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event queue)
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
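
/* Illustrative note (assumption about intent): writing num_arq_entries - 1
 * to the tail hands all but one descriptor to firmware. Holding one slot
 * back is the usual ring convention that keeps "full" (tail just behind
 * head) distinguishable from "empty" (tail == head).
 */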
/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}
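
/* Usage sketch (hypothetical caller, illustrative values): the queue must be
 * sized before the call, and no spinlock may be held since the allocation
 * helpers can sleep:
 *
 *	hw->aq.num_asq_entries = 32;
 *	hw->aq.asq_buf_size = 4096;
 *	if (i40e_init_asq(hw) != I40E_SUCCESS)
 *		goto err_init;
 */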
/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}
/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}
/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}
#ifdef PF_DRIVER
/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}
#endif /* PF_DRIVER */
/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
#ifdef PF_DRIVER
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
#endif
	enum i40e_status_code ret_code;
#ifdef PF_DRIVER
	int retry = 0;
#endif

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
#endif
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
	if ((hw->aq.api_maj_ver > 1) ||
	    ((hw->aq.api_maj_ver == 1) &&
	     (hw->aq.api_min_ver >= 7)))
		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

	if (hw->mac.type == I40E_MAC_XL710 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
	}

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

#endif /* PF_DRIVER */
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
	i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
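
/* Usage sketch (hypothetical caller, illustrative values): all four fields
 * named in the comment above must be filled in before the call, typically
 * once at probe time:
 *
 *	hw->aq.num_asq_entries = 32;
 *	hw->aq.num_arq_entries = 32;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	if (i40e_init_adminq(hw) != I40E_SUCCESS)
 *		goto err_adminq;
 */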
/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}
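
/* Ordering note (assumption about intent): the queue-shutdown AQ command is
 * sent first, while the send queue is still alive, so firmware learns the
 * driver is going away before the rings underneath the queues are torn down.
 */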
/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
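
/* For reference (assumption about the macro): I40E_DESC_UNUSED(R) computes
 * the free count with the usual ring arithmetic, roughly
 *
 *	(((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) +
 *		(R)->next_to_clean - (R)->next_to_use - 1
 *
 * i.e. the distance from next_to_use back around to next_to_clean, minus the
 * one slot always left empty so that "full" and "empty" stay distinguishable.
 */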
/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue.  Returns false if there are still requests pending.
 **/
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}
/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
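
/* Usage sketch (hypothetical, opcode shown for illustration): a direct
 * (buffer-less) command built with the helper above and pushed through
 * i40e_asq_send_command() looks like
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * For an indirect command, pass a buffer and its size instead; the send
 * routine copies it into the ring's DMA buffer and fills in the address
 * fields itself.
 */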
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#else
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#else
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
#endif /* PF_DRIVER */
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
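
/* Usage sketch (hypothetical caller): service routines typically drain the
 * ARQ in a loop until no events remain, e.g.
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = msg_buf;	// caller-allocated, buf_len bytes
 *	do {
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;
 *		// dispatch on event.desc.opcode, payload in event.msg_buf
 *	} while (pending);
 */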