/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2018 Intel Corporation
 */

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
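
/* A note on the pre-posting scheme above (descriptive, not from the original
 * source): every ARQ descriptor is handed to firmware already pointing at a
 * driver-owned DMA buffer, with I40E_AQ_FLAG_BUF set and I40E_AQ_FLAG_LB
 * added when the buffer exceeds I40E_AQ_LARGE_BUF bytes. Because there is no
 * register for buffer size, firmware learns each buffer's capacity only from
 * the datalen field, which is why datalen is seeded with bi->size here and
 * restored again in i40e_clean_arq_element() after every consumed event.
 */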

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event queue)
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
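
/* Descriptive note (not from the original source): in this head/tail scheme
 * the tail register names the last descriptor the driver has made available,
 * so writing num_arq_entries - 1 above publishes every pre-posted receive
 * buffer to firmware in one shot; e.g. with a 1024-entry ARQ the tail is
 * written with 1023 and the whole ring is available for incoming events.
 */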

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_config_regs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);
	return ret_code;

init_config_regs:
	i40e_free_asq_bufs(hw);

init_adminq_exit:
	return ret_code;
}
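
/* Minimal usage sketch (illustrative only, not part of the driver): the
 * caller owns the MUST-set fields before i40e_init_asq() runs; the entry
 * count and buffer size below are arbitrary example values.
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.asq_buf_size = 4096;
 *	if (i40e_init_asq(hw) != I40E_SUCCESS)
 *		goto bail;	(ring or buffer allocation failed)
 */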

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

#ifdef PF_DRIVER
/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}
#endif /* PF_DRIVER */

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
#ifdef PF_DRIVER
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
#endif
	enum i40e_status_code ret_code;
#ifdef PF_DRIVER
	int retry = 0;
#endif

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
#endif
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
	if ((hw->aq.api_maj_ver > 1) ||
	    ((hw->aq.api_maj_ver == 1) &&
	     (hw->aq.api_min_ver >= 7)))
		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

	if (hw->mac.type == I40E_MAC_XL710 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
	}

	/* Newer versions of firmware require lock when reading the NVM */
	if ((hw->aq.api_maj_ver > 1) ||
	    ((hw->aq.api_maj_ver == 1) &&
	     (hw->aq.api_min_ver >= 5)))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

#endif /* PF_DRIVER */
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
	i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
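
/* Bring-up sketch (illustrative only, not part of the driver): a caller
 * typically sizes both rings and their indirect-command buffers, calls
 * i40e_init_adminq() once per reset cycle, and tears down with
 * i40e_shutdown_adminq(). The sizes below are example choices.
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	ret = i40e_init_adminq(hw);
 *	if (ret == I40E_ERR_FIRMWARE_API_VERSION)
 *		(firmware AQ API is newer than this driver supports)
 */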

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_NONDMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
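
/* Worked example for the return value above, assuming the conventional
 * I40E_DESC_UNUSED() ring arithmetic from i40e_adminq.h: with count = 32,
 * next_to_clean = 5 and next_to_use = 9, the ASQ reports
 * 32 + 5 - 9 - 1 = 27 free descriptors; one slot is always sacrificed so a
 * full ring can be distinguished from an empty one.
 */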

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue.  Returns false if there are still requests pending.
 **/
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = I40E_ERR_NOT_READY;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
#ifdef PF_DRIVER
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
#else
		if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
#endif
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc: pointer to the temp descriptor (non DMA mem)
 *  @opcode: the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
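
/* Usage sketch for a direct (non-buffered) command, combining
 * i40e_fill_default_direct_cmd_desc() with i40e_asq_send_command() above.
 * Illustrative only; the opcode shown is the firmware-version query, and the
 * caller is assumed to hold no AQ locks (the send path locks internally).
 *
 *	struct i40e_aq_desc desc;
 *	enum i40e_status_code status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	if (status != I40E_SUCCESS)
 *		(inspect hw->aq.asq_last_status for the AQ return code)
 */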

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
#ifdef PF_DRIVER
	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
	ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
#endif /* INTEGRATED_VF */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
#endif /* PF_DRIVER */
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
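
/* Event-polling sketch (illustrative only, not part of the driver): callers
 * typically size e.buf_len to arq_buf_size and drain the ARQ until it
 * reports no work; "event_buffer" below is a hypothetical preallocated
 * buffer owned by the caller.
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending = 0;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = event_buffer;
 *	do {
 *		if (i40e_clean_arq_element(hw, &event, &pending) ==
 *		    I40E_ERR_ADMIN_QUEUE_NO_WORK)
 *			break;
 *		(dispatch on LE16_TO_CPU(event.desc.opcode))
 *	} while (pending);
 */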