/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2018
 */

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len = I40E_VF_ATQLEN1;
		hw->aq.asq.bal = I40E_VF_ATQBAL1;
		hw->aq.asq.bah = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len = I40E_VF_ARQLEN1;
		hw->aq.arq.bal = I40E_VF_ARQBAL1;
		hw->aq.arq.bah = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len = I40E_PF_ATQLEN;
		hw->aq.asq.bal = I40E_PF_ATQBAL;
		hw->aq.asq.bah = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len = I40E_PF_ARQLEN;
		hw->aq.arq.bal = I40E_PF_ARQBAL;
		hw->aq.arq.bah = I40E_PF_ARQBAH;
	}
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration; the size of each
		 * posted buffer is instead carried in the descriptor datalen
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free the buffers posted to the descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

#ifdef PF_DRIVER
/**
 * i40e_resume_aq - resume AQ processing from 0
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}
#endif /* PF_DRIVER */

/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 * - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
#ifdef PF_DRIVER
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
#endif
	enum i40e_status_code ret_code;
#ifdef PF_DRIVER
	int retry = 0;
#endif

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
#endif /* INTEGRATED_VF */
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
	if ((hw->aq.api_maj_ver > 1) ||
	    ((hw->aq.api_maj_ver == 1) &&
	     (hw->aq.api_min_ver >= 7)))
		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

	if (hw->mac.type == I40E_MAC_XL710 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
	}

	/* Newer versions of firmware require lock when reading the NVM */
	if ((hw->aq.api_maj_ver > 1) ||
	    ((hw->aq.api_maj_ver == 1) &&
	     (hw->aq.api_min_ver >= 5)))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

#endif /* PF_DRIVER */
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
	i40e_shutdown_arq(hw);
#endif /* PF_DRIVER */
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
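
/*
 * Usage sketch (illustrative only, not part of the shared code): a caller
 * is expected to size both rings before i40e_init_adminq(), for example:
 *
 *	hw->aq.num_asq_entries = 32;	(example values; the driver chooses)
 *	hw->aq.num_arq_entries = 32;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	if (i40e_init_adminq(hw) != I40E_SUCCESS)
 *		goto err_out;		(hypothetical caller label)
 */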

/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * Returns the number of free descriptors in the Admin send queue
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_NONDMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
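
/*
 * Note: I40E_DESC_UNUSED() (defined in i40e_adminq.h) computes the free
 * slot count with ring wrap-around and reserves one slot so that a full
 * ring can be distinguished from an empty one, the usual convention for
 * circular queues tracked by head/tail indices.
 */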

/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie. The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
#ifdef PF_DRIVER
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
#else
		if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
#endif
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
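
/*
 * Usage sketch (illustrative only): a direct, bufferless command is built
 * with i40e_fill_default_direct_cmd_desc() and handed to
 * i40e_asq_send_command(); indirect commands additionally pass a buffer
 * and its length. A queue-shutdown request, for example, looks roughly
 * like:
 *
 *	struct i40e_aq_desc desc;
 *	enum i40e_status_code status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc,
 *					  i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 */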

/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
#ifdef PF_DRIVER
	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
	ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
#endif /* INTEGRATED_VF */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
#endif /* PF_DRIVER */
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
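
/*
 * Usage sketch (illustrative only): callers typically drain the ARQ in a
 * loop until no work remains, for example:
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending = 0;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = buf;	(hypothetical caller-owned allocation)
 *	do {
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;	(covers I40E_ERR_ADMIN_QUEUE_NO_WORK)
 *		(dispatch on LE16_TO_CPU(event.desc.opcode) here)
 *	} while (pending);
 */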