/* drivers/net/i40e/base/i40e_adminq.c */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2018 Intel Corporation
 */

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (i40e_is_vf(hw)) {
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
                hw->aq.asq.bal  = I40E_VF_ATQBAL1;
                hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
                hw->aq.arq.bal  = I40E_VF_ARQBAL1;
                hw->aq.arq.bah  = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
                hw->aq.asq.bal  = I40E_PF_ATQBAL;
                hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
                hw->aq.arq.bal  = I40E_PF_ARQBAL;
                hw->aq.arq.bah  = I40E_PF_ARQBAH;
#endif
        }
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         i40e_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct i40e_asq_cmd_details)));
        if (ret_code) {
                i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         i40e_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}
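
/* Editorial note (added): the descriptor-arming pattern above - set
 * I40E_AQ_FLAG_BUF, add I40E_AQ_FLAG_LB for buffers larger than
 * I40E_AQ_LARGE_BUF, then program datalen and the buffer address - is
 * repeated when a consumed event descriptor is recycled in
 * i40e_clean_arq_element().  Below is a minimal sketch of a shared helper
 * for that pattern; the function name is hypothetical and not part of the
 * i40e code base.
 */
STATIC void i40e_arm_arq_desc_sketch(struct i40e_hw *hw, u16 idx)
{
        struct i40e_dma_mem *bi = &hw->aq.arq.r.arq_bi[idx];
        struct i40e_aq_desc *desc = I40E_ADMINQ_DESC(hw->aq.arq, idx);

        /* hand the pre-allocated buffer back to firmware ownership */
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
}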

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_asq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_asq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_config_regs;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_asq(hw);
        return ret_code;

init_config_regs:
        i40e_free_asq_bufs(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_arq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_arq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_asq_bufs(hw);

shutdown_asq_out:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_arq_bufs(hw);

shutdown_arq_out:
        i40e_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}
#ifdef PF_DRIVER

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
        /* Registers are reset after PF reset */
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        i40e_config_asq_regs(hw);

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        i40e_config_arq_regs(hw);
}
#endif /* PF_DRIVER */

/**
 *  i40e_set_hw_flags - set HW flags
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_set_hw_flags(struct i40e_hw *hw)
{
        struct i40e_adminq_info *aq = &hw->aq;

        hw->flags = 0;

        switch (hw->mac.type) {
        case I40E_MAC_XL710:
                if (aq->api_maj_ver > 1 ||
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
                        hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
                        hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
                        /* The ability to RX (not drop) 802.1ad frames */
                        hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
                }
                break;
        case I40E_MAC_X722:
                hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
                             I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

                if (aq->api_maj_ver > 1 ||
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
                        hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
                /* fall through */
        default:
                break;
        }

        /* Newer versions of firmware require lock when reading the NVM */
        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= 5))
                hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= 8))
                hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;

        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= 9))
                hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
}
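
/* Editorial note (added): every gate in i40e_set_hw_flags() repeats the same
 * "major > 1, or major == 1 and minor >= N" comparison.  A small helper such
 * as the sketch below could express that check once, e.g.
 * i40e_aq_api_ver_ge_sketch(aq, 1, 5) for the NVM read-lock test.  The helper
 * name is hypothetical and does not exist in the i40e code base.
 */
STATIC bool i40e_aq_api_ver_ge_sketch(struct i40e_adminq_info *aq,
                                      u16 maj, u16 min)
{
        /* true when the negotiated AQ API version is at least maj.min */
        return (aq->api_maj_ver > maj ||
                (aq->api_maj_ver == maj && aq->api_min_ver >= min));
}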

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
        struct i40e_adminq_info *aq = &hw->aq;
        enum i40e_status_code ret_code;
        u16 cfg_ptr, oem_hi, oem_lo;
        u16 eetrack_lo, eetrack_hi;
        int retry = 0;

        /* verify input for valid configuration */
        if (aq->num_arq_entries == 0 ||
            aq->num_asq_entries == 0 ||
            aq->arq_buf_size == 0 ||
            aq->asq_buf_size == 0) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }
        i40e_init_spinlock(&aq->asq_spinlock);
        i40e_init_spinlock(&aq->arq_spinlock);

        /* Set up register offsets */
        i40e_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = i40e_init_arq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_asq;

        /* There are some cases where the firmware may not be quite ready
         * for AdminQ operations, so we retry the AdminQ setup a few times
         * if we see timeouts in this first AQ call.
         */
        do {
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &aq->fw_maj_ver,
                                                        &aq->fw_min_ver,
                                                        &aq->fw_build,
                                                        &aq->api_maj_ver,
                                                        &aq->api_min_ver,
                                                        NULL);
                if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
                        break;
                retry++;
                i40e_msec_delay(100);
                i40e_resume_aq(hw);
        } while (retry < 10);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_arq;

        /*
         * Some features were introduced at different FW API versions
         * for different MAC types.
         */
        i40e_set_hw_flags(hw);

        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
                           &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
        i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
        i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
                           &oem_hi);
        i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
                           &oem_lo);
        hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

        if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
        }

        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        hw->nvm_release_on_done = false;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

        ret_code = I40E_SUCCESS;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_arq:
        i40e_shutdown_arq(hw);
init_adminq_free_asq:
        i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        i40e_destroy_spinlock(&aq->asq_spinlock);
        i40e_destroy_spinlock(&aq->arq_spinlock);

init_adminq_exit:
        return ret_code;
}
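
/* Editorial note (added): a minimal usage sketch for the contract described
 * in the comment above i40e_init_adminq() - the caller sizes both rings and
 * both buffer pools before the call.  The entry counts and buffer sizes here
 * are illustrative values only, and the function name is hypothetical; real
 * drivers pick their own sizes and error handling.
 */
STATIC enum i40e_status_code i40e_adminq_bringup_sketch(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        /* required by i40e_init_adminq(); values are illustrative */
        hw->aq.num_asq_entries = 32;
        hw->aq.num_arq_entries = 32;
        hw->aq.asq_buf_size = 4096;
        hw->aq.arq_buf_size = 4096;

        /* allocates both rings, reads FW/API versions and NVM info */
        ret_code = i40e_init_adminq(hw);
        if (ret_code != I40E_SUCCESS)
                return ret_code;

        /* ... issue AQ commands here ... */

        /* undo everything on teardown */
        return i40e_shutdown_adminq(hw);
}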

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (i40e_check_asq_alive(hw))
                i40e_aq_queue_shutdown(hw, true);

        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

        if (hw->nvm_buff.va)
                i40e_free_virt_mem(hw, &hw->nvm_buff);

        return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
        struct i40e_adminq_ring *asq = &(hw->aq.asq);
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
                        i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
                                    I40E_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
                i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = I40E_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return I40E_DESC_UNUSED(asq);
}
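
/* Editorial note (added): the callback invoked above comes from the
 * per-descriptor i40e_asq_cmd_details filled in by i40e_asq_send_command().
 * The sketch below shows how a caller could request an asynchronous
 * completion so the descriptor is reported back through this cleanup path.
 * The function names are hypothetical, and the (void *) cast assumes the
 * callback field is a void pointer, as the cast in i40e_clean_asq() suggests.
 */
STATIC void i40e_aq_done_cb_sketch(struct i40e_hw *hw,
                                   struct i40e_aq_desc *desc)
{
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: async completion, retval 0x%X\n",
                   LE16_TO_CPU(desc->retval));
}

STATIC enum i40e_status_code
i40e_send_async_cmd_sketch(struct i40e_hw *hw, struct i40e_aq_desc *desc)
{
        struct i40e_asq_cmd_details details;

        i40e_memset(&details, 0, sizeof(struct i40e_asq_cmd_details),
                    I40E_NONDMA_MEM);
        details.async = true;
        details.callback = (void *)i40e_aq_done_cb_sketch;

        /* returns once the descriptor is posted; completion is reported
         * later through the callback run by i40e_clean_asq()
         */
        return i40e_asq_send_command(hw, desc, NULL, 0, &details);
}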

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;

}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                                struct i40e_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct i40e_asq_cmd_details *cmd_details)
{
        enum i40e_status_code status = I40E_SUCCESS;
        struct i40e_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = I40E_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                i40e_memcpy(details,
                            cmd_details,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
                }
        } else {
                i40e_memset(details, 0,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = I40E_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag");
                status = I40E_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (i40e_clean_asq(hw) == 0) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                i40e_memcpy(dma_buff->va, buff, buff_size,
                            I40E_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (i40e_asq_done(hw))
                                break;
                        i40e_usec_delay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
                i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                            I40E_DMA_TO_NONDMA);
                if (buff != NULL)
                        i40e_memcpy(buff, dma_buff->va, buff_size,
                                    I40E_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        i40e_debug(hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = I40E_SUCCESS;
                else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
                        status = I40E_ERR_NOT_READY;
                else
                        status = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
                   "AQTX: desc and buffer writeback:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                i40e_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
#ifdef PF_DRIVER
                if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
#else
                if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
#endif
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
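
/* Editorial note (added): a minimal sketch tying the two routines above
 * together for a direct (bufferless) command.  The opcode parameter stands
 * in for a real value from i40e_adminq_cmd.h; indirect commands would pass a
 * buffer and its length instead of NULL/0.  The function name is
 * hypothetical.
 */
STATIC enum i40e_status_code
i40e_send_direct_cmd_sketch(struct i40e_hw *hw, u16 opcode)
{
        struct i40e_aq_desc desc;
        enum i40e_status_code status;

        /* zero the descriptor, then set the opcode and the SI flag */
        i40e_fill_default_direct_cmd_desc(&desc, opcode);

        /* with no cmd_details this blocks until FW writes the desc back */
        status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
        if (status != I40E_SUCCESS)
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: opcode 0x%04X failed, aq_err %d\n",
                           opcode, hw->aq.asq_last_status);

        return status;
}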

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = I40E_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
        else
                ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
#ifdef PF_DRIVER
        ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
        ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
#endif /* INTEGRATED_VF */
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                    I40E_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                i40e_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, I40E_DMA_TO_NONDMA);

        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
        i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
#endif /* PF_DRIVER */
clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        i40e_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}
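
/* Editorial note (added): a minimal sketch of the polling loop that usually
 * drives i40e_clean_arq_element() (real drivers run it from their AdminQ
 * interrupt or alarm handler).  The event buffer is sized from
 * hw->aq.arq_buf_size; the function name is hypothetical.
 */
STATIC void i40e_drain_arq_sketch(struct i40e_hw *hw)
{
        struct i40e_arq_event_info event;
        struct i40e_virt_mem mem;
        enum i40e_status_code ret_code;
        u16 pending;

        if (i40e_allocate_virt_mem(hw, &mem, hw->aq.arq_buf_size))
                return;
        event.buf_len = hw->aq.arq_buf_size;
        event.msg_buf = (u8 *)mem.va;

        do {
                pending = 0;
                ret_code = i40e_clean_arq_element(hw, &event, &pending);
                if (ret_code == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                        break;

                /* dispatch on the opcode firmware wrote back */
                switch (LE16_TO_CPU(event.desc.opcode)) {
                default:
                        /* event-specific handling goes here */
                        break;
                }
        } while (pending);

        i40e_free_virt_mem(hw, &mem);
}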