/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (i40e_is_vf(hw)) {
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
                hw->aq.asq.bal  = I40E_VF_ATQBAL1;
                hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
                hw->aq.arq.bal  = I40E_VF_ARQBAL1;
                hw->aq.arq.bah  = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
                hw->aq.asq.bal  = I40E_PF_ATQBAL;
                hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
                hw->aq.arq.bal  = I40E_PF_ARQBAL;
                hw->aq.arq.bah  = I40E_PF_ARQBAH;
#endif
        }
}
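
/*
 * Note: stashing the register offsets in hw->aq here lets every later
 * rd32()/wr32() on the rings stay identical for PF and VF builds; only
 * this function needs to know which register file is in use.
 */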

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         i40e_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct i40e_asq_cmd_details)));
        if (ret_code) {
                i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         i40e_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}
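
/*
 * Note on the loop above: unlike the send queue, every receive descriptor
 * is pre-posted at init time with I40E_AQ_FLAG_BUF and a buffer address,
 * because firmware (not the driver) originates ARQ events and needs a
 * ready buffer to write each event into.
 */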

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event queue)
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_asq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_asq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}
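
/*
 * Usage sketch (illustrative only, not part of the driver): the caller
 * sizes the send queue before initializing it, then checks the return
 * code. The sizes below are hypothetical; real drivers take them from
 * their own configuration.
 *
 *      hw->aq.num_asq_entries = 128;
 *      hw->aq.asq_buf_size = 4096;
 *      status = i40e_init_asq(hw);
 *      if (status != I40E_SUCCESS)
 *              goto err_init_asq;
 */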

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_arq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_arq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_asq_bufs(hw);

shutdown_asq_out:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_arq_bufs(hw);

shutdown_arq_out:
        i40e_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}
#ifdef PF_DRIVER

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
        /* Registers are reset after PF reset */
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        i40e_config_asq_regs(hw);

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        i40e_config_arq_regs(hw);
}
#endif /* PF_DRIVER */

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
#ifdef PF_DRIVER
        u16 cfg_ptr, oem_hi, oem_lo;
        u16 eetrack_lo, eetrack_hi;
#endif
        enum i40e_status_code ret_code;
#ifdef PF_DRIVER
        int retry = 0;
#endif

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }
        i40e_init_spinlock(&hw->aq.asq_spinlock);
        i40e_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        i40e_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = i40e_init_arq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        /* VF has no need of firmware */
        if (i40e_is_vf(hw))
                goto init_adminq_exit;
#endif
        /* There are some cases where the firmware may not be quite ready
         * for AdminQ operations, so we retry the AdminQ setup a few times
         * if we see timeouts in this first AQ call.
         */
        do {
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &hw->aq.fw_maj_ver,
                                                        &hw->aq.fw_min_ver,
                                                        &hw->aq.fw_build,
                                                        &hw->aq.api_maj_ver,
                                                        &hw->aq.api_min_ver,
                                                        NULL);
                if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
                        break;
                retry++;
                i40e_msec_delay(100);
                i40e_resume_aq(hw);
        } while (retry < 10);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_arq;

        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
                           &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
        i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
        i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
                           &oem_hi);
        i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
                           &oem_lo);
        hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

        /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
        if ((hw->aq.api_maj_ver > 1) ||
            ((hw->aq.api_maj_ver == 1) &&
             (hw->aq.api_min_ver >= 7)))
                hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

        if (hw->mac.type == I40E_MAC_XL710 &&
            hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
                hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
        }

        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
        }

        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        hw->nvm_release_on_done = false;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

#endif /* PF_DRIVER */
        ret_code = I40E_SUCCESS;

        /* success! */
        goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
        i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
        i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}
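
/*
 * Bring-up sketch (illustrative only): a driver sizes both rings, calls
 * i40e_init_adminq(), and pairs it with i40e_shutdown_adminq() on
 * teardown. The ring depths and buffer sizes below are hypothetical.
 *
 *      hw->aq.num_asq_entries = 128;
 *      hw->aq.num_arq_entries = 128;
 *      hw->aq.asq_buf_size = 4096;
 *      hw->aq.arq_buf_size = 4096;
 *      status = i40e_init_adminq(hw);
 *      if (status != I40E_SUCCESS)
 *              return status;
 *      ...
 *      i40e_shutdown_adminq(hw);
 */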

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (i40e_check_asq_alive(hw))
                i40e_aq_queue_shutdown(hw, true);

        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

        if (hw->nvm_buff.va)
                i40e_free_virt_mem(hw, &hw->nvm_buff);

        return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
        struct i40e_adminq_ring *asq = &(hw->aq.asq);
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
                        i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
                                    I40E_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
                i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = I40E_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return I40E_DESC_UNUSED(asq);
}
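
/*
 * Worked example for the return value above, assuming the usual
 * I40E_DESC_UNUSED() ring arithmetic of
 * ((ntc > ntu ? 0 : count) + ntc - ntu - 1): with count = 8,
 * next_to_clean = 2 and next_to_use = 6, four descriptors are still in
 * flight and 8 + 2 - 6 - 1 = 3 are free. One slot always stays unused so
 * that head == tail unambiguously means an empty ring.
 */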

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                                struct i40e_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct i40e_asq_cmd_details *cmd_details)
{
        enum i40e_status_code status = I40E_SUCCESS;
        struct i40e_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = I40E_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                i40e_memcpy(details,
                            cmd_details,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
                }
        } else {
                i40e_memset(details, 0,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = I40E_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag\n");
                status = I40E_ERR_PARAM;
                goto asq_send_command_error;
        }
        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (i40e_clean_asq(hw) == 0) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                i40e_memcpy(dma_buff->va, buff, buff_size,
                            I40E_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (i40e_asq_done(hw))
                                break;
                        i40e_usec_delay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
                i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                            I40E_DMA_TO_NONDMA);
                if (buff != NULL)
                        i40e_memcpy(buff, dma_buff->va, buff_size,
                                    I40E_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        i40e_debug(hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = I40E_SUCCESS;
                else
                        status = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                i40e_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Writeback timeout.\n");
                status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
        }

asq_send_command_error:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
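
/*
 * Direct-command sketch (illustrative only): a caller fills a stack
 * descriptor and sends it with no attached buffer, which makes it a
 * direct command. i40e_aqc_opc_queue_shutdown is one opcode really used
 * this way, by i40e_aq_queue_shutdown() in i40e_common.c.
 *
 *      struct i40e_aq_desc desc;
 *      enum i40e_status_code status;
 *
 *      i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *      status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 */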

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = I40E_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#else
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#else
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                    I40E_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                i40e_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, I40E_DMA_TO_NONDMA);

        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
        i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
#endif /* PF_DRIVER */
clean_arq_element_out:
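        /* Ring-wraparound arithmetic: when the hardware head (ntu) has
         * wrapped past our clean index (ntc), adding the ring size keeps
         * the pending count positive. For example, with count = 16,
         * ntc = 14 and ntu = 3, pending = 16 + (3 - 14) = 5 descriptors
         * left to process.
         */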
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        i40e_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}
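
/*
 * Event-polling sketch (illustrative only): a service task drains the ARQ
 * by calling i40e_clean_arq_element() until it reports no pending work.
 * event_buffer and handle_event() are hypothetical; msg_buf must be at
 * least hw->aq.arq_buf_size bytes to hold the largest event.
 *
 *      struct i40e_arq_event_info e;
 *      u16 pending = 0;
 *
 *      e.buf_len = hw->aq.arq_buf_size;
 *      e.msg_buf = event_buffer;
 *      do {
 *              if (i40e_clean_arq_element(hw, &e, &pending))
 *                      break;
 *              handle_event(hw, &e);
 *      } while (pending);
 */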