/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

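/* Overview: the Admin Queue (AQ) is the control channel between the driver
 * and firmware.  It consists of two DMA descriptor rings: the Admin Send
 * Queue (ASQ), carrying driver-to-firmware commands, and the Admin Receive
 * Queue (ARQ), carrying firmware-to-driver events.
 */
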
#ifdef PF_DRIVER
/**
 * i40e_is_nvm_update_op - return true if this is an NVM update operation
 * @desc: API request descriptor
 **/
STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
		desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}

#endif /* PF_DRIVER */
/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
#endif
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
#ifdef PF_DRIVER
	u16 eetrack_lo, eetrack_hi;
	u16 cfg_ptr, oem_hi, oem_lo;
	int retry = 0;
#endif
	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize spin locks */
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
#endif
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->aq.nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

#endif /* PF_DRIVER */
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
	i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
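
/* A minimal bring-up sketch for callers (illustrative only; the entry counts
 * and buffer sizes below are assumptions of this sketch, not requirements
 * imposed by this file):
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	if (i40e_init_adminq(hw) != I40E_SUCCESS)
 *		... handle the error ...
 */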

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the spinlocks */
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);

	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

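	/* I40E_DESC_UNUSED() reports how many ring slots are free for new
	 * commands given the current next_to_use/next_to_clean positions
	 */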
	return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag.\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

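		/* asq_cmd_timeout is counted here in 1 ms polling steps, so
		 * the loop below waits up to roughly that many milliseconds
		 * for the descriptor write back
		 */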
		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
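
/* Typical direct-command usage, as a sketch (the opcode here is only an
 * example; any direct command that carries no data buffer has the same
 * shape):
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 */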

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#else
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#else
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
	if (i40e_is_nvm_update_op(&e->desc)) {
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = false;
		}

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;

		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;

		default:
			break;
		}
	}

#endif
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
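	/* ntc > ntu means the head pointer has wrapped past the end of the
	 * ring, so add a full ring's worth to keep the count non-negative
	 */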
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

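/**
 *  i40e_resume_aq - resume AQ processing after a reset
 *  @hw: pointer to the hardware structure
 *
 *  Re-program the ASQ/ARQ registers and reset the ring indices; assumes the
 *  ring and buffer memory allocated at init time is still valid.
 **/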
void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}