/*******************************************************************************

Copyright (c) 2013 - 2014, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

#ifndef VF_DRIVER
/**
 * i40e_is_nvm_update_op - return true if this is an NVM update operation
 * @desc: API request descriptor
 **/
STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
        return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
                desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}
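
/* NVM update opcodes are serialized through hw->aq.nvm_busy: the flag is
 * set when an update command is posted in i40e_asq_send_command() and
 * cleared again when the matching completion event is consumed in
 * i40e_clean_arq_element().
 */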

#endif /* VF_DRIVER */
/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (hw->mac.type == I40E_MAC_VF) {
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
                hw->aq.asq.bal  = I40E_VF_ATQBAL1;
                hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
                hw->aq.arq.bal  = I40E_VF_ARQBAL1;
                hw->aq.arq.bah  = I40E_VF_ARQBAH1;
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
                hw->aq.asq.bal  = I40E_PF_ATQBAL;
                hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
                hw->aq.arq.bal  = I40E_PF_ARQBAL;
                hw->aq.arq.bah  = I40E_PF_ARQBAH;
        }
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         i40e_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct i40e_asq_cmd_details)));
        if (ret_code) {
                i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         i40e_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* Per the Admin Queue design there is no register for
                 * buffer size configuration; the size is carried in each
                 * descriptor's datalen field instead
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_PF_ATQLEN_ATQENABLE_MASK));
        wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_PF_ARQLEN_ARQENABLE_MASK));
        wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called here are not atomic-context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;
        hw->aq.asq.count = hw->aq.num_asq_entries;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_asq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_asq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called here are not atomic-context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;
        hw->aq.arq.count = hw->aq.num_arq_entries;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_arq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_arq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.asq.count == 0)
                return I40E_ERR_NOT_READY;

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        /* make sure spinlock is available */
        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_asq_bufs(hw);

        i40e_release_spinlock(&hw->aq.asq_spinlock);

        return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.arq.count == 0)
                return I40E_ERR_NOT_READY;

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        /* make sure spinlock is available */
        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_arq_bufs(hw);

        i40e_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
#ifndef VF_DRIVER
        u16 eetrack_lo, eetrack_hi;
        int retry = 0;
#endif

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        /* initialize spin locks */
        i40e_init_spinlock(&hw->aq.asq_spinlock);
        i40e_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        i40e_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = i40e_init_arq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_asq;

#ifndef VF_DRIVER
        /* There are some cases where the firmware may not be quite ready
         * for AdminQ operations, so we retry the AdminQ setup a few times
         * if we see timeouts in this first AQ call.
         */
        do {
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &hw->aq.fw_maj_ver,
                                                        &hw->aq.fw_min_ver,
                                                        &hw->aq.api_maj_ver,
                                                        &hw->aq.api_min_ver,
                                                        NULL);
                if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
                        break;
                retry++;
                i40e_msec_delay(100);
                i40e_resume_aq(hw);
        } while (retry < 10);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_arq;

        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;

        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
        }

        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        hw->aq.nvm_busy = false;

        ret_code = i40e_aq_set_hmc_resource_profile(hw,
                                                    I40E_HMC_PROFILE_DEFAULT,
                                                    0,
                                                    NULL);
        ret_code = I40E_SUCCESS;

#endif /* VF_DRIVER */
        /* success! */
        goto init_adminq_exit;

#ifndef VF_DRIVER
init_adminq_free_arq:
        i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
        i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}
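
/*
 * Illustrative bring-up sketch (an assumption, not code from this file):
 * a driver sizes the queues before calling i40e_init_adminq().  The entry
 * counts and buffer sizes below are example values only.
 *
 *     hw->aq.num_asq_entries = 32;
 *     hw->aq.num_arq_entries = 32;
 *     hw->aq.asq_buf_size = 4096;
 *     hw->aq.arq_buf_size = 4096;
 *     if (i40e_init_adminq(hw) != I40E_SUCCESS)
 *             goto err_adminq;        (hypothetical error label)
 */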

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (i40e_check_asq_alive(hw))
                i40e_aq_queue_shutdown(hw, true);

        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);

        /* destroy the spinlocks */
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
        struct i40e_adminq_ring *asq = &(hw->aq.asq);
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
#ifdef DMA_SYNC_SUPPORT
        I40E_DMA_SYNC(&hw->aq.asq.desc_buf, I40E_SYNC_FORKERNEL);
#endif /* DMA_SYNC_SUPPORT */
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
                           rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
                        i40e_memcpy(&desc_cb, desc,
                                    sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
                i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = I40E_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                                struct i40e_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct i40e_asq_cmd_details *cmd_details)
{
        enum i40e_status_code status = I40E_SUCCESS;
        struct i40e_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_exit;
        }

        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_exit;
        }

#ifndef VF_DRIVER
        if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
                status = I40E_ERR_NVM;
                goto asq_send_command_exit;
        }

#endif
        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                i40e_memcpy(details,
                            cmd_details,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
                }
        } else {
                i40e_memset(details, 0,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = I40E_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag.\n");
                status = I40E_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (i40e_clean_asq(hw) == 0) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                i40e_memcpy(dma_buff->va, buff, buff_size,
                            I40E_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
#ifdef DMA_SYNC_SUPPORT
                I40E_DMA_SYNC(dma_buff, I40E_SYNC_FORDEVICE);
#endif /* DMA_SYNC_SUPPORT */
        }

#ifdef DMA_SYNC_SUPPORT
        I40E_DMA_SYNC(&hw->aq.asq.desc_buf, I40E_SYNC_FORDEVICE);
#endif /* DMA_SYNC_SUPPORT */
        /* bump the tail */
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;
                u32 delay_len = 1;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (i40e_asq_done(hw))
                                break;
                        /* ugh! delay while spin_lock */
                        i40e_msec_delay(delay_len);
                        total_delay += delay_len;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
#ifdef DMA_SYNC_SUPPORT
                I40E_DMA_SYNC(&hw->aq.asq.desc_buf, I40E_SYNC_FORKERNEL);
#endif /* DMA_SYNC_SUPPORT */
                i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                            I40E_DMA_TO_NONDMA);
                if (buff != NULL)
                        i40e_memcpy(buff, dma_buff->va, buff_size,
                                    I40E_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        i40e_debug(hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = I40E_SUCCESS;
                else
                        status = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        if (LE16_TO_CPU(desc->datalen) == buff_size) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: desc and buffer writeback:\n");
                i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
        }

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Writeback timeout.\n");
                status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
        }

#ifndef VF_DRIVER
        if (!status && i40e_is_nvm_update_op(desc))
                hw->aq.nvm_busy = true;

#endif /* VF_DRIVER */
asq_send_command_error:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
asq_send_command_exit:
        return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
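
/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * build a direct, bufferless command with the helper above and post it
 * through i40e_asq_send_command().  The opcode is only an example; on
 * failure, hw->aq.asq_last_status holds the AQ return code.
 *
 *     struct i40e_aq_desc desc;
 *     enum i40e_status_code status;
 *
 *     i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *     status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 */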

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;
#ifdef DMA_SYNC_SUPPORT
        I40E_DMA_SYNC(&hw->aq.arq.desc_buf, I40E_SYNC_FORKERNEL);
#endif /* DMA_SYNC_SUPPORT */

        /* take the lock before we start messing with the ring */
        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        /* set next_to_use to head */
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Queue is empty.\n");
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;
#ifdef DMA_SYNC_SUPPORT
        I40E_DMA_SYNC(&hw->aq.arq.r.arq_bi[desc_idx], I40E_SYNC_FORKERNEL);
#endif /* DMA_SYNC_SUPPORT */

        flags = LE16_TO_CPU(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.arq_last_status =
                        (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        } else {
                i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                            I40E_DMA_TO_NONDMA);
                datalen = LE16_TO_CPU(desc->datalen);
                e->msg_size = min(datalen, e->msg_size);
                if (e->msg_buf != NULL && (e->msg_size != 0))
                        i40e_memcpy(e->msg_buf,
                                    hw->aq.arq.r.arq_bi[desc_idx].va,
                                    e->msg_size, I40E_DMA_TO_NONDMA);
        }

        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
        /* Set pending if needed, unlock and return */
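        /* pending counts the events FW has posted but the driver has not
         * yet consumed: the distance from next_to_clean to the hardware
         * head, adding a full ring length when ntc has wrapped past ntu
         */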
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
        i40e_release_spinlock(&hw->aq.arq_spinlock);

#ifndef VF_DRIVER
        if (i40e_is_nvm_update_op(&e->desc)) {
                hw->aq.nvm_busy = false;
                if (hw->aq.nvm_release_on_done) {
                        i40e_release_nvm(hw);
                        hw->aq.nvm_release_on_done = false;
                }
        }

#endif
        return ret_code;
}

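/*
 * Illustrative polling sketch (an assumption, not code from this file):
 * a driver drains the ARQ by calling i40e_clean_arq_element() until no
 * events remain.  The buffer and its size are hypothetical; msg_size is
 * both an input (buffer capacity) and an output (clamped event size).
 *
 *     struct i40e_arq_event_info info;
 *     u16 pending;
 *
 *     info.msg_buf = event_buf;           (caller-owned buffer)
 *     do {
 *             info.msg_size = sizeof(event_buf);
 *             if (i40e_clean_arq_element(hw, &info, &pending))
 *                     break;              (no work left, or error)
 *             (dispatch on LE16_TO_CPU(info.desc.opcode) here)
 *     } while (pending != 0);
 */

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/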
void i40e_resume_aq(struct i40e_hw *hw)
{
        /* Registers are reset after PF reset */
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
#endif
        i40e_config_asq_regs(hw);

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        i40e_config_arq_regs(hw);
}