ba7ef42de090e04b90b3273473f130111db5e5d6
[dpdk.git] / drivers / net / i40e / base / i40e_adminq.c
1 /*******************************************************************************
2
3 Copyright (c) 2013 - 2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 #include "i40e_status.h"
35 #include "i40e_type.h"
36 #include "i40e_register.h"
37 #include "i40e_adminq.h"
38 #include "i40e_prototype.h"
39
/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Records the register offsets of the Admin Send Queue (ATQ) and Admin
 *  Receive Queue (ARQ) in the local hw->aq structure, picking the VF or
 *  PF register set based on the function type, so later code can use
 *  hw->aq.{asq,arq}.* without re-checking.
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
	/* the PF register names exist only in PF-enabled builds, so the
	 * else branch is compiled out for VF-only drivers
	 */
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
#endif
	}
}
75
76 /**
77  *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
78  *  @hw: pointer to the hardware structure
79  **/
80 enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
81 {
82         enum i40e_status_code ret_code;
83
84         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
85                                          i40e_mem_atq_ring,
86                                          (hw->aq.num_asq_entries *
87                                          sizeof(struct i40e_aq_desc)),
88                                          I40E_ADMINQ_DESC_ALIGNMENT);
89         if (ret_code)
90                 return ret_code;
91
92         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
93                                           (hw->aq.num_asq_entries *
94                                           sizeof(struct i40e_asq_cmd_details)));
95         if (ret_code) {
96                 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
97                 return ret_code;
98         }
99
100         return ret_code;
101 }
102
103 /**
104  *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
105  *  @hw: pointer to the hardware structure
106  **/
107 enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
108 {
109         enum i40e_status_code ret_code;
110
111         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
112                                          i40e_mem_arq_ring,
113                                          (hw->aq.num_arq_entries *
114                                          sizeof(struct i40e_aq_desc)),
115                                          I40E_ADMINQ_DESC_ALIGNMENT);
116
117         return ret_code;
118 }
119
/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases only the send descriptor ring DMA memory; the per-entry
 *  buffers and bookkeeping arrays are freed by i40e_free_asq_bufs().
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
131
/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases only the receive descriptor ring DMA memory; the per-entry
 *  buffers and bookkeeping arrays are freed by i40e_free_arq_bufs().
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
143
/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 *
 *  Allocates the buffer-info array plus one DMA buffer per receive
 *  descriptor, then fills in every descriptor so each ring slot is
 *  pre-posted and ready for firmware to deposit an event into.
 *
 *  Returns I40E_SUCCESS, or the failing allocator's error code after
 *  unwinding any buffers that were already allocated.
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		/* BUF tells FW an external buffer is attached; LB marks
		 * buffers larger than the I40E_AQ_LARGE_BUF threshold
		 */
		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		/* hand the buffer's physical address to the hardware */
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
210
211 /**
212  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
213  *  @hw: pointer to the hardware structure
214  **/
215 STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
216 {
217         enum i40e_status_code ret_code;
218         struct i40e_dma_mem *bi;
219         int i;
220
221         /* No mapped memory needed yet, just the buffer info structures */
222         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
223                 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
224         if (ret_code)
225                 goto alloc_asq_bufs;
226         hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
227
228         /* allocate the mapped buffers */
229         for (i = 0; i < hw->aq.num_asq_entries; i++) {
230                 bi = &hw->aq.asq.r.asq_bi[i];
231                 ret_code = i40e_allocate_dma_mem(hw, bi,
232                                                  i40e_mem_asq_buf,
233                                                  hw->aq.asq_buf_size,
234                                                  I40E_ADMINQ_DESC_ALIGNMENT);
235                 if (ret_code)
236                         goto unwind_alloc_asq_bufs;
237         }
238 alloc_asq_bufs:
239         return ret_code;
240
241 unwind_alloc_asq_bufs:
242         /* don't try to free the one that failed... */
243         i--;
244         for (; i >= 0; i--)
245                 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
246         i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
247
248         return ret_code;
249 }
250
251 /**
252  *  i40e_free_arq_bufs - Free receive queue buffer info elements
253  *  @hw: pointer to the hardware structure
254  **/
255 STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
256 {
257         int i;
258
259         /* free descriptors */
260         for (i = 0; i < hw->aq.num_arq_entries; i++)
261                 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
262
263         /* free the descriptor memory */
264         i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
265
266         /* free the dma header */
267         i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
268 }
269
270 /**
271  *  i40e_free_asq_bufs - Free send queue buffer info elements
272  *  @hw: pointer to the hardware structure
273  **/
274 STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
275 {
276         int i;
277
278         /* only unmap if the address is non-NULL */
279         for (i = 0; i < hw->aq.num_asq_entries; i++)
280                 if (hw->aq.asq.r.asq_bi[i].pa)
281                         i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
282
283         /* free the buffer info list */
284         i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
285
286         /* free the descriptor memory */
287         i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
288
289         /* free the dma header */
290         i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
291 }
292
/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue.
 *  The length-register write also sets the queue-enable bit. Returns
 *  I40E_ERR_ADMIN_QUEUE_ERROR when the read-back of the base-address-low
 *  register does not match the value written.
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	/* combined PF/VF builds (INTEGRATED_VF) choose the mask at run
	 * time; single-role builds write their mask unconditionally
	 */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
339
/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event queue).
 *  The length-register write also sets the queue-enable bit, and the final
 *  tail write hands all pre-posted buffers to the hardware. Returns
 *  I40E_ERR_ADMIN_QUEUE_ERROR when the read-back of the base-address-low
 *  register does not match the value written.
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	/* combined PF/VF builds (INTEGRATED_VF) choose the mask at run
	 * time; single-role builds write their mask unconditionally
	 */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
389
390 /**
391  *  i40e_init_asq - main initialization routine for ASQ
392  *  @hw: pointer to the hardware structure
393  *
394  *  This is the main initialization routine for the Admin Send Queue
395  *  Prior to calling this function, drivers *MUST* set the following fields
396  *  in the hw->aq structure:
397  *     - hw->aq.num_asq_entries
398  *     - hw->aq.arq_buf_size
399  *
400  *  Do *NOT* hold the lock when calling this as the memory allocation routines
401  *  called are not going to be atomic context safe
402  **/
403 enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
404 {
405         enum i40e_status_code ret_code = I40E_SUCCESS;
406
407         if (hw->aq.asq.count > 0) {
408                 /* queue already initialized */
409                 ret_code = I40E_ERR_NOT_READY;
410                 goto init_adminq_exit;
411         }
412
413         /* verify input for valid configuration */
414         if ((hw->aq.num_asq_entries == 0) ||
415             (hw->aq.asq_buf_size == 0)) {
416                 ret_code = I40E_ERR_CONFIG;
417                 goto init_adminq_exit;
418         }
419
420         hw->aq.asq.next_to_use = 0;
421         hw->aq.asq.next_to_clean = 0;
422
423         /* allocate the ring memory */
424         ret_code = i40e_alloc_adminq_asq_ring(hw);
425         if (ret_code != I40E_SUCCESS)
426                 goto init_adminq_exit;
427
428         /* allocate buffers in the rings */
429         ret_code = i40e_alloc_asq_bufs(hw);
430         if (ret_code != I40E_SUCCESS)
431                 goto init_adminq_free_rings;
432
433         /* initialize base registers */
434         ret_code = i40e_config_asq_regs(hw);
435         if (ret_code != I40E_SUCCESS)
436                 goto init_adminq_free_rings;
437
438         /* success! */
439         hw->aq.asq.count = hw->aq.num_asq_entries;
440         goto init_adminq_exit;
441
442 init_adminq_free_rings:
443         i40e_free_adminq_asq(hw);
444
445 init_adminq_exit:
446         return ret_code;
447 }
448
449 /**
450  *  i40e_init_arq - initialize ARQ
451  *  @hw: pointer to the hardware structure
452  *
453  *  The main initialization routine for the Admin Receive (Event) Queue.
454  *  Prior to calling this function, drivers *MUST* set the following fields
455  *  in the hw->aq structure:
456  *     - hw->aq.num_asq_entries
457  *     - hw->aq.arq_buf_size
458  *
459  *  Do *NOT* hold the lock when calling this as the memory allocation routines
460  *  called are not going to be atomic context safe
461  **/
462 enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
463 {
464         enum i40e_status_code ret_code = I40E_SUCCESS;
465
466         if (hw->aq.arq.count > 0) {
467                 /* queue already initialized */
468                 ret_code = I40E_ERR_NOT_READY;
469                 goto init_adminq_exit;
470         }
471
472         /* verify input for valid configuration */
473         if ((hw->aq.num_arq_entries == 0) ||
474             (hw->aq.arq_buf_size == 0)) {
475                 ret_code = I40E_ERR_CONFIG;
476                 goto init_adminq_exit;
477         }
478
479         hw->aq.arq.next_to_use = 0;
480         hw->aq.arq.next_to_clean = 0;
481
482         /* allocate the ring memory */
483         ret_code = i40e_alloc_adminq_arq_ring(hw);
484         if (ret_code != I40E_SUCCESS)
485                 goto init_adminq_exit;
486
487         /* allocate buffers in the rings */
488         ret_code = i40e_alloc_arq_bufs(hw);
489         if (ret_code != I40E_SUCCESS)
490                 goto init_adminq_free_rings;
491
492         /* initialize base registers */
493         ret_code = i40e_config_arq_regs(hw);
494         if (ret_code != I40E_SUCCESS)
495                 goto init_adminq_free_rings;
496
497         /* success! */
498         hw->aq.arq.count = hw->aq.num_arq_entries;
499         goto init_adminq_exit;
500
501 init_adminq_free_rings:
502         i40e_free_adminq_arq(hw);
503
504 init_adminq_exit:
505         return ret_code;
506 }
507
508 /**
509  *  i40e_shutdown_asq - shutdown the ASQ
510  *  @hw: pointer to the hardware structure
511  *
512  *  The main shutdown routine for the Admin Send Queue
513  **/
514 enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
515 {
516         enum i40e_status_code ret_code = I40E_SUCCESS;
517
518         i40e_acquire_spinlock(&hw->aq.asq_spinlock);
519
520         if (hw->aq.asq.count == 0) {
521                 ret_code = I40E_ERR_NOT_READY;
522                 goto shutdown_asq_out;
523         }
524
525         /* Stop firmware AdminQ processing */
526         wr32(hw, hw->aq.asq.head, 0);
527         wr32(hw, hw->aq.asq.tail, 0);
528         wr32(hw, hw->aq.asq.len, 0);
529         wr32(hw, hw->aq.asq.bal, 0);
530         wr32(hw, hw->aq.asq.bah, 0);
531
532         hw->aq.asq.count = 0; /* to indicate uninitialized queue */
533
534         /* free ring buffers */
535         i40e_free_asq_bufs(hw);
536
537 shutdown_asq_out:
538         i40e_release_spinlock(&hw->aq.asq_spinlock);
539         return ret_code;
540 }
541
542 /**
543  *  i40e_shutdown_arq - shutdown ARQ
544  *  @hw: pointer to the hardware structure
545  *
546  *  The main shutdown routine for the Admin Receive Queue
547  **/
548 enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
549 {
550         enum i40e_status_code ret_code = I40E_SUCCESS;
551
552         i40e_acquire_spinlock(&hw->aq.arq_spinlock);
553
554         if (hw->aq.arq.count == 0) {
555                 ret_code = I40E_ERR_NOT_READY;
556                 goto shutdown_arq_out;
557         }
558
559         /* Stop firmware AdminQ processing */
560         wr32(hw, hw->aq.arq.head, 0);
561         wr32(hw, hw->aq.arq.tail, 0);
562         wr32(hw, hw->aq.arq.len, 0);
563         wr32(hw, hw->aq.arq.bal, 0);
564         wr32(hw, hw->aq.arq.bah, 0);
565
566         hw->aq.arq.count = 0; /* to indicate uninitialized queue */
567
568         /* free ring buffers */
569         i40e_free_arq_bufs(hw);
570
571 shutdown_arq_out:
572         i40e_release_spinlock(&hw->aq.arq_spinlock);
573         return ret_code;
574 }
575
/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 *
 *  For PF builds this also queries the firmware/API version, reads the
 *  NVM version information, and releases any stale NVM resource lock.
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
#ifdef PF_DRIVER
	u16 eetrack_lo, eetrack_hi;
	u16 cfg_ptr, oem_hi, oem_lo;
	int retry = 0;
#endif
	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize spin locks */
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
#endif
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	/* eetrack is a 32-bit NVM tracking id stored as two 16-bit words */
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	/* OEM version lives behind a pointer found in the boot config area */
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* a newer FW major API than the driver knows is not safe to use */
	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

#endif /* PF_DRIVER */
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
	i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
693
694 /**
695  *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
696  *  @hw: pointer to the hardware structure
697  **/
698 enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
699 {
700         enum i40e_status_code ret_code = I40E_SUCCESS;
701
702         if (i40e_check_asq_alive(hw))
703                 i40e_aq_queue_shutdown(hw, true);
704
705         i40e_shutdown_asq(hw);
706         i40e_shutdown_arq(hw);
707
708         /* destroy the spinlocks */
709         i40e_destroy_spinlock(&hw->aq.asq_spinlock);
710         i40e_destroy_spinlock(&hw->aq.arq_spinlock);
711
712         if (hw->nvm_buff.va)
713                 i40e_free_virt_mem(hw, &hw->nvm_buff);
714
715         return ret_code;
716 }
717
/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Walks the send queue from next_to_clean up to the hardware head
 *  pointer, invoking any completion callback recorded for a slot (on a
 *  stack copy of the descriptor, since the ring slot is zeroed right
 *  after) and releasing each processed slot.
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);

	/* everything before the HW head pointer has been consumed by FW */
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			/* copy the descriptor before the slot is recycled */
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		/* advance with wrap-around at the end of the ring */
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
759
760 /**
761  *  i40e_asq_done - check if FW has processed the Admin Send Queue
762  *  @hw: pointer to the hw struct
763  *
764  *  Returns true if the firmware has processed all descriptors on the
765  *  admin send queue. Returns false if there are still requests pending.
766  **/
767 bool i40e_asq_done(struct i40e_hw *hw)
768 {
769         /* AQ designers suggest use of head for better
770          * timing reliability than DD bit
771          */
772         return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
773
774 }
775
776 /**
777  *  i40e_asq_send_command - send command to Admin Queue
778  *  @hw: pointer to the hw struct
779  *  @desc: prefilled descriptor describing the command (non DMA mem)
780  *  @buff: buffer to use for indirect commands
781  *  @buff_size: size of buffer for indirect commands
782  *  @cmd_details: pointer to command details structure
783  *
784  *  This is the main send command driver routine for the Admin Queue send
785  *  queue.  It runs the queue, cleans the queue, etc
786  **/
787 enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
788                                 struct i40e_aq_desc *desc,
789                                 void *buff, /* can be NULL */
790                                 u16  buff_size,
791                                 struct i40e_asq_cmd_details *cmd_details)
792 {
793         enum i40e_status_code status = I40E_SUCCESS;
794         struct i40e_dma_mem *dma_buff = NULL;
795         struct i40e_asq_cmd_details *details;
796         struct i40e_aq_desc *desc_on_ring;
797         bool cmd_completed = false;
798         u16  retval = 0;
799         u32  val = 0;
800
801         i40e_acquire_spinlock(&hw->aq.asq_spinlock);
802
803         hw->aq.asq_last_status = I40E_AQ_RC_OK;
804
805         if (hw->aq.asq.count == 0) {
806                 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
807                            "AQTX: Admin queue not initialized.\n");
808                 status = I40E_ERR_QUEUE_EMPTY;
809                 goto asq_send_command_error;
810         }
811
812         val = rd32(hw, hw->aq.asq.head);
813         if (val >= hw->aq.num_asq_entries) {
814                 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
815                            "AQTX: head overrun at %d\n", val);
816                 status = I40E_ERR_QUEUE_EMPTY;
817                 goto asq_send_command_error;
818         }
819
820         details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
821         if (cmd_details) {
822                 i40e_memcpy(details,
823                             cmd_details,
824                             sizeof(struct i40e_asq_cmd_details),
825                             I40E_NONDMA_TO_NONDMA);
826
827                 /* If the cmd_details are defined copy the cookie.  The
828                  * CPU_TO_LE32 is not needed here because the data is ignored
829                  * by the FW, only used by the driver
830                  */
831                 if (details->cookie) {
832                         desc->cookie_high =
833                                 CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
834                         desc->cookie_low =
835                                 CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
836                 }
837         } else {
838                 i40e_memset(details, 0,
839                             sizeof(struct i40e_asq_cmd_details),
840                             I40E_NONDMA_MEM);
841         }
842
843         /* clear requested flags and then set additional flags if defined */
844         desc->flags &= ~CPU_TO_LE16(details->flags_dis);
845         desc->flags |= CPU_TO_LE16(details->flags_ena);
846
847         if (buff_size > hw->aq.asq_buf_size) {
848                 i40e_debug(hw,
849                            I40E_DEBUG_AQ_MESSAGE,
850                            "AQTX: Invalid buffer size: %d.\n",
851                            buff_size);
852                 status = I40E_ERR_INVALID_SIZE;
853                 goto asq_send_command_error;
854         }
855
856         if (details->postpone && !details->async) {
857                 i40e_debug(hw,
858                            I40E_DEBUG_AQ_MESSAGE,
859                            "AQTX: Async flag not set along with postpone flag");
860                 status = I40E_ERR_PARAM;
861                 goto asq_send_command_error;
862         }
863
864         /* call clean and check queue available function to reclaim the
865          * descriptors that were processed by FW, the function returns the
866          * number of desc available
867          */
868         /* the clean function called here could be called in a separate thread
869          * in case of asynchronous completions
870          */
871         if (i40e_clean_asq(hw) == 0) {
872                 i40e_debug(hw,
873                            I40E_DEBUG_AQ_MESSAGE,
874                            "AQTX: Error queue is full.\n");
875                 status = I40E_ERR_ADMIN_QUEUE_FULL;
876                 goto asq_send_command_error;
877         }
878
879         /* initialize the temp desc pointer with the right desc */
880         desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
881
882         /* if the desc is available copy the temp desc to the right place */
883         i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
884                     I40E_NONDMA_TO_DMA);
885
886         /* if buff is not NULL assume indirect command */
887         if (buff != NULL) {
888                 dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
889                 /* copy the user buff into the respective DMA buff */
890                 i40e_memcpy(dma_buff->va, buff, buff_size,
891                             I40E_NONDMA_TO_DMA);
892                 desc_on_ring->datalen = CPU_TO_LE16(buff_size);
893
894                 /* Update the address values in the desc with the pa value
895                  * for respective buffer
896                  */
897                 desc_on_ring->params.external.addr_high =
898                                 CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
899                 desc_on_ring->params.external.addr_low =
900                                 CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
901         }
902
903         /* bump the tail */
904         i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
905         i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
906                       buff, buff_size);
907         (hw->aq.asq.next_to_use)++;
908         if (hw->aq.asq.next_to_use == hw->aq.asq.count)
909                 hw->aq.asq.next_to_use = 0;
910         if (!details->postpone)
911                 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
912
913         /* if cmd_details are not defined or async flag is not set,
914          * we need to wait for desc write back
915          */
916         if (!details->async && !details->postpone) {
917                 u32 total_delay = 0;
918
919                 do {
920                         /* AQ designers suggest use of head for better
921                          * timing reliability than DD bit
922                          */
923                         if (i40e_asq_done(hw))
924                                 break;
925                         /* ugh! delay while spin_lock */
926                         i40e_msec_delay(1);
927                         total_delay++;
928                 } while (total_delay < hw->aq.asq_cmd_timeout);
929         }
930
931         /* if ready, copy the desc back to temp */
932         if (i40e_asq_done(hw)) {
933                 i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
934                             I40E_DMA_TO_NONDMA);
935                 if (buff != NULL)
936                         i40e_memcpy(buff, dma_buff->va, buff_size,
937                                     I40E_DMA_TO_NONDMA);
938                 retval = LE16_TO_CPU(desc->retval);
939                 if (retval != 0) {
940                         i40e_debug(hw,
941                                    I40E_DEBUG_AQ_MESSAGE,
942                                    "AQTX: Command completed with error 0x%X.\n",
943                                    retval);
944
945                         /* strip off FW internal code */
946                         retval &= 0xff;
947                 }
948                 cmd_completed = true;
949                 if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
950                         status = I40E_SUCCESS;
951                 else
952                         status = I40E_ERR_ADMIN_QUEUE_ERROR;
953                 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
954         }
955
956         i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
957                    "AQTX: desc and buffer writeback:\n");
958         i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
959
960         /* save writeback aq if requested */
961         if (details->wb_desc)
962                 i40e_memcpy(details->wb_desc, desc_on_ring,
963                             sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
964
965         /* update the error if time out occurred */
966         if ((!cmd_completed) &&
967             (!details->async && !details->postpone)) {
968                 i40e_debug(hw,
969                            I40E_DEBUG_AQ_MESSAGE,
970                            "AQTX: Writeback timeout.\n");
971                 status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
972         }
973
974 asq_send_command_error:
975         i40e_release_spinlock(&hw->aq.asq_spinlock);
976         return status;
977 }
978
979 /**
980  *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
981  *  @desc:     pointer to the temp descriptor (non DMA mem)
982  *  @opcode:   the opcode can be used to decide which flags to turn off or on
983  *
984  *  Fill the desc with default values
985  **/
986 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
987                                        u16 opcode)
988 {
989         /* zero out the desc */
990         i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
991                     I40E_NONDMA_MEM);
992         desc->opcode = CPU_TO_LE16(opcode);
993         desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
994 }
995
996 /**
997  *  i40e_clean_arq_element
998  *  @hw: pointer to the hw struct
999  *  @e: event info from the receive descriptor, includes any buffers
1000  *  @pending: number of events that could be left to process
1001  *
1002  *  This function cleans one Admin Receive Queue element and returns
1003  *  the contents through e.  It can also return how many events are
1004  *  left to process through 'pending'
1005  **/
1006 enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
1007                                              struct i40e_arq_event_info *e,
1008                                              u16 *pending)
1009 {
1010         enum i40e_status_code ret_code = I40E_SUCCESS;
1011         u16 ntc = hw->aq.arq.next_to_clean;
1012         struct i40e_aq_desc *desc;
1013         struct i40e_dma_mem *bi;
1014         u16 desc_idx;
1015         u16 datalen;
1016         u16 flags;
1017         u16 ntu;
1018
1019         /* pre-clean the event info */
1020         i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
1021
1022         /* take the lock before we start messing with the ring */
1023         i40e_acquire_spinlock(&hw->aq.arq_spinlock);
1024
1025         if (hw->aq.arq.count == 0) {
1026                 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1027                            "AQRX: Admin queue not initialized.\n");
1028                 ret_code = I40E_ERR_QUEUE_EMPTY;
1029                 goto clean_arq_element_err;
1030         }
1031
1032         /* set next_to_use to head */
1033 #ifdef PF_DRIVER
1034 #ifdef INTEGRATED_VF
1035         if (!i40e_is_vf(hw))
1036                 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
1037 #else
1038         ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
1039 #endif /* INTEGRATED_VF */
1040 #endif /* PF_DRIVER */
1041 #ifdef VF_DRIVER
1042 #ifdef INTEGRATED_VF
1043         if (i40e_is_vf(hw))
1044                 ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
1045 #else
1046         ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
1047 #endif /* INTEGRATED_VF */
1048 #endif /* VF_DRIVER */
1049         if (ntu == ntc) {
1050                 /* nothing to do - shouldn't need to update ring's values */
1051                 ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
1052                 goto clean_arq_element_out;
1053         }
1054
1055         /* now clean the next descriptor */
1056         desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
1057         desc_idx = ntc;
1058
1059         flags = LE16_TO_CPU(desc->flags);
1060         if (flags & I40E_AQ_FLAG_ERR) {
1061                 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
1062                 hw->aq.arq_last_status =
1063                         (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
1064                 i40e_debug(hw,
1065                            I40E_DEBUG_AQ_MESSAGE,
1066                            "AQRX: Event received with error 0x%X.\n",
1067                            hw->aq.arq_last_status);
1068         }
1069
1070         i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
1071                     I40E_DMA_TO_NONDMA);
1072         datalen = LE16_TO_CPU(desc->datalen);
1073         e->msg_len = min(datalen, e->buf_len);
1074         if (e->msg_buf != NULL && (e->msg_len != 0))
1075                 i40e_memcpy(e->msg_buf,
1076                             hw->aq.arq.r.arq_bi[desc_idx].va,
1077                             e->msg_len, I40E_DMA_TO_NONDMA);
1078
1079         i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
1080         i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
1081                       hw->aq.arq_buf_size);
1082
1083         /* Restore the original datalen and buffer address in the desc,
1084          * FW updates datalen to indicate the event message
1085          * size
1086          */
1087         bi = &hw->aq.arq.r.arq_bi[ntc];
1088         i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
1089
1090         desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
1091         if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
1092                 desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
1093         desc->datalen = CPU_TO_LE16((u16)bi->size);
1094         desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
1095         desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
1096
1097         /* set tail = the last cleaned desc index. */
1098         wr32(hw, hw->aq.arq.tail, ntc);
1099         /* ntc is updated to tail + 1 */
1100         ntc++;
1101         if (ntc == hw->aq.num_arq_entries)
1102                 ntc = 0;
1103         hw->aq.arq.next_to_clean = ntc;
1104         hw->aq.arq.next_to_use = ntu;
1105
1106 #ifdef PF_DRIVER
1107         i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
1108 #endif
1109 clean_arq_element_out:
1110         /* Set pending if needed, unlock and return */
1111         if (pending != NULL)
1112                 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
1113 clean_arq_element_err:
1114         i40e_release_spinlock(&hw->aq.arq_spinlock);
1115
1116         return ret_code;
1117 }
1118
1119 void i40e_resume_aq(struct i40e_hw *hw)
1120 {
1121         /* Registers are reset after PF reset */
1122         hw->aq.asq.next_to_use = 0;
1123         hw->aq.asq.next_to_clean = 0;
1124
1125         i40e_config_asq_regs(hw);
1126
1127         hw->aq.arq.next_to_use = 0;
1128         hw->aq.arq.next_to_clean = 0;
1129
1130         i40e_config_arq_regs(hw);
1131 }