drivers/net/iavf/base/iavf_adminq.c
/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (iavf_is_vf(hw)) {
                hw->aq.asq.tail = IAVF_ATQT1;
                hw->aq.asq.head = IAVF_ATQH1;
                hw->aq.asq.len  = IAVF_ATQLEN1;
                hw->aq.asq.bal  = IAVF_ATQBAL1;
                hw->aq.asq.bah  = IAVF_ATQBAH1;
                hw->aq.arq.tail = IAVF_ARQT1;
                hw->aq.arq.head = IAVF_ARQH1;
                hw->aq.arq.len  = IAVF_ARQLEN1;
                hw->aq.arq.bal  = IAVF_ARQBAL1;
                hw->aq.arq.bah  = IAVF_ARQBAH1;
        }
}
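
/* Editorial note: the values stored above are register *offsets*, not
 * register contents; the rest of this file accesses the hardware queues
 * through rd32()/wr32() with these offsets (see iavf_config_asq_regs()
 * and iavf_config_arq_regs() below).
 */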

/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status_code iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         iavf_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct iavf_asq_cmd_details)));
        if (ret_code) {
                iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status_code iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         iavf_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status_code iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code;
        struct iavf_aq_desc *desc;
        struct iavf_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}
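
/* Editorial note: the loop above hands every receive buffer to firmware
 * up front. iavf_config_arq_regs() publishes them by writing the ARQ tail
 * to num_arq_entries - 1, and iavf_clean_arq_element() re-arms each
 * descriptor in the same way once its event has been consumed.
 */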

/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status_code iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code;
        struct iavf_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum iavf_status_code iavf_config_asq_regs(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code = IAVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
#ifdef INTEGRATED_VF
        if (iavf_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          IAVF_ATQLEN1_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  IAVF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
        wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum iavf_status_code iavf_config_arq_regs(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code = IAVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
#ifdef INTEGRATED_VF
        if (iavf_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          IAVF_ARQLEN1_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  IAVF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
        wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status_code iavf_init_asq(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code = IAVF_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_asq_ring(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_asq_bufs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_asq_regs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status_code iavf_init_arq(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code = IAVF_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_arq_ring(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_arq_bufs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_arq_regs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum iavf_status_code iavf_shutdown_asq(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code = IAVF_SUCCESS;

        iavf_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_asq_bufs(hw);

shutdown_asq_out:
        iavf_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum iavf_status_code iavf_shutdown_arq(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code = IAVF_SUCCESS;

        iavf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_arq_bufs(hw);

shutdown_arq_out:
        iavf_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}

/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status_code iavf_init_adminq(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code;

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }
        iavf_init_spinlock(&hw->aq.asq_spinlock);
        iavf_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        iavf_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = iavf_init_asq(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = iavf_init_arq(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_asq;

        ret_code = IAVF_SUCCESS;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_asq:
        iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        iavf_destroy_spinlock(&hw->aq.asq_spinlock);
        iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}
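
/* Illustrative usage sketch (editorial, not part of the driver): per the
 * comment above, a caller fills in the queue sizes before calling
 * iavf_init_adminq(). The entry counts and buffer sizes below are
 * hypothetical example values, not requirements:
 *
 *      hw->aq.num_asq_entries = 32;
 *      hw->aq.num_arq_entries = 32;
 *      hw->aq.asq_buf_size = 4096;
 *      hw->aq.arq_buf_size = 4096;
 *      if (iavf_init_adminq(hw) != IAVF_SUCCESS)
 *              bail out; on failure the rings and spinlocks
 *              have already been cleaned up by this routine
 */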

/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status_code iavf_shutdown_adminq(struct iavf_hw *hw)
{
        enum iavf_status_code ret_code = IAVF_SUCCESS;

        if (iavf_check_asq_alive(hw))
                iavf_aq_queue_shutdown(hw, true);

        iavf_shutdown_asq(hw);
        iavf_shutdown_arq(hw);
        iavf_destroy_spinlock(&hw->aq.asq_spinlock);
        iavf_destroy_spinlock(&hw->aq.arq_spinlock);

        if (hw->nvm_buff.va)
                iavf_free_virt_mem(hw, &hw->nvm_buff);

        return ret_code;
}

/**
 *  iavf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
        struct iavf_adminq_ring *asq = &(hw->aq.asq);
        struct iavf_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct iavf_aq_desc desc_cb;
        struct iavf_aq_desc *desc;

        desc = IAVF_ADMINQ_DESC(*asq, ntc);
        details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        IAVF_ADMINQ_CALLBACK cb_func =
                                        (IAVF_ADMINQ_CALLBACK)details->callback;
                        iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
                                    IAVF_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
                iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = IAVF_ADMINQ_DESC(*asq, ntc);
                details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return IAVF_DESC_UNUSED(asq);
}
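
/* Editorial note: IAVF_DESC_UNUSED() (defined in iavf_adminq.h) derives
 * the free-descriptor count from next_to_clean and next_to_use, handling
 * ring wraparound; one slot is deliberately left unused so a full ring
 * can be distinguished from an empty one.
 */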

/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Send
 *  Queue. It runs the queue, cleans the queue, etc.
 **/
enum iavf_status_code iavf_asq_send_command(struct iavf_hw *hw,
                                struct iavf_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct iavf_asq_cmd_details *cmd_details)
{
        enum iavf_status_code status = IAVF_SUCCESS;
        struct iavf_dma_mem *dma_buff = NULL;
        struct iavf_asq_cmd_details *details;
        struct iavf_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        iavf_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = IAVF_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                iavf_memcpy(details,
                            cmd_details,
                            sizeof(struct iavf_asq_cmd_details),
                            IAVF_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
                }
        } else {
                iavf_memset(details, 0,
                            sizeof(struct iavf_asq_cmd_details),
                            IAVF_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = IAVF_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag.\n");
                status = IAVF_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (iavf_clean_asq(hw) == 0) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = IAVF_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
                    IAVF_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                iavf_memcpy(dma_buff->va, buff, buff_size,
                            IAVF_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (iavf_asq_done(hw))
                                break;
                        iavf_usec_delay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (iavf_asq_done(hw)) {
                iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
                            IAVF_DMA_TO_NONDMA);
                if (buff != NULL)
                        iavf_memcpy(buff, dma_buff->va, buff_size,
                                    IAVF_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        iavf_debug(hw,
                                   IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
                        status = IAVF_SUCCESS;
                else
                        status = IAVF_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
        }

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                iavf_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                if (rd32(hw, hw->aq.asq.len) & IAVF_ATQLEN1_ATQCRIT_MASK) {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        iavf_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}

/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
                    IAVF_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}
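
/* Illustrative usage sketch (editorial, not part of the driver): a direct
 * command, i.e. one without an attached buffer, is built with the helper
 * above and handed to iavf_asq_send_command(). This mirrors the
 * queue-shutdown request issued via iavf_aq_queue_shutdown() during
 * iavf_shutdown_adminq(); the opcode name below is assumed from the
 * adminq command definitions:
 *
 *      struct iavf_aq_desc desc;
 *      enum iavf_status_code status;
 *
 *      iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
 *      status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * Indirect commands pass a buffer and its size instead of NULL/0; the
 * routine copies the buffer into the ring's DMA memory itself.
 */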

/**
 *  iavf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum iavf_status_code iavf_clean_arq_element(struct iavf_hw *hw,
                                             struct iavf_arq_event_info *e,
                                             u16 *pending)
{
        enum iavf_status_code ret_code = IAVF_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct iavf_aq_desc *desc;
        struct iavf_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        iavf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = IAVF_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
#ifdef INTEGRATED_VF
        if (!iavf_is_vf(hw))
                ntu = rd32(hw, hw->aq.arq.head) & IAVF_PF_ARQH_ARQH_MASK;
        else
                ntu = rd32(hw, hw->aq.arq.head) & IAVF_ARQH1_ARQH_MASK;
#else
        ntu = rd32(hw, hw->aq.arq.head) & IAVF_ARQH1_ARQH_MASK;
#endif /* INTEGRATED_VF */
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & IAVF_AQ_FLAG_ERR) {
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
                    IAVF_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                iavf_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, IAVF_DMA_TO_NONDMA);

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

        desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        iavf_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}
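
/* Illustrative usage sketch (editorial, not part of the driver): event
 * processing typically polls this routine until no work is left, reusing
 * one caller-allocated message buffer sized to arq_buf_size:
 *
 *      struct iavf_arq_event_info event;
 *      u16 pending = 0;
 *
 *      event.buf_len = hw->aq.arq_buf_size;
 *      event.msg_buf = <caller-allocated buffer of buf_len bytes>;
 *      do {
 *              if (iavf_clean_arq_element(hw, &event, &pending) ==
 *                  IAVF_ERR_ADMIN_QUEUE_NO_WORK)
 *                      break;
 *              <handle event.desc and event.msg_buf here>
 *      } while (pending);
 */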