net/iavf: replace license text with SPDX tag
drivers/net/iavf/base/iavf_adminq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013 - 2015 Intel Corporation
 */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (iavf_is_vf(hw)) {
		hw->aq.asq.tail = IAVF_ATQT1;
		hw->aq.asq.head = IAVF_ATQH1;
		hw->aq.asq.len  = IAVF_ATQLEN1;
		hw->aq.asq.bal  = IAVF_ATQBAL1;
		hw->aq.asq.bah  = IAVF_ATQBAH1;
		hw->aq.arq.tail = IAVF_ARQT1;
		hw->aq.arq.head = IAVF_ARQH1;
		hw->aq.arq.len  = IAVF_ARQLEN1;
		hw->aq.arq.bal  = IAVF_ARQBAL1;
		hw->aq.arq.bah  = IAVF_ARQBAH1;
	}
}
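
/* Note: only the VF register offsets are programmed here.  This base code
 * normally runs in a VF context; in INTEGRATED_VF builds, where iavf_is_vf()
 * can be false, the PF-side offsets are presumably set up by the integrating
 * driver rather than by this file.
 */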

/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status_code iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 iavf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status_code iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 iavf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status_code iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
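
/* Unlike send buffers, every receive descriptor above is fully programmed at
 * allocation time (flags, datalen, buffer address), so firmware can begin
 * writing events as soon as the ARQ tail is bumped in iavf_config_arq_regs().
 */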

/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status_code iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code;
	struct iavf_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free the posted receive buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
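
/* The pa check in iavf_free_asq_bufs() is presumably defensive:
 * iavf_alloc_asq_bufs() maps every entry, but skipping NULL addresses keeps
 * teardown safe if it is ever reached with a partially initialized ring.
 */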

/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum iavf_status_code iavf_config_asq_regs(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
#ifdef INTEGRATED_VF
	if (iavf_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  IAVF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  IAVF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
	wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum iavf_status_code iavf_config_arq_regs(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
#ifdef INTEGRATED_VF
	if (iavf_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  IAVF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  IAVF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
	wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
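
/* Writing tail = count - 1 hands all but one descriptor to firmware.  Ring
 * ownership follows the usual convention that head == tail means "empty",
 * so one slot always stays unused to keep a full ring distinguishable from
 * an empty one (compare IAVF_DESC_UNUSED in iavf_clean_asq() below).
 */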

/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status_code iavf_init_asq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_asq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_asq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_asq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status_code iavf_init_arq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_arq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_arq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_arq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum iavf_status_code iavf_shutdown_asq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_asq_bufs(hw);

shutdown_asq_out:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum iavf_status_code iavf_shutdown_arq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_arq_bufs(hw);

shutdown_arq_out:
	iavf_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status_code iavf_init_adminq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}
	iavf_init_spinlock(&hw->aq.asq_spinlock);
	iavf_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	iavf_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = iavf_init_asq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = iavf_init_arq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_asq;

	ret_code = IAVF_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
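
/* A minimal bring-up sketch (illustrative only; the entry counts and buffer
 * sizes below are driver policy, not values mandated by this file):
 *
 *	hw->aq.num_asq_entries = 32;
 *	hw->aq.num_arq_entries = 32;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	if (iavf_init_adminq(hw) != IAVF_SUCCESS)
 *		goto err_aq;	// hypothetical caller error label
 */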

/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status_code iavf_shutdown_adminq(struct iavf_hw *hw)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;

	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	iavf_shutdown_asq(hw);
	iavf_shutdown_arq(hw);
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		iavf_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  iavf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Returns the number of free descriptors on the send queue
 **/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &(hw->aq.asq);
	struct iavf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct iavf_aq_desc desc_cb;
	struct iavf_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			IAVF_ADMINQ_CALLBACK cb_func =
					(IAVF_ADMINQ_CALLBACK)details->callback;
			iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
				    IAVF_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
		iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}
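
/* IAVF_DESC_UNUSED derives the free-slot count from next_to_clean and
 * next_to_use with the usual wrap-around arithmetic, minus the one slot
 * that is deliberately never used (see the note after iavf_config_arq_regs()
 * above).
 */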

/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
enum iavf_status_code iavf_asq_send_command(struct iavf_hw *hw,
				struct iavf_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct iavf_asq_cmd_details *cmd_details)
{
	enum iavf_status_code status = IAVF_SUCCESS;
	struct iavf_dma_mem *dma_buff = NULL;
	struct iavf_asq_cmd_details *details;
	struct iavf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = IAVF_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		iavf_memcpy(details,
			    cmd_details,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
		}
	} else {
		iavf_memset(details, 0,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = IAVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag\n");
		status = IAVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (iavf_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = IAVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		iavf_memcpy(dma_buff->va, buff, buff_size,
			    IAVF_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			iavf_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
			    IAVF_DMA_TO_NONDMA);
		if (buff != NULL)
			iavf_memcpy(buff, dma_buff->va, buff_size,
				    IAVF_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
			status = IAVF_SUCCESS;
		else
			status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback descriptor if requested */
	if (details->wb_desc)
		iavf_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}
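
/* A minimal direct-command sketch (illustrative; the opcode value is
 * hypothetical and error handling is elided):
 *
 *	struct iavf_aq_desc desc;
 *
 *	iavf_fill_default_direct_cmd_desc(&desc, 0x0001); // hypothetical opcode
 *	if (iavf_asq_send_command(hw, &desc, NULL, 0, NULL) != IAVF_SUCCESS)
 *		;	// consult hw->aq.asq_last_status for the AQ return code
 */
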
/**
 *  iavf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum iavf_status_code iavf_clean_arq_element(struct iavf_hw *hw,
					     struct iavf_arq_event_info *e,
					     u16 *pending)
{
	enum iavf_status_code ret_code = IAVF_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = IAVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
#ifdef INTEGRATED_VF
	if (!iavf_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & IAVF_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & IAVF_ARQH1_ARQH_MASK;
#else
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_ARQH1_ARQH_MASK;
#endif /* INTEGRATED_VF */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & IAVF_AQ_FLAG_ERR) {
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
		    IAVF_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		iavf_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, IAVF_DMA_TO_NONDMA);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	iavf_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
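
/* A minimal event-polling sketch (illustrative; the buffer and loop policy
 * are the caller's choice):
 *
 *	struct iavf_arq_event_info event;
 *	u16 pending;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = buf;	// caller-provided buffer of buf_len bytes
 *	do {
 *		if (iavf_clean_arq_element(hw, &event, &pending))
 *			break;	// no work left, or a real error
 *		// dispatch on event.desc.opcode / event.msg_buf here
 *	} while (pending);
 */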