net/iavf/base: remove unnecessary compile option
drivers/net/iavf/base/iavf_adminq.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013 - 2015 Intel Corporation
 */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
        /* set head and tail registers in our local struct */
        hw->aq.asq.tail = IAVF_VF_ATQT1;
        hw->aq.asq.head = IAVF_VF_ATQH1;
        hw->aq.asq.len  = IAVF_VF_ATQLEN1;
        hw->aq.asq.bal  = IAVF_VF_ATQBAL1;
        hw->aq.asq.bah  = IAVF_VF_ATQBAH1;
        hw->aq.arq.tail = IAVF_VF_ARQT1;
        hw->aq.arq.head = IAVF_VF_ARQH1;
        hw->aq.arq.len  = IAVF_VF_ARQLEN1;
        hw->aq.arq.bal  = IAVF_VF_ARQBAL1;
        hw->aq.arq.bah  = IAVF_VF_ARQBAH1;
}
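
/* Note: only the register offsets are recorded here; the registers
 * themselves are programmed later, by iavf_config_asq_regs() and
 * iavf_config_arq_regs(), once the rings have been allocated.
 */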

/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         iavf_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct iavf_asq_cmd_details)));
        if (ret_code) {
                iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}
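
/* The cmd_buf allocation above backs the per-descriptor array of
 * struct iavf_asq_cmd_details that IAVF_ADMINQ_DETAILS() is expected to
 * index into; it is how iavf_asq_send_command() and iavf_clean_asq()
 * find the callback and writeback information for each ring slot.
 */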

/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         iavf_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
        enum iavf_status ret_code;
        struct iavf_aq_desc *desc;
        struct iavf_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with the Admin queue design; there is
                 * no register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}
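
/* Unlike the send queue, the receive descriptors are fully initialized
 * at allocation time: each one is pre-posted with a buffer so the FW can
 * write events into it as soon as the tail register is bumped in
 * iavf_config_arq_regs().
 */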

/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
        enum iavf_status ret_code;
        struct iavf_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
        wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
        wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}
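
/* Note the asymmetry with the ASQ: the ARQ tail is advanced to the last
 * descriptor here so that every pre-posted buffer is immediately owned by
 * the FW, whereas the ASQ tail only advances as commands are submitted in
 * iavf_asq_send_command().
 */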

/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not safe in an atomic context
 **/
enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_asq_ring(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_asq_bufs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_asq_regs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not safe in an atomic context
 **/
enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_arq_ring(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_arq_bufs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_arq_regs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        iavf_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_asq_bufs(hw);

shutdown_asq_out:
        iavf_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        iavf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_arq_bufs(hw);

shutdown_arq_out:
        iavf_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}

/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }
        iavf_init_spinlock(&hw->aq.asq_spinlock);
        iavf_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        iavf_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = iavf_init_asq(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = iavf_init_arq(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_asq;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_asq:
        iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        iavf_destroy_spinlock(&hw->aq.asq_spinlock);
        iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}
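
/* Illustrative usage sketch (not part of this driver): a caller fills in
 * the four sizing fields and then brings both queues up. The entry counts
 * and buffer sizes below are hypothetical example values:
 *
 *      hw->aq.num_asq_entries = 32;
 *      hw->aq.num_arq_entries = 32;
 *      hw->aq.asq_buf_size = 4096;
 *      hw->aq.arq_buf_size = 4096;
 *      status = iavf_init_adminq(hw);
 */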

/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (iavf_check_asq_alive(hw))
                iavf_aq_queue_shutdown(hw, true);

        iavf_shutdown_asq(hw);
        iavf_shutdown_arq(hw);
        iavf_destroy_spinlock(&hw->aq.asq_spinlock);
        iavf_destroy_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}

/**
 *  iavf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
        struct iavf_adminq_ring *asq = &(hw->aq.asq);
        struct iavf_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct iavf_aq_desc desc_cb;
        struct iavf_aq_desc *desc;

        desc = IAVF_ADMINQ_DESC(*asq, ntc);
        details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        IAVF_ADMINQ_CALLBACK cb_func =
                                        (IAVF_ADMINQ_CALLBACK)details->callback;
                        iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
                                    IAVF_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
                iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = IAVF_ADMINQ_DESC(*asq, ntc);
                details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

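        /* IAVF_DESC_UNUSED() is assumed to report the number of free ring
         * slots, i.e. the ring size minus the descriptors still in flight
         * between next_to_clean and next_to_use (with wrap-around).
         */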
        return IAVF_DESC_UNUSED(asq);
}

/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
                                struct iavf_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct iavf_asq_cmd_details *cmd_details)
{
        enum iavf_status status = IAVF_SUCCESS;
        struct iavf_dma_mem *dma_buff = NULL;
        struct iavf_asq_cmd_details *details;
        struct iavf_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        iavf_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = IAVF_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                iavf_memcpy(details,
                            cmd_details,
                            sizeof(struct iavf_asq_cmd_details),
                            IAVF_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
                }
        } else {
                iavf_memset(details, 0,
                            sizeof(struct iavf_asq_cmd_details),
                            IAVF_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = IAVF_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag\n");
                status = IAVF_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW; the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (iavf_clean_asq(hw) == 0) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = IAVF_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
                    IAVF_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                iavf_memcpy(dma_buff->va, buff, buff_size,
                            IAVF_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (iavf_asq_done(hw))
                                break;
                        iavf_usec_delay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }
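
        /* Both total_delay and asq_cmd_timeout above are counted in
         * microseconds: the loop polls the head register every 50 us until
         * the command is seen as done or the timeout expires.
         */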

        /* if ready, copy the desc back to temp */
        if (iavf_asq_done(hw)) {
                iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
                            IAVF_DMA_TO_NONDMA);
                if (buff != NULL)
                        iavf_memcpy(buff, dma_buff->va, buff_size,
                                    IAVF_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        iavf_debug(hw,
                                   IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
                        status = IAVF_SUCCESS;
                else
                        status = IAVF_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
        }

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                iavf_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);

        /* update the error if a timeout occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        iavf_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}

/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
                    IAVF_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}
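
/* Illustrative sketch of sending a direct (bufferless) command with the
 * two routines above; 'opcode' stands in for a real admin queue opcode
 * and is hypothetical here:
 *
 *      struct iavf_aq_desc desc;
 *      enum iavf_status status;
 *
 *      iavf_fill_default_direct_cmd_desc(&desc, opcode);
 *      status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
 */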

/**
 *  iavf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'.
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
                                             struct iavf_arq_event_info *e,
                                             u16 *pending)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct iavf_aq_desc *desc;
        struct iavf_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        iavf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = IAVF_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
        ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & IAVF_AQ_FLAG_ERR) {
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
                    IAVF_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                iavf_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, IAVF_DMA_TO_NONDMA);

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc;
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

        desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

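        /* After a successful clean, ntc has already advanced past the
         * element just returned, so the ring distance from ntc up to ntu
         * (modulo the ring size) is the number of received events still
         * waiting to be processed.
         */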
clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        iavf_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}