/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
        /* set head and tail registers in our local struct */
        hw->aq.asq.tail = IAVF_VF_ATQT1;
        hw->aq.asq.head = IAVF_VF_ATQH1;
        hw->aq.asq.len  = IAVF_VF_ATQLEN1;
        hw->aq.asq.bal  = IAVF_VF_ATQBAL1;
        hw->aq.asq.bah  = IAVF_VF_ATQBAH1;
        hw->aq.arq.tail = IAVF_VF_ARQT1;
        hw->aq.arq.head = IAVF_VF_ARQH1;
        hw->aq.arq.len  = IAVF_VF_ARQLEN1;
        hw->aq.arq.bal  = IAVF_VF_ARQBAL1;
        hw->aq.arq.bah  = IAVF_VF_ARQBAH1;
}

/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         iavf_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct iavf_asq_cmd_details)));
        if (ret_code) {
                iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         iavf_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
        iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
        enum iavf_status ret_code;
        struct iavf_aq_desc *desc;
        struct iavf_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
        enum iavf_status ret_code;
        struct iavf_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
        wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
        wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_asq_ring(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_asq_bufs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_asq_regs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_config_regs;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_asq(hw);
        return ret_code;

init_config_regs:
        iavf_free_asq_bufs(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_arq_ring(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_arq_bufs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_arq_regs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_config_regs;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_arq(hw);
        return ret_code;

init_config_regs:
        iavf_free_arq_bufs(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        iavf_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_asq_bufs(hw);

shutdown_asq_out:
        iavf_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        iavf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_arq_bufs(hw);

shutdown_arq_out:
        iavf_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}

/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }
        iavf_init_spinlock(&hw->aq.asq_spinlock);
        iavf_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        iavf_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = iavf_init_asq(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = iavf_init_arq(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_asq;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_asq:
        iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        iavf_destroy_spinlock(&hw->aq.asq_spinlock);
        iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}
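
/* Usage sketch (illustrative, not part of the upstream file; the guard
 * macro below is hypothetical and never defined by any build). A caller
 * is expected to fill the four sizing fields listed above before calling
 * iavf_init_adminq() and to pair a successful init with
 * iavf_shutdown_adminq(). The depth and buffer-size values here are
 * example choices, not mandated constants.
 */
#ifdef IAVF_ADMINQ_USAGE_EXAMPLE
static enum iavf_status example_adminq_bring_up(struct iavf_hw *hw)
{
        enum iavf_status status;

        hw->aq.num_asq_entries = 32;    /* send ring depth (example) */
        hw->aq.num_arq_entries = 32;    /* event ring depth (example) */
        hw->aq.asq_buf_size = 4096;     /* indirect command buffer (example) */
        hw->aq.arq_buf_size = 4096;     /* event buffer (example) */

        status = iavf_init_adminq(hw);
        if (status != IAVF_SUCCESS)
                return status;

        /* ... exchange messages with the PF ... */

        return iavf_shutdown_adminq(hw);
}
#endif /* IAVF_ADMINQ_USAGE_EXAMPLE */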

/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (iavf_check_asq_alive(hw))
                iavf_aq_queue_shutdown(hw, true);

        iavf_shutdown_asq(hw);
        iavf_shutdown_arq(hw);
        iavf_destroy_spinlock(&hw->aq.asq_spinlock);
        iavf_destroy_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}

/**
 *  iavf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
        struct iavf_adminq_ring *asq = &(hw->aq.asq);
        struct iavf_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct iavf_aq_desc desc_cb;
        struct iavf_aq_desc *desc;

        desc = IAVF_ADMINQ_DESC(*asq, ntc);
        details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        IAVF_ADMINQ_CALLBACK cb_func =
                                        (IAVF_ADMINQ_CALLBACK)details->callback;
                        iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
                                    IAVF_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
                iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = IAVF_ADMINQ_DESC(*asq, ntc);
                details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return IAVF_DESC_UNUSED(asq);
}
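
/* Worked example (illustrative), assuming the IAVF_DESC_UNUSED() ring
 * macro from iavf_adminq.h: with count = 32, next_to_clean = 2 and
 * next_to_use = 5, the ring holds three in-flight descriptors (2, 3
 * and 4) and the macro yields 32 + 2 - 5 - 1 = 28 free slots. One slot
 * is always kept unused so a full ring is never mistaken for an empty
 * one.
 */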

/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue. It runs the queue, cleans the queue, etc.
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
                                struct iavf_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct iavf_asq_cmd_details *cmd_details)
{
        enum iavf_status status = IAVF_SUCCESS;
        struct iavf_dma_mem *dma_buff = NULL;
        struct iavf_asq_cmd_details *details;
        struct iavf_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        iavf_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = IAVF_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                iavf_memcpy(details,
                            cmd_details,
                            sizeof(struct iavf_asq_cmd_details),
                            IAVF_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
                }
        } else {
                iavf_memset(details, 0,
                            sizeof(struct iavf_asq_cmd_details),
                            IAVF_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = IAVF_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag\n");
                status = IAVF_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (iavf_clean_asq(hw) == 0) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = IAVF_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
                    IAVF_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                iavf_memcpy(dma_buff->va, buff, buff_size,
                            IAVF_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (iavf_asq_done(hw))
                                break;
                        iavf_usec_delay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (iavf_asq_done(hw)) {
                iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
                            IAVF_DMA_TO_NONDMA);
                if (buff != NULL)
                        iavf_memcpy(buff, dma_buff->va, buff_size,
                                    IAVF_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        iavf_debug(hw,
                                   IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
                        status = IAVF_SUCCESS;
                else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
                        status = IAVF_ERR_NOT_READY;
                else
                        status = IAVF_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
        }

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                iavf_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        iavf_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}

/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
                    IAVF_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}
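
/* Usage sketch (illustrative, not part of the upstream file; the guard
 * macro is hypothetical and never defined by any build). This mirrors
 * the pattern used by callers such as iavf_aq_send_msg_to_pf() in
 * iavf_common.c: fill a stack descriptor with
 * iavf_fill_default_direct_cmd_desc(), mark it indirect when there is a
 * payload, and hand it to iavf_asq_send_command(). The opcode parameter
 * stands in for a real AQ opcode.
 */
#ifdef IAVF_ADMINQ_USAGE_EXAMPLE
static enum iavf_status example_send_indirect_cmd(struct iavf_hw *hw,
                                                  u16 opcode,
                                                  void *msg, u16 msglen)
{
        struct iavf_aq_desc desc;

        iavf_fill_default_direct_cmd_desc(&desc, opcode);
        if (msglen) {
                /* indirect command: FW reads the payload from the buffer */
                desc.flags |= CPU_TO_LE16((u16)(IAVF_AQ_FLAG_BUF |
                                                IAVF_AQ_FLAG_RD));
                if (msglen > IAVF_AQ_LARGE_BUF)
                        desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_LB);
                desc.datalen = CPU_TO_LE16(msglen);
        }

        /* NULL cmd_details selects the default synchronous path */
        return iavf_asq_send_command(hw, &desc, msg, msglen, NULL);
}
#endif /* IAVF_ADMINQ_USAGE_EXAMPLE */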

/**
 *  iavf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
                                             struct iavf_arq_event_info *e,
                                             u16 *pending)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct iavf_aq_desc *desc;
        struct iavf_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        iavf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = IAVF_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
        ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & IAVF_AQ_FLAG_ERR) {
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
                    IAVF_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                iavf_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, IAVF_DMA_TO_NONDMA);

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

        desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        iavf_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}

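/* Usage sketch (illustrative, not part of the upstream file; the guard
 * macro is hypothetical and never defined by any build). Events are
 * typically drained by polling iavf_clean_arq_element() until it reports
 * no further work. The caller owns the message buffer; buf_size should
 * match the hw->aq.arq_buf_size the queue was initialized with.
 */
#ifdef IAVF_ADMINQ_USAGE_EXAMPLE
static void example_drain_arq(struct iavf_hw *hw, u8 *buf, u16 buf_size)
{
        struct iavf_arq_event_info event;
        enum iavf_status status;
        u16 pending;

        event.buf_len = buf_size;
        event.msg_buf = buf;

        do {
                status = iavf_clean_arq_element(hw, &event, &pending);
                if (status == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
                        break;  /* ring is empty */
                if (status != IAVF_SUCCESS)
                        break;  /* queue not initialized or event error */

                /* event.desc and event.msg_len now describe one message */
        } while (pending);
}
#endif /* IAVF_ADMINQ_USAGE_EXAMPLE */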