net/i40e/base: replace license text with SPDX tag
drivers/net/i40e/base/i40e_adminq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2018 Intel Corporation
 */

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (i40e_is_vf(hw)) {
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
                hw->aq.asq.bal  = I40E_VF_ATQBAL1;
                hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
                hw->aq.arq.bal  = I40E_VF_ARQBAL1;
                hw->aq.arq.bah  = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
                hw->aq.asq.bal  = I40E_PF_ATQBAL;
                hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
                hw->aq.arq.bal  = I40E_PF_ARQBAL;
                hw->aq.arq.bah  = I40E_PF_ARQBAH;
#endif
        }
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         i40e_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct i40e_asq_cmd_details)));
        if (ret_code) {
                i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         i40e_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with the Admin queue design; there
                 * is no register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

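/* Note the asymmetry with the send queue below: ARQ descriptors are pre-armed
 * with buffer addresses above so the firmware can DMA event data as soon as
 * the queue is enabled, whereas ASQ buffers are only attached to a descriptor
 * at send time (see i40e_asq_send_command()).
 */
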
/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
        int i;

        /* free the posted receive buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_asq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_asq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_arq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_arq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_asq_bufs(hw);

shutdown_asq_out:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_arq_bufs(hw);

shutdown_arq_out:
        i40e_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}
#ifdef PF_DRIVER

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
        /* Registers are reset after PF reset */
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        i40e_config_asq_regs(hw);

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        i40e_config_arq_regs(hw);
}
#endif /* PF_DRIVER */

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
#ifdef PF_DRIVER
        u16 cfg_ptr, oem_hi, oem_lo;
        u16 eetrack_lo, eetrack_hi;
#endif
        enum i40e_status_code ret_code;
#ifdef PF_DRIVER
        int retry = 0;
#endif

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }
        i40e_init_spinlock(&hw->aq.asq_spinlock);
        i40e_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        i40e_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = i40e_init_arq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        /* VF has no need of firmware */
        if (i40e_is_vf(hw))
                goto init_adminq_exit;
#endif
        /* There are some cases where the firmware may not be quite ready
         * for AdminQ operations, so we retry the AdminQ setup a few times
         * if we see timeouts in this first AQ call.
         */
        do {
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &hw->aq.fw_maj_ver,
                                                        &hw->aq.fw_min_ver,
                                                        &hw->aq.fw_build,
                                                        &hw->aq.api_maj_ver,
                                                        &hw->aq.api_min_ver,
                                                        NULL);
                if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
                        break;
                retry++;
                i40e_msec_delay(100);
                i40e_resume_aq(hw);
        } while (retry < 10);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_arq;

        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
                           &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
        i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
        i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
                           &oem_hi);
        i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
                           &oem_lo);
        hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

        /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
        if ((hw->aq.api_maj_ver > 1) ||
            ((hw->aq.api_maj_ver == 1) &&
             (hw->aq.api_min_ver >= 7)))
                hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

        if (hw->mac.type == I40E_MAC_XL710 &&
            hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
                hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
        }

        /* Newer versions of firmware require lock when reading the NVM */
        if ((hw->aq.api_maj_ver > 1) ||
            ((hw->aq.api_maj_ver == 1) &&
             (hw->aq.api_min_ver >= 5)))
                hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
        }

        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        hw->nvm_release_on_done = false;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

#endif /* PF_DRIVER */
        ret_code = I40E_SUCCESS;

        /* success! */
        goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
        i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
        i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}

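/*
 * Illustrative sketch, not part of the upstream driver: a caller sizes the
 * queues before init (the values below are examples, not mandated here):
 *
 *      hw->aq.num_asq_entries = 128;
 *      hw->aq.num_arq_entries = 128;
 *      hw->aq.asq_buf_size = 4096;
 *      hw->aq.arq_buf_size = 4096;
 *      if (i40e_init_adminq(hw) != I40E_SUCCESS)
 *              ... abort device bring-up, the AdminQ is unusable ...
 */
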
/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (i40e_check_asq_alive(hw))
                i40e_aq_queue_shutdown(hw, true);

        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

        if (hw->nvm_buff.va)
                i40e_free_virt_mem(hw, &hw->nvm_buff);

        return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
        struct i40e_adminq_ring *asq = &(hw->aq.asq);
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
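
        /* Everything between next_to_clean and the firmware-written head has
         * been consumed by the FW: run any completion callback, then zero the
         * descriptor and its details so the slot can be reused.
         */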
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
                        i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
                                    I40E_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
                i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = I40E_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
        /* AQ designers suggest use of the head register for better
         * timing reliability than the DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It cleans the queue of completed commands, posts the new
 *  descriptor, and polls for its completion unless asynchronous handling
 *  was requested.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                                struct i40e_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct i40e_asq_cmd_details *cmd_details)
{
        enum i40e_status_code status = I40E_SUCCESS;
        struct i40e_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = I40E_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                i40e_memcpy(details,
                            cmd_details,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
                }
        } else {
                i40e_memset(details, 0,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = I40E_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag.\n");
                status = I40E_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW; the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (i40e_clean_asq(hw) == 0) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                i40e_memcpy(dma_buff->va, buff, buff_size,
                            I40E_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

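                /* total_delay advances in 50 usec steps below, so
                 * hw->aq.asq_cmd_timeout is interpreted here as a budget
                 * expressed in microseconds.
                 */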
                do {
                        /* AQ designers suggest use of the head register for
                         * better timing reliability than the DD bit
                         */
                        if (i40e_asq_done(hw))
                                break;
                        i40e_usec_delay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
                i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                            I40E_DMA_TO_NONDMA);
                if (buff != NULL)
                        i40e_memcpy(buff, dma_buff->va, buff_size,
                                    I40E_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        i40e_debug(hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = I40E_SUCCESS;
                else
                        status = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save the writeback descriptor if requested */
        if (details->wb_desc)
                i40e_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

        /* update the error if a timeout occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
#ifdef PF_DRIVER
                if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
#else
                if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
#endif
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}

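/*
 * Illustrative sketch, not part of the upstream file: a direct (buffer-less)
 * command is typically built and issued as
 *
 *      struct i40e_aq_desc desc;
 *
 *      i40e_fill_default_direct_cmd_desc(&desc, opcode);
 *      ... set desc.params as the opcode requires ...
 *      status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * The AQ wrappers in i40e_common.c (e.g. i40e_aq_queue_shutdown()) follow
 * this pattern.
 */
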
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = I40E_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
        else
                ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
#ifdef PF_DRIVER
        ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
        ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
#endif /* INTEGRATED_VF */
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                    I40E_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                i40e_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, I40E_DMA_TO_NONDMA);

        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc;
         * FW updates datalen to indicate the event message size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
        i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
#endif /* PF_DRIVER */
clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
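        /* Worked example of the wrap-around above: with a 16-entry ring,
         * ntc == 14 and ntu == 2 gives 16 + (2 - 14) = 4 events still
         * pending for the caller to process.
         */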
clean_arq_element_err:
        i40e_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}
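
/*
 * Illustrative sketch, not part of the upstream file: drivers typically
 * drain the ARQ in a loop such as
 *
 *      struct i40e_arq_event_info event;
 *      u16 pending;
 *
 *      event.buf_len = hw->aq.arq_buf_size;
 *      event.msg_buf = buf;    (a caller-allocated buffer of buf_len bytes)
 *      do {
 *              if (i40e_clean_arq_element(hw, &event, &pending))
 *                      break;
 *              ... dispatch on LE16_TO_CPU(event.desc.opcode) ...
 *      } while (pending);
 */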