/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2019
 */

#include "ice_common.h"

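/**
 * ICE_CQ_INIT_REGS - fill in the register offsets for a control queue
 * @qinfo: pointer to the ice_ctl_q_info structure to fill in
 * @prefix: register prefix for the queue (PF_FW for the AdminQ, PF_MBX for
 *          the PF-VF Mailbox)
 *
 * Records the head, tail, length and base address register offsets, along
 * with the associated masks, for both the send and receive rings of the
 * given control queue.
 */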
#define ICE_CQ_INIT_REGS(qinfo, prefix)                         \
do {                                                            \
        (qinfo)->sq.head = prefix##_ATQH;                       \
        (qinfo)->sq.tail = prefix##_ATQT;                       \
        (qinfo)->sq.len = prefix##_ATQLEN;                      \
        (qinfo)->sq.bah = prefix##_ATQBAH;                      \
        (qinfo)->sq.bal = prefix##_ATQBAL;                      \
        (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;        \
        (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
        (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;           \
        (qinfo)->rq.head = prefix##_ARQH;                       \
        (qinfo)->rq.tail = prefix##_ARQT;                       \
        (qinfo)->rq.len = prefix##_ARQLEN;                      \
        (qinfo)->rq.bah = prefix##_ARQBAH;                      \
        (qinfo)->rq.bal = prefix##_ARQBAL;                      \
        (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;        \
        (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
        (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;           \
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
        struct ice_ctl_q_info *cq = &hw->adminq;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
        struct ice_ctl_q_info *cq = &hw->mailboxq;

        ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the send queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        /* check both queue-length and queue-enable fields */
        if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
                return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
                                                cq->sq.len_ena_mask)) ==
                        (cq->num_sq_entries | cq->sq.len_ena_mask);

        return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

        cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
        if (!cq->sq.desc_buf.va)
                return ICE_ERR_NO_MEMORY;

        cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
                                    sizeof(struct ice_sq_cd));
        if (!cq->sq.cmd_buf) {
                ice_free_dma_mem(hw, &cq->sq.desc_buf);
                return ICE_ERR_NO_MEMORY;
        }

        return ICE_SUCCESS;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

        cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
        if (!cq->rq.desc_buf.va)
                return ICE_ERR_NO_MEMORY;
        return ICE_SUCCESS;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
        ice_free_dma_mem(hw, &ring->desc_buf);
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */
        cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
                                     sizeof(cq->rq.desc_buf));
        if (!cq->rq.dma_head)
                return ICE_ERR_NO_MEMORY;
        cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

        /* allocate the mapped buffers */
        for (i = 0; i < cq->num_rq_entries; i++) {
                struct ice_aq_desc *desc;
                struct ice_dma_mem *bi;

                bi = &cq->rq.r.rq_bi[i];
                bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
                if (!bi->va)
                        goto unwind_alloc_rq_bufs;

                /* now configure the descriptors for use */
                desc = ICE_CTL_Q_DESC(cq->rq, i);

                desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
                if (cq->rq_buf_size > ICE_AQ_LG_BUF)
                        desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16(bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.generic.addr_high =
                        CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
                desc->params.generic.addr_low =
                        CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
                desc->params.generic.param0 = 0;
                desc->params.generic.param1 = 0;
        }
        return ICE_SUCCESS;

unwind_alloc_rq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
        ice_free(hw, cq->rq.dma_head);

        return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
                                     sizeof(cq->sq.desc_buf));
        if (!cq->sq.dma_head)
                return ICE_ERR_NO_MEMORY;
        cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

        /* allocate the mapped buffers */
        for (i = 0; i < cq->num_sq_entries; i++) {
                struct ice_dma_mem *bi;

                bi = &cq->sq.r.sq_bi[i];
                bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
                if (!bi->va)
                        goto unwind_alloc_sq_bufs;
        }
        return ICE_SUCCESS;

unwind_alloc_sq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
        ice_free(hw, cq->sq.dma_head);

        return ICE_ERR_NO_MEMORY;
}

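/**
 * ice_cfg_cq_regs - setup registers for a control queue
 * @hw: pointer to hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of entries for this control queue
 *
 * Configure base address and length registers for the given control queue
 * ring and verify that the base address register was latched by hardware.
 */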
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
        /* Clear Head and Tail */
        wr32(hw, ring->head, 0);
        wr32(hw, ring->tail, 0);

        /* set starting point */
        wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
        wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
        wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

        /* Check one register to verify that config was applied */
        if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
                return ICE_ERR_AQ_ERROR;

        return ICE_SUCCESS;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status status;

        status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
        if (status)
                return status;

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

        return ICE_SUCCESS;
}

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        if (cq->sq.count > 0) {
                /* queue already initialized */
                ret_code = ICE_ERR_NOT_READY;
                goto init_ctrlq_exit;
        }

        /* verify input for valid configuration */
        if (!cq->num_sq_entries || !cq->sq_buf_size) {
                ret_code = ICE_ERR_CFG;
                goto init_ctrlq_exit;
        }

        cq->sq.next_to_use = 0;
        cq->sq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
        if (ret_code)
                goto init_ctrlq_exit;

        /* allocate buffers in the rings */
        ret_code = ice_alloc_sq_bufs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* initialize base registers */
        ret_code = ice_cfg_sq_regs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* success! */
        cq->sq.count = cq->num_sq_entries;
        goto init_ctrlq_exit;

init_ctrlq_free_rings:
        ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
        return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        if (cq->rq.count > 0) {
                /* queue already initialized */
                ret_code = ICE_ERR_NOT_READY;
                goto init_ctrlq_exit;
        }

        /* verify input for valid configuration */
        if (!cq->num_rq_entries || !cq->rq_buf_size) {
                ret_code = ICE_ERR_CFG;
                goto init_ctrlq_exit;
        }

        cq->rq.next_to_use = 0;
        cq->rq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
        if (ret_code)
                goto init_ctrlq_exit;

        /* allocate buffers in the rings */
        ret_code = ice_alloc_rq_bufs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* initialize base registers */
        ret_code = ice_cfg_rq_regs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* success! */
        cq->rq.count = cq->num_rq_entries;
        goto init_ctrlq_exit;

init_ctrlq_free_rings:
        ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
        return ret_code;
}

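/**
 * ICE_FREE_CQ_BUFS - free the buffers attached to a control queue ring
 * @hw: pointer to the hardware structure
 * @qi: pointer to the specific control queue info
 * @ring: name of the ring to clean up (sq or rq)
 *
 * Frees every posted DMA buffer on the ring, the command buffer tracking
 * list if one was allocated, and the DMA head array itself.
 */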
#define ICE_FREE_CQ_BUFS(hw, qi, ring)                                  \
do {                                                                    \
        int i;                                                          \
        /* free descriptors */                                          \
        for (i = 0; i < (qi)->num_##ring##_entries; i++)                \
                if ((qi)->ring.r.ring##_bi[i].pa)                       \
                        ice_free_dma_mem((hw),                          \
                                         &(qi)->ring.r.ring##_bi[i]);   \
        /* free the buffer info list */                                 \
        if ((qi)->ring.cmd_buf)                                         \
                ice_free(hw, (qi)->ring.cmd_buf);                       \
        /* free DMA head */                                             \
        ice_free(hw, (qi)->ring.dma_head);                              \
} while (0)

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code = ICE_SUCCESS;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        ice_acquire_lock(&cq->sq_lock);

        if (!cq->sq.count) {
                ret_code = ICE_ERR_NOT_READY;
                goto shutdown_sq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, cq->sq.head, 0);
        wr32(hw, cq->sq.tail, 0);
        wr32(hw, cq->sq.len, 0);
        wr32(hw, cq->sq.bal, 0);
        wr32(hw, cq->sq.bah, 0);

        cq->sq.count = 0;       /* to indicate uninitialized queue */

        /* free ring buffers and the ring itself */
        ICE_FREE_CQ_BUFS(hw, cq, sq);
        ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
        ice_release_lock(&cq->sq_lock);
        return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
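 * For example, with an expected API version of 1.5, a firmware minor version
 * more than two above (1.8 and newer) or more than two below (1.2 and older)
 * only triggers an informational message, while a newer major version (2.x)
 * prevents the driver from loading.
 *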
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
        if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
                /* Major API version is newer than expected, don't load */
                ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
                return false;
        } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
                if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
                        ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
                else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
                        ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
        } else {
                /* Major API version is older than expected, log a warning */
                ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
        }
        return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code = ICE_SUCCESS;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        ice_acquire_lock(&cq->rq_lock);

        if (!cq->rq.count) {
                ret_code = ICE_ERR_NOT_READY;
                goto shutdown_rq_out;
        }

        /* Stop Control Queue processing */
        wr32(hw, cq->rq.head, 0);
        wr32(hw, cq->rq.tail, 0);
        wr32(hw, cq->rq.len, 0);
        wr32(hw, cq->rq.bal, 0);
        wr32(hw, cq->rq.bah, 0);

        /* set rq.count to 0 to indicate uninitialized queue */
        cq->rq.count = 0;

        /* free ring buffers and the ring itself */
        ICE_FREE_CQ_BUFS(hw, cq, rq);
        ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
        ice_release_lock(&cq->rq_lock);
        return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
        struct ice_ctl_q_info *cq = &hw->adminq;
        enum ice_status status;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        status = ice_aq_get_fw_ver(hw, NULL);
        if (status)
                goto init_ctrlq_free_rq;

        if (!ice_aq_ver_check(hw)) {
                status = ICE_ERR_FW_API_VER;
                goto init_ctrlq_free_rq;
        }

        return ICE_SUCCESS;

init_ctrlq_free_rq:
        ice_shutdown_rq(hw, cq);
        ice_shutdown_sq(hw, cq);
        return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
        struct ice_ctl_q_info *cq;
        enum ice_status ret_code;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        switch (q_type) {
        case ICE_CTL_Q_ADMIN:
                ice_adminq_init_regs(hw);
                cq = &hw->adminq;
                break;
        case ICE_CTL_Q_MAILBOX:
                ice_mailbox_init_regs(hw);
                cq = &hw->mailboxq;
                break;
        default:
                return ICE_ERR_PARAM;
        }
        cq->qtype = q_type;

        /* verify input for valid configuration */
        if (!cq->num_rq_entries || !cq->num_sq_entries ||
            !cq->rq_buf_size || !cq->sq_buf_size) {
                return ICE_ERR_CFG;
        }

        /* setup SQ command write back timeout */
        cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

        /* allocate the ATQ */
        ret_code = ice_init_sq(hw, cq);
        if (ret_code)
                return ret_code;

        /* allocate the ARQ */
        ret_code = ice_init_rq(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_sq;

        /* success! */
        return ICE_SUCCESS;

init_ctrlq_free_sq:
        ice_shutdown_sq(hw, cq);
        return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
        enum ice_status ret_code;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        /* Init FW admin queue */
        ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
        if (ret_code)
                return ret_code;

        ret_code = ice_init_check_adminq(hw);
        if (ret_code)
                return ret_code;
        /* Init Mailbox queue */
        return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
        ice_init_lock(&cq->sq_lock);
        ice_init_lock(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
        ice_init_ctrlq_locks(&hw->adminq);
        ice_init_ctrlq_locks(&hw->mailboxq);

        return ice_init_all_ctrlq(hw);
}

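/* Illustrative sketch (not part of the driver): a PF driver is expected to
 * fill in the queue sizes before creating the control queues, roughly along
 * the lines below. The entry counts and buffer sizes are placeholder values,
 * not required constants:
 *
 *      hw->adminq.num_sq_entries = 32;
 *      hw->adminq.num_rq_entries = 32;
 *      hw->adminq.sq_buf_size = 4096;
 *      hw->adminq.rq_buf_size = 4096;
 *      (and the same four fields for hw->mailboxq)
 *
 *      if (ice_create_all_ctrlq(hw))
 *              (handle the failure)
 *      ...
 *      ice_destroy_all_ctrlq(hw);
 */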
/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
        struct ice_ctl_q_info *cq;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        switch (q_type) {
        case ICE_CTL_Q_ADMIN:
                cq = &hw->adminq;
                if (ice_check_sq_alive(hw, cq))
                        ice_aq_q_shutdown(hw, true);
                break;
        case ICE_CTL_Q_MAILBOX:
                cq = &hw->mailboxq;
                break;
        default:
                return;
        }

        ice_shutdown_sq(hw, cq);
        ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
        /* Shutdown FW admin queue */
        ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
        /* Shutdown PF-VF Mailbox */
        ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void
ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
        ice_destroy_lock(&cq->sq_lock);
        ice_destroy_lock(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
        /* shut down all the control queues first */
        ice_shutdown_all_ctrlq(hw);

        ice_destroy_ctrlq_locks(&hw->adminq);
        ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        struct ice_ctl_q_ring *sq = &cq->sq;
        u16 ntc = sq->next_to_clean;
        struct ice_sq_cd *details;
        struct ice_aq_desc *desc;

        desc = ICE_CTL_Q_DESC(*sq, ntc);
        details = ICE_CTL_Q_DETAILS(*sq, ntc);

        while (rd32(hw, cq->sq.head) != ntc) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
                ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
                ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
                ntc++;
                if (ntc == sq->count)
                        ntc = 0;
                desc = ICE_CTL_Q_DESC(*sq, ntc);
                details = ICE_CTL_Q_DETAILS(*sq, ntc);
        }

        sq->next_to_clean = ntc;

        return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
        struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
        u16 datalen, flags;

        if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
                return;

        if (!desc)
                return;

        datalen = LE16_TO_CPU(cq_desc->datalen);
        flags = LE16_TO_CPU(cq_desc->flags);

        ice_debug(hw, ICE_DBG_AQ_DESC,
                  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
                  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
                  LE16_TO_CPU(cq_desc->retval));
        ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
                  LE32_TO_CPU(cq_desc->cookie_high),
                  LE32_TO_CPU(cq_desc->cookie_low));
        ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
                  LE32_TO_CPU(cq_desc->params.generic.param0),
                  LE32_TO_CPU(cq_desc->params.generic.param1));
        ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
                  LE32_TO_CPU(cq_desc->params.generic.addr_high),
                  LE32_TO_CPU(cq_desc->params.generic.addr_low));
        /* Dump buffer iff 1) one exists and 2) is either a response indicated
         * by the DD and/or CMP flag set or a command with the RD flag set.
         */
        if (buf && cq_desc->datalen != 0 &&
            (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
             flags & ICE_AQ_FLAG_RD)) {
                ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
                ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
                                min(buf_len, datalen));
        }
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
static enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                       struct ice_aq_desc *desc, void *buf, u16 buf_size,
                       struct ice_sq_cd *cd)
{
        struct ice_dma_mem *dma_buf = NULL;
        struct ice_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        enum ice_status status = ICE_SUCCESS;
        struct ice_sq_cd *details;
        u32 total_delay = 0;
        u16 retval = 0;
        u32 val = 0;

        /* if reset is in progress return a soft error */
        if (hw->reset_ongoing)
                return ICE_ERR_RESET_ONGOING;

        cq->sq_last_status = ICE_AQ_RC_OK;

        if (!cq->sq.count) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "Control Send queue not initialized.\n");
                status = ICE_ERR_AQ_EMPTY;
                goto sq_send_command_error;
        }

        if ((buf && !buf_size) || (!buf && buf_size)) {
                status = ICE_ERR_PARAM;
                goto sq_send_command_error;
        }

        if (buf) {
                if (buf_size > cq->sq_buf_size) {
                        ice_debug(hw, ICE_DBG_AQ_MSG,
                                  "Invalid buffer size for Control Send queue: %d.\n",
                                  buf_size);
                        status = ICE_ERR_INVAL_SIZE;
                        goto sq_send_command_error;
                }

                desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
                if (buf_size > ICE_AQ_LG_BUF)
                        desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
        }

        val = rd32(hw, cq->sq.head);
        if (val >= cq->num_sq_entries) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "head overrun at %d in the Control Send Queue ring\n",
                          val);
                status = ICE_ERR_AQ_EMPTY;
                goto sq_send_command_error;
        }

        details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
        if (cd)
                *details = *cd;
        else
                ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

        /* Call clean and check queue available function to reclaim the
         * descriptors that were processed by FW/MBX; the function returns the
         * number of desc available. The clean function called here could be
         * called in a separate thread in case of asynchronous completions.
         */
        if (ice_clean_sq(hw, cq) == 0) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "Error: Control Send Queue is full.\n");
                status = ICE_ERR_AQ_FULL;
                goto sq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
                   ICE_NONDMA_TO_DMA);

        /* if buf is not NULL assume indirect command */
        if (buf) {
                dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
                /* copy the user buf into the respective DMA buf */
                ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buf_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.generic.addr_high =
                        CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
                desc_on_ring->params.generic.addr_low =
                        CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
        }

        /* Debug desc and buffer */
        ice_debug(hw, ICE_DBG_AQ_DESC,
                  "ATQ: Control Send queue desc and buffer:\n");

        ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

        (cq->sq.next_to_use)++;
        if (cq->sq.next_to_use == cq->sq.count)
                cq->sq.next_to_use = 0;
        wr32(hw, cq->sq.tail, cq->sq.next_to_use);

        do {
                if (ice_sq_done(hw, cq))
                        break;

                ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
                total_delay++;
        } while (total_delay < cq->sq_cmd_timeout);

        /* if ready, copy the desc back to temp */
        if (ice_sq_done(hw, cq)) {
                ice_memcpy(desc, desc_on_ring, sizeof(*desc),
                           ICE_DMA_TO_NONDMA);
                if (buf) {
                        /* get returned length to copy */
                        u16 copy_size = LE16_TO_CPU(desc->datalen);

                        if (copy_size > buf_size) {
                                ice_debug(hw, ICE_DBG_AQ_MSG,
                                          "Return len %d > than buf len %d\n",
                                          copy_size, buf_size);
                                status = ICE_ERR_AQ_ERROR;
                        } else {
                                ice_memcpy(buf, dma_buf->va, copy_size,
                                           ICE_DMA_TO_NONDMA);
                        }
                }
                retval = LE16_TO_CPU(desc->retval);
                if (retval) {
                        ice_debug(hw, ICE_DBG_AQ_MSG,
                                  "Control Send Queue command 0x%04X completed with error 0x%X\n",
                                  LE16_TO_CPU(desc->opcode),
                                  retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if (!status && retval != ICE_AQ_RC_OK)
                        status = ICE_ERR_AQ_ERROR;
                cq->sq_last_status = (enum ice_aq_err)retval;
        }

        ice_debug(hw, ICE_DBG_AQ_MSG,
                  "ATQ: desc and buffer writeback:\n");

        ice_debug_cq(hw, (void *)desc, buf, buf_size);

        /* save writeback AQ if requested */
        if (details->wb_desc)
                ice_memcpy(details->wb_desc, desc_on_ring,
                           sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

        /* update the error if time out occurred */
        if (!cmd_completed) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "Control Send Queue Writeback timeout.\n");
                status = ICE_ERR_AQ_TIMEOUT;
        }

sq_send_command_error:
        return status;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                struct ice_aq_desc *desc, void *buf, u16 buf_size,
                struct ice_sq_cd *cd)
{
        enum ice_status status = ICE_SUCCESS;

        /* if reset is in progress return a soft error */
        if (hw->reset_ongoing)
                return ICE_ERR_RESET_ONGOING;

        ice_acquire_lock(&cq->sq_lock);
        status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
        ice_release_lock(&cq->sq_lock);

        return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
        /* zero out the desc */
        ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}

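/* Illustrative sketch (not part of the driver): a direct (buffer-less)
 * AdminQ command built with the helper above could be issued roughly like
 * this, where 'opcode' stands for any valid admin queue opcode:
 *
 *      struct ice_aq_desc desc;
 *      enum ice_status status;
 *
 *      ice_fill_dflt_direct_cmd_desc(&desc, opcode);
 *      status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 */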
/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                  struct ice_rq_event_info *e, u16 *pending)
{
        u16 ntc = cq->rq.next_to_clean;
        enum ice_status ret_code = ICE_SUCCESS;
        struct ice_aq_desc *desc;
        struct ice_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        ice_acquire_lock(&cq->rq_lock);

        if (!cq->rq.count) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "Control Receive queue not initialized.\n");
                ret_code = ICE_ERR_AQ_EMPTY;
                goto clean_rq_elem_err;
        }

        /* set next_to_use to head */
        ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = ICE_ERR_AQ_NO_WORK;
                goto clean_rq_elem_out;
        }

        /* now clean the next descriptor */
        desc = ICE_CTL_Q_DESC(cq->rq, ntc);
        desc_idx = ntc;

        cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & ICE_AQ_FLAG_ERR) {
                ret_code = ICE_ERR_AQ_ERROR;
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "Control Receive Queue Event 0x%04X received with error 0x%X\n",
                          LE16_TO_CPU(desc->opcode),
                          cq->rq_last_status);
        }
        ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf && e->msg_len)
                ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
                           e->msg_len, ICE_DMA_TO_NONDMA);

        ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

        ice_debug_cq(hw, (void *)desc, e->msg_buf,
                     cq->rq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message size
         */
        bi = &cq->rq.r.rq_bi[ntc];
        ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

        desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
        if (cq->rq_buf_size > ICE_AQ_LG_BUF)
                desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16(bi->size);
        desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
        desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, cq->rq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == cq->num_rq_entries)
                ntc = 0;
        cq->rq.next_to_clean = ntc;
        cq->rq.next_to_use = ntu;

clean_rq_elem_out:
        /* Set pending if needed, unlock and return */
        if (pending) {
                /* re-read HW head to calculate actual pending messages */
                ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
                *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
        }
clean_rq_elem_err:
        ice_release_lock(&cq->rq_lock);

        return ret_code;
}
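
/* Illustrative sketch (not part of the driver): a caller draining pending
 * AdminQ events might loop roughly as below. The buffer allocation helper
 * (ice_malloc) and the handle_event() call are assumptions about the
 * caller's environment, not something this file provides:
 *
 *      struct ice_rq_event_info event;
 *      u16 pending = 0;
 *
 *      ice_memset(&event, 0, sizeof(event), ICE_NONDMA_MEM);
 *      event.buf_len = cq->rq_buf_size;
 *      event.msg_buf = (u8 *)ice_malloc(hw, event.buf_len);
 *
 *      do {
 *              if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *                      break;
 *              handle_event(&event);
 *      } while (pending);
 */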