/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)                         \
do {                                                            \
        (qinfo)->sq.head = prefix##_ATQH;                       \
        (qinfo)->sq.tail = prefix##_ATQT;                       \
        (qinfo)->sq.len = prefix##_ATQLEN;                      \
        (qinfo)->sq.bah = prefix##_ATQBAH;                      \
        (qinfo)->sq.bal = prefix##_ATQBAL;                      \
        (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;        \
        (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
        (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;  \
        (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;           \
        (qinfo)->rq.head = prefix##_ARQH;                       \
        (qinfo)->rq.tail = prefix##_ARQT;                       \
        (qinfo)->rq.len = prefix##_ARQLEN;                      \
        (qinfo)->rq.bah = prefix##_ARQBAH;                      \
        (qinfo)->rq.bal = prefix##_ARQBAL;                      \
        (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;        \
        (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
        (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;  \
        (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;           \
} while (0)
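
/*
 * Illustration (not part of the driver code): the macro token-pastes the
 * prefix onto each register name, so for the PF admin queue,
 * ICE_CQ_INIT_REGS(cq, PF_FW) expands the first assignments to:
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->sq.tail = PF_FW_ATQT;
 *	cq->sq.len  = PF_FW_ATQLEN;
 *
 * and likewise for the receive side (PF_FW_ARQH, PF_FW_ARQT, ...).
 */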

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
        struct ice_ctl_q_info *cq = &hw->adminq;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
        struct ice_ctl_q_info *cq = &hw->mailboxq;

        ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_sb_init_regs - Initialize Sideband registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_sb_init_regs(struct ice_hw *hw)
{
        struct ice_ctl_q_info *cq = &hw->sbq;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        ICE_CQ_INIT_REGS(cq, PF_SB);
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the send queue is enabled, false otherwise.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        /* check both queue-length and queue-enable fields */
        if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
                return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
                                                cq->sq.len_ena_mask)) ==
                        (cq->num_sq_entries | cq->sq.len_ena_mask);

        return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

        cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
        if (!cq->sq.desc_buf.va)
                return ICE_ERR_NO_MEMORY;

        cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
                                    sizeof(struct ice_sq_cd));
        if (!cq->sq.cmd_buf) {
                ice_free_dma_mem(hw, &cq->sq.desc_buf);
                return ICE_ERR_NO_MEMORY;
        }

        return ICE_SUCCESS;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

        cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
        if (!cq->rq.desc_buf.va)
                return ICE_ERR_NO_MEMORY;
        return ICE_SUCCESS;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
        ice_free_dma_mem(hw, &ring->desc_buf);
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */
        cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
                                     sizeof(cq->rq.desc_buf));
        if (!cq->rq.dma_head)
                return ICE_ERR_NO_MEMORY;
        cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

        /* allocate the mapped buffers */
        for (i = 0; i < cq->num_rq_entries; i++) {
                struct ice_aq_desc *desc;
                struct ice_dma_mem *bi;

                bi = &cq->rq.r.rq_bi[i];
                bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
                if (!bi->va)
                        goto unwind_alloc_rq_bufs;

                /* now configure the descriptors for use */
                desc = ICE_CTL_Q_DESC(cq->rq, i);

                desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
                if (cq->rq_buf_size > ICE_AQ_LG_BUF)
                        desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with the Admin queue design; there
                 * is no register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16(bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.generic.addr_high =
                        CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
                desc->params.generic.addr_low =
                        CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
                desc->params.generic.param0 = 0;
                desc->params.generic.param1 = 0;
        }
        return ICE_SUCCESS;

unwind_alloc_rq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
        cq->rq.r.rq_bi = NULL;
        ice_free(hw, cq->rq.dma_head);
        cq->rq.dma_head = NULL;

        return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
                                     sizeof(cq->sq.desc_buf));
        if (!cq->sq.dma_head)
                return ICE_ERR_NO_MEMORY;
        cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

        /* allocate the mapped buffers */
        for (i = 0; i < cq->num_sq_entries; i++) {
                struct ice_dma_mem *bi;

                bi = &cq->sq.r.sq_bi[i];
                bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
                if (!bi->va)
                        goto unwind_alloc_sq_bufs;
        }
        return ICE_SUCCESS;

unwind_alloc_sq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
        cq->sq.r.sq_bi = NULL;
        ice_free(hw, cq->sq.dma_head);
        cq->sq.dma_head = NULL;

        return ICE_ERR_NO_MEMORY;
}

/**
 * ice_cfg_cq_regs - setup the Control queue registers
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific Control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Program the base address and length registers for a control queue ring
 * and verify that the configuration took effect
 */
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
        /* Clear Head and Tail */
        wr32(hw, ring->head, 0);
        wr32(hw, ring->tail, 0);

        /* set starting point */
        wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
        wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
        wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

        /* Check one register to verify that config was applied */
        if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
                return ICE_ERR_AQ_ERROR;

        return ICE_SUCCESS;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status status;

        status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
        if (status)
                return status;

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

        return ICE_SUCCESS;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)                                  \
do {                                                                    \
        /* free any DMA buffers posted to the ring */                   \
        if ((qi)->ring.r.ring##_bi) {                                   \
                int i;                                                  \
                                                                        \
                for (i = 0; i < (qi)->num_##ring##_entries; i++)        \
                        if ((qi)->ring.r.ring##_bi[i].pa)               \
                                ice_free_dma_mem((hw),                  \
                                        &(qi)->ring.r.ring##_bi[i]);    \
        }                                                               \
        /* free the command details list */                             \
        if ((qi)->ring.cmd_buf)                                         \
                ice_free(hw, (qi)->ring.cmd_buf);                       \
        /* free DMA head */                                             \
        ice_free(hw, (qi)->ring.dma_head);                              \
} while (0)
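
/*
 * Illustration (not part of the driver code): ICE_FREE_CQ_BUFS(hw, cq, sq)
 * token-pastes "sq" into the accessors, so it walks
 * cq->sq.r.sq_bi[0 .. num_sq_entries - 1], frees each entry with a non-zero
 * physical address, then frees cq->sq.cmd_buf and cq->sq.dma_head. The "rq"
 * variant does the same on the receive side; the rq never allocates a
 * cmd_buf, so that branch is simply skipped there.
 */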

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        if (cq->sq.count > 0) {
                /* queue already initialized */
                ret_code = ICE_ERR_NOT_READY;
                goto init_ctrlq_exit;
        }

        /* verify input for valid configuration */
        if (!cq->num_sq_entries || !cq->sq_buf_size) {
                ret_code = ICE_ERR_CFG;
                goto init_ctrlq_exit;
        }

        cq->sq.next_to_use = 0;
        cq->sq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
        if (ret_code)
                goto init_ctrlq_exit;

        /* allocate buffers in the rings */
        ret_code = ice_alloc_sq_bufs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* initialize base registers */
        ret_code = ice_cfg_sq_regs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* success! */
        cq->sq.count = cq->num_sq_entries;
        goto init_ctrlq_exit;

init_ctrlq_free_rings:
        ICE_FREE_CQ_BUFS(hw, cq, sq);
        ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
        return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        if (cq->rq.count > 0) {
                /* queue already initialized */
                ret_code = ICE_ERR_NOT_READY;
                goto init_ctrlq_exit;
        }

        /* verify input for valid configuration */
        if (!cq->num_rq_entries || !cq->rq_buf_size) {
                ret_code = ICE_ERR_CFG;
                goto init_ctrlq_exit;
        }

        cq->rq.next_to_use = 0;
        cq->rq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
        if (ret_code)
                goto init_ctrlq_exit;

        /* allocate buffers in the rings */
        ret_code = ice_alloc_rq_bufs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* initialize base registers */
        ret_code = ice_cfg_rq_regs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* success! */
        cq->rq.count = cq->num_rq_entries;
        goto init_ctrlq_exit;

init_ctrlq_free_rings:
        ICE_FREE_CQ_BUFS(hw, cq, rq);
        ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
        return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code = ICE_SUCCESS;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        ice_acquire_lock(&cq->sq_lock);

        if (!cq->sq.count) {
                ret_code = ICE_ERR_NOT_READY;
                goto shutdown_sq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, cq->sq.head, 0);
        wr32(hw, cq->sq.tail, 0);
        wr32(hw, cq->sq.len, 0);
        wr32(hw, cq->sq.bal, 0);
        wr32(hw, cq->sq.bah, 0);

        cq->sq.count = 0;       /* to indicate uninitialized queue */

        /* free ring buffers and the ring itself */
        ICE_FREE_CQ_BUFS(hw, cq, sq);
        ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
        ice_release_lock(&cq->sq_lock);
        return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
        if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
                /* Major API version is newer than expected, don't load */
                ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
                return false;
        } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
                if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
                        ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
                else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
                        ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
        } else {
                /* Major API version is older than expected, log a warning */
                ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
        }
        return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code = ICE_SUCCESS;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        ice_acquire_lock(&cq->rq_lock);

        if (!cq->rq.count) {
                ret_code = ICE_ERR_NOT_READY;
                goto shutdown_rq_out;
        }

        /* Stop Control Queue processing */
        wr32(hw, cq->rq.head, 0);
        wr32(hw, cq->rq.tail, 0);
        wr32(hw, cq->rq.len, 0);
        wr32(hw, cq->rq.bal, 0);
        wr32(hw, cq->rq.bah, 0);

        /* set rq.count to 0 to indicate uninitialized queue */
        cq->rq.count = 0;

        /* free ring buffers and the ring itself */
        ICE_FREE_CQ_BUFS(hw, cq, rq);
        ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
        ice_release_lock(&cq->rq_lock);
        return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
        struct ice_ctl_q_info *cq = &hw->adminq;
        enum ice_status status;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        status = ice_aq_get_fw_ver(hw, NULL);
        if (status)
                goto init_ctrlq_free_rq;

        if (!ice_aq_ver_check(hw)) {
                status = ICE_ERR_FW_API_VER;
                goto init_ctrlq_free_rq;
        }

        return ICE_SUCCESS;

init_ctrlq_free_rq:
        ice_shutdown_rq(hw, cq);
        ice_shutdown_sq(hw, cq);
        return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
        struct ice_ctl_q_info *cq;
        enum ice_status ret_code;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        switch (q_type) {
        case ICE_CTL_Q_ADMIN:
                ice_adminq_init_regs(hw);
                cq = &hw->adminq;
                break;
        case ICE_CTL_Q_SB:
                ice_sb_init_regs(hw);
                cq = &hw->sbq;
                break;
        case ICE_CTL_Q_MAILBOX:
                ice_mailbox_init_regs(hw);
                cq = &hw->mailboxq;
                break;
        default:
                return ICE_ERR_PARAM;
        }
        cq->qtype = q_type;

        /* verify input for valid configuration */
        if (!cq->num_rq_entries || !cq->num_sq_entries ||
            !cq->rq_buf_size || !cq->sq_buf_size) {
                return ICE_ERR_CFG;
        }

        /* setup SQ command write back timeout */
        cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

        /* allocate the ATQ */
        ret_code = ice_init_sq(hw, cq);
        if (ret_code)
                return ret_code;

        /* allocate the ARQ */
        ret_code = ice_init_rq(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_sq;

        /* success! */
        return ICE_SUCCESS;

init_ctrlq_free_sq:
        ice_shutdown_sq(hw, cq);
        return ret_code;
}

/**
 * ice_is_sbq_supported - is the sideband queue supported
 * @hw: pointer to the hardware structure
 *
 * Returns true if the sideband control queue interface is
 * supported for the device, false otherwise
 */
static bool ice_is_sbq_supported(struct ice_hw *hw)
{
        return ice_is_generic_mac(hw);
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
        struct ice_ctl_q_info *cq;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        switch (q_type) {
        case ICE_CTL_Q_ADMIN:
                cq = &hw->adminq;
                if (ice_check_sq_alive(hw, cq))
                        ice_aq_q_shutdown(hw, true);
                break;
        case ICE_CTL_Q_SB:
                cq = &hw->sbq;
                break;
        case ICE_CTL_Q_MAILBOX:
                cq = &hw->mailboxq;
                break;
        default:
                return;
        }

        ice_shutdown_sq(hw, cq);
        ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
        /* Shutdown FW admin queue */
        ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
        /* Shutdown PHY Sideband */
        if (ice_is_sbq_supported(hw))
                ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
        /* Shutdown PF-VF Mailbox */
        ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
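
/*
 * Illustrative sketch (not part of this file): per the notes above, a caller
 * handling a reset event can bounce the queues without touching the locks:
 *
 *	ice_shutdown_all_ctrlq(hw);
 *	... wait for the reset to complete ...
 *	status = ice_init_all_ctrlq(hw);
 *
 * The lock create/destroy pair (ice_create_all_ctrlq and
 * ice_destroy_all_ctrlq) is reserved for driver load and unload.
 */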

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
        enum ice_status status;
        u32 retry = 0;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        /* Init FW admin queue */
        do {
                status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
                if (status)
                        return status;

                status = ice_init_check_adminq(hw);
                if (status != ICE_ERR_AQ_FW_CRITICAL)
                        break;

                ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
                ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
                ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
        } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

        if (status)
                return status;
        /* The sideband control queue (SBQ) interface is not supported on some
         * devices. Initialize it if supported, else fall back to the admin
         * queue interface
         */
        if (ice_is_sbq_supported(hw)) {
                status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
                if (status)
                        return status;
        }
        /* Init Mailbox queue */
        return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
        ice_init_lock(&cq->sq_lock);
        ice_init_lock(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
        ice_init_ctrlq_locks(&hw->adminq);
        if (ice_is_sbq_supported(hw))
                ice_init_ctrlq_locks(&hw->sbq);
        ice_init_ctrlq_locks(&hw->mailboxq);

        return ice_init_all_ctrlq(hw);
}
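
/*
 * Illustrative sketch (not part of this file), using hypothetical size
 * constants: before ice_create_all_ctrlq() runs at driver load, the caller
 * must have populated the queue geometry for every control queue, e.g.:
 *
 *	hw->adminq.num_sq_entries = EXAMPLE_AQ_LEN;       hypothetical
 *	hw->adminq.num_rq_entries = EXAMPLE_AQ_LEN;
 *	hw->adminq.sq_buf_size = EXAMPLE_AQ_MAX_BUF_LEN;  hypothetical
 *	hw->adminq.rq_buf_size = EXAMPLE_AQ_MAX_BUF_LEN;
 *	... and the same for hw->mailboxq (and hw->sbq when supported) ...
 *
 *	status = ice_create_all_ctrlq(hw);
 *
 * Otherwise ice_init_ctrlq() rejects the queue with ICE_ERR_CFG.
 */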

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
        ice_destroy_lock(&cq->sq_lock);
        ice_destroy_lock(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
        /* shut down all the control queues first */
        ice_shutdown_all_ctrlq(hw);

        ice_destroy_ctrlq_locks(&hw->adminq);
        if (ice_is_sbq_supported(hw))
                ice_destroy_ctrlq_locks(&hw->sbq);
        ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans the Admin Send Queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        struct ice_ctl_q_ring *sq = &cq->sq;
        u16 ntc = sq->next_to_clean;
        struct ice_sq_cd *details;
        struct ice_aq_desc *desc;

        desc = ICE_CTL_Q_DESC(*sq, ntc);
        details = ICE_CTL_Q_DETAILS(*sq, ntc);

        while (rd32(hw, cq->sq.head) != ntc) {
                ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
                ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
                ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
                ntc++;
                if (ntc == sq->count)
                        ntc = 0;
                desc = ICE_CTL_Q_DESC(*sq, ntc);
                details = ICE_CTL_Q_DETAILS(*sq, ntc);
        }

        sq->next_to_clean = ntc;

        return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
        struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
        u16 datalen, flags;

        if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
                return;

        if (!desc)
                return;

        datalen = LE16_TO_CPU(cq_desc->datalen);
        flags = LE16_TO_CPU(cq_desc->flags);

        ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
                  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
                  LE16_TO_CPU(cq_desc->retval));
        ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
                  LE32_TO_CPU(cq_desc->cookie_high),
                  LE32_TO_CPU(cq_desc->cookie_low));
        ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
                  LE32_TO_CPU(cq_desc->params.generic.param0),
                  LE32_TO_CPU(cq_desc->params.generic.param1));
        ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
                  LE32_TO_CPU(cq_desc->params.generic.addr_high),
                  LE32_TO_CPU(cq_desc->params.generic.addr_low));
        /* Dump buffer iff 1) one exists and 2) is either a response indicated
         * by the DD and/or CMP flag set or a command with the RD flag set.
         */
        if (buf && cq_desc->datalen != 0 &&
            (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
             flags & ICE_AQ_FLAG_RD)) {
                ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
                ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
                                MIN_T(u16, buf_len, datalen));
        }
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                       struct ice_aq_desc *desc, void *buf, u16 buf_size,
                       struct ice_sq_cd *cd)
{
        struct ice_dma_mem *dma_buf = NULL;
        struct ice_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        enum ice_status status = ICE_SUCCESS;
        struct ice_sq_cd *details;
        u32 total_delay = 0;
        u16 retval = 0;
        u32 val = 0;

        /* if reset is in progress return a soft error */
        if (hw->reset_ongoing)
                return ICE_ERR_RESET_ONGOING;

        cq->sq_last_status = ICE_AQ_RC_OK;

        if (!cq->sq.count) {
                ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
                status = ICE_ERR_AQ_EMPTY;
                goto sq_send_command_error;
        }

        if ((buf && !buf_size) || (!buf && buf_size)) {
                status = ICE_ERR_PARAM;
                goto sq_send_command_error;
        }

        if (buf) {
                if (buf_size > cq->sq_buf_size) {
                        ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
                                  buf_size);
                        status = ICE_ERR_INVAL_SIZE;
                        goto sq_send_command_error;
                }

                desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
                if (buf_size > ICE_AQ_LG_BUF)
                        desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
        }

        val = rd32(hw, cq->sq.head);
        if (val >= cq->num_sq_entries) {
                ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
                          val);
                status = ICE_ERR_AQ_EMPTY;
                goto sq_send_command_error;
        }

        details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
        if (cd)
                *details = *cd;
        else
                ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

        /* Clean the queue to reclaim descriptors that FW/MBX has already
         * processed; ice_clean_sq() returns the number of descriptors now
         * available. With asynchronous completions the clean could instead
         * run in a separate thread.
         */
        if (ice_clean_sq(hw, cq) == 0) {
                ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
                status = ICE_ERR_AQ_FULL;
                goto sq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
                   ICE_NONDMA_TO_DMA);

        /* if buf is not NULL assume indirect command */
        if (buf) {
                dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
                /* copy the user buf into the respective DMA buf */
                ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buf_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.generic.addr_high =
                        CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
                desc_on_ring->params.generic.addr_low =
                        CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
        }

        /* Debug desc and buffer */
        ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

        ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

        (cq->sq.next_to_use)++;
        if (cq->sq.next_to_use == cq->sq.count)
                cq->sq.next_to_use = 0;
        wr32(hw, cq->sq.tail, cq->sq.next_to_use);

        do {
                if (ice_sq_done(hw, cq))
                        break;

                ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
                total_delay++;
        } while (total_delay < cq->sq_cmd_timeout);

        /* if ready, copy the desc back to temp */
        if (ice_sq_done(hw, cq)) {
                ice_memcpy(desc, desc_on_ring, sizeof(*desc),
                           ICE_DMA_TO_NONDMA);
                if (buf) {
                        /* get returned length to copy */
                        u16 copy_size = LE16_TO_CPU(desc->datalen);

                        if (copy_size > buf_size) {
                                ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
                                          copy_size, buf_size);
                                status = ICE_ERR_AQ_ERROR;
                        } else {
                                ice_memcpy(buf, dma_buf->va, copy_size,
                                           ICE_DMA_TO_NONDMA);
                        }
                }
                retval = LE16_TO_CPU(desc->retval);
                if (retval) {
                        ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
                                  LE16_TO_CPU(desc->opcode),
                                  retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if (!status && retval != ICE_AQ_RC_OK)
                        status = ICE_ERR_AQ_ERROR;
                cq->sq_last_status = (enum ice_aq_err)retval;
        }

        ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

        ice_debug_cq(hw, (void *)desc, buf, buf_size);

        /* save writeback AQ if requested */
        if (details->wb_desc)
                ice_memcpy(details->wb_desc, desc_on_ring,
                           sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

        /* update the error if a timeout occurred */
        if (!cmd_completed) {
                if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
                    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
                        ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
                        status = ICE_ERR_AQ_FW_CRITICAL;
                } else {
                        ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
                        status = ICE_ERR_AQ_TIMEOUT;
                }
        }

sq_send_command_error:
        return status;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                struct ice_aq_desc *desc, void *buf, u16 buf_size,
                struct ice_sq_cd *cd)
{
        enum ice_status status = ICE_SUCCESS;

        /* if reset is in progress return a soft error */
        if (hw->reset_ongoing)
                return ICE_ERR_RESET_ONGOING;

        ice_acquire_lock(&cq->sq_lock);
        status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
        ice_release_lock(&cq->sq_lock);

        return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
        /* zero out the desc */
        ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}
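
/*
 * Illustrative sketch (not part of this file): sending a direct
 * (buffer-less) command through the admin queue. The opcode value here is
 * hypothetical; real opcodes are defined in ice_adminq_cmd.h.
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, example_opcode);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *
 * An indirect command passes a buffer and its size instead of NULL/0;
 * ice_sq_send_cmd_nolock() then sets ICE_AQ_FLAG_BUF (plus ICE_AQ_FLAG_LB
 * for large buffers) and fills in the buffer's DMA address.
 */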

/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                  struct ice_rq_event_info *e, u16 *pending)
{
        u16 ntc = cq->rq.next_to_clean;
        enum ice_aq_err rq_last_status;
        enum ice_status ret_code = ICE_SUCCESS;
        struct ice_aq_desc *desc;
        struct ice_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        ice_acquire_lock(&cq->rq_lock);

        if (!cq->rq.count) {
                ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
                ret_code = ICE_ERR_AQ_EMPTY;
                goto clean_rq_elem_err;
        }

        /* set next_to_use to head */
        ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = ICE_ERR_AQ_NO_WORK;
                goto clean_rq_elem_out;
        }

        /* now clean the next descriptor */
        desc = ICE_CTL_Q_DESC(cq->rq, ntc);
        desc_idx = ntc;

        rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & ICE_AQ_FLAG_ERR) {
                ret_code = ICE_ERR_AQ_ERROR;
                ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
                          LE16_TO_CPU(desc->opcode), rq_last_status);
        }
        ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = MIN_T(u16, datalen, e->buf_len);
        if (e->msg_buf && e->msg_len)
                ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
                           e->msg_len, ICE_DMA_TO_NONDMA);

        ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

        ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message size
         */
        bi = &cq->rq.r.rq_bi[ntc];
        ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

        desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
        if (cq->rq_buf_size > ICE_AQ_LG_BUF)
                desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16(bi->size);
        desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
        desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, cq->rq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == cq->num_rq_entries)
                ntc = 0;
        cq->rq.next_to_clean = ntc;
        cq->rq.next_to_use = ntu;

clean_rq_elem_out:
        /* Set pending if needed, unlock and return */
        if (pending) {
                /* re-read HW head to calculate actual pending messages */
                ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
                *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
        }
clean_rq_elem_err:
        ice_release_lock(&cq->rq_lock);

        return ret_code;
}
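
/*
 * Illustrative sketch (not part of this file): draining the admin receive
 * queue from a caller's service task. "buf" stands in for a hypothetical
 * caller-owned buffer of at least rq_buf_size bytes.
 *
 *	struct ice_rq_event_info event;
 *	u16 pending;
 *
 *	event.buf_len = hw->adminq.rq_buf_size;
 *	event.msg_buf = buf;
 *	do {
 *		if (ice_clean_rq_elem(hw, &hw->adminq, &event, &pending))
 *			break;	returns ICE_ERR_AQ_NO_WORK when empty
 *		... dispatch on LE16_TO_CPU(event.desc.opcode) ...
 *	} while (pending);
 */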