net/ena/base: add accelerated LLQ mode
drivers/net/ena/base/ena_eth_com.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include "ena_eth_com.h"

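/* Return the completion descriptor at the CQ head, or NULL if its phase bit
 * does not match the expected phase (i.e. the device has not written it
 * yet). A dma_rmb() ensures the rest of the descriptor is read only after
 * the phase bit.
 */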
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        struct ena_com_io_cq *io_cq)
{
        struct ena_eth_io_rx_cdesc_base *cdesc;
        u16 expected_phase, head_masked;
        u16 desc_phase;

        head_masked = io_cq->head & (io_cq->q_depth - 1);
        expected_phase = io_cq->phase;

        cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
                        + (head_masked * io_cq->cdesc_entry_size_in_bytes));

        desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
                        ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

        if (desc_phase != expected_phase)
                return NULL;

        /* Make sure we read the rest of the descriptor after the phase bit
         * has been read
         */
        dma_rmb();

        return cdesc;
}

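/* Return the host-memory SQ descriptor at the current tail */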
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
        u16 tail_masked;
        u32 offset;

        tail_masked = io_sq->tail & (io_sq->q_depth - 1);

        offset = tail_masked * io_sq->desc_entry_size;

        return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

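/* Copy a completed bounce buffer (one LLQ descriptor list entry) into
 * device memory at the current tail, consuming one entry from the TX burst
 * budget when the device enforces one, then advance the tail and flip the
 * phase bit on wrap-around.
 */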
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
                                                     u8 *bounce_buffer)
{
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        u16 dst_tail_mask;
        u32 dst_offset;

        dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
        dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

        if (is_llq_max_tx_burst_exists(io_sq)) {
                if (unlikely(!io_sq->entries_in_tx_burst_left)) {
                        ena_trc_err("Error: trying to send more packets than tx burst allows\n");
                        return ENA_COM_NO_SPACE;
                }

                io_sq->entries_in_tx_burst_left--;
                ena_trc_dbg("decreasing entries_in_tx_burst_left of queue %d to %d\n",
                            io_sq->qid, io_sq->entries_in_tx_burst_left);
        }

        /* Make sure everything was written into the bounce buffer before
         * writing the bounce buffer to the device
         */
        wmb();

        /* The line is completed. Copy it to dev */
        ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
                                bounce_buffer,
                                llq_info->desc_list_entry_size);

        io_sq->tail++;

        /* Switch phase bit in case of wrap around */
        if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
                io_sq->phase ^= 1;

        return ENA_COM_OK;
}

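/* For LLQ SQs, copy the packet header into the bounce buffer, right after
 * the slots reserved for descriptors. A no-op for host-memory queues.
 */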
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
                                                 u8 *header_src,
                                                 u16 header_len)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
        u16 header_offset;

        if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
                return ENA_COM_OK;

        header_offset =
                llq_info->descs_num_before_header * io_sq->desc_entry_size;

        if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
                ena_trc_err("Trying to write a header larger than the LLQ entry can accommodate\n");
                return ENA_COM_FAULT;
        }

        if (unlikely(!bounce_buffer)) {
                ena_trc_err("Bounce buffer is NULL\n");
                return ENA_COM_FAULT;
        }

        memcpy(bounce_buffer + header_offset, header_src, header_len);

        return ENA_COM_OK;
}

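/* Return the next free descriptor slot inside the current bounce buffer
 * and consume one slot from the current descriptor line.
 */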
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        u8 *bounce_buffer;
        void *sq_desc;

        bounce_buffer = pkt_ctrl->curr_bounce_buf;

        if (unlikely(!bounce_buffer)) {
                ena_trc_err("Bounce buffer is NULL\n");
                return NULL;
        }

        sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
        pkt_ctrl->idx++;
        pkt_ctrl->descs_left_in_line--;

        return sq_desc;
}

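/* If the current bounce buffer holds any descriptors, flush it to the
 * device and switch to a fresh, zeroed one. A no-op for host-memory
 * queues.
 */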
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        int rc;

        if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
                return ENA_COM_OK;

        /* bounce buffer was used, so write it and get a new one */
        if (pkt_ctrl->idx) {
                rc = ena_com_write_bounce_buffer_to_dev(io_sq,
                                                        pkt_ctrl->curr_bounce_buf);
                if (unlikely(rc))
                        return rc;

                pkt_ctrl->curr_bounce_buf =
                        ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
                memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
                       0x0, llq_info->desc_list_entry_size);
        }

        pkt_ctrl->idx = 0;
        pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
        return ENA_COM_OK;
}

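/* Pick the descriptor source according to the queue placement policy */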
static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                return get_sq_desc_llq(io_sq);

        return get_sq_desc_regular_queue(io_sq);
}

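/* Advance an LLQ SQ: once the current descriptor line is exhausted, write
 * it to the device and start a new bounce buffer with a recalculated
 * number of descriptors per line.
 */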
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        int rc;

        if (!pkt_ctrl->descs_left_in_line) {
                rc = ena_com_write_bounce_buffer_to_dev(io_sq,
                                                        pkt_ctrl->curr_bounce_buf);
                if (unlikely(rc))
                        return rc;

                pkt_ctrl->curr_bounce_buf =
                        ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
                memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
                       0x0, llq_info->desc_list_entry_size);

                pkt_ctrl->idx = 0;
                if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
                        pkt_ctrl->descs_left_in_line = 1;
                else
                        pkt_ctrl->descs_left_in_line =
                                llq_info->desc_list_entry_size / io_sq->desc_entry_size;
        }

        return ENA_COM_OK;
}

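/* Advance the SQ tail. LLQ queues go through the bounce-buffer path;
 * host-memory queues simply bump the tail and flip the phase bit on
 * wrap-around.
 */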
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                return ena_com_sq_update_llq_tail(io_sq);

        io_sq->tail++;

        /* Switch phase bit in case of wrap around */
        if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
                io_sq->phase ^= 1;

        return ENA_COM_OK;
}

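/* Translate a completion descriptor index into a pointer within the CQ
 * descriptor ring (the index is masked by the queue depth).
 */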
static struct ena_eth_io_rx_cdesc_base *
        ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
        idx &= (io_cq->q_depth - 1);
        return (struct ena_eth_io_rx_cdesc_base *)
                ((uintptr_t)io_cq->cdesc_addr.virt_addr +
                idx * io_cq->cdesc_entry_size_in_bytes);
}

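/* Walk the CQ until a descriptor with the LAST bit set is found. When a
 * complete packet is available, return its descriptor count and store the
 * index of its first descriptor in first_cdesc_idx; otherwise accumulate
 * the partial count and return 0.
 */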
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
                                           u16 *first_cdesc_idx)
{
        struct ena_eth_io_rx_cdesc_base *cdesc;
        u16 count = 0, head_masked;
        u32 last = 0;

        do {
                cdesc = ena_com_get_next_rx_cdesc(io_cq);
                if (!cdesc)
                        break;

                ena_com_cq_inc_head(io_cq);
                count++;
                last = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
                        ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
        } while (!last);

        if (last) {
                *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
                count += io_cq->cur_rx_pkt_cdesc_count;

                head_masked = io_cq->head & (io_cq->q_depth - 1);

                io_cq->cur_rx_pkt_cdesc_count = 0;
                io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

                ena_trc_dbg("ENA q_id: %d, packet is completed; first desc idx: %u, descs: %d\n",
                            io_cq->qid, *first_cdesc_idx, count);
        } else {
                io_cq->cur_rx_pkt_cdesc_count += count;
                count = 0;
        }

        return count;
}

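/* Build and submit an extended TX meta descriptor carrying the MSS and the
 * L3/L4 header layout.
 */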
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
                               struct ena_com_tx_meta *ena_meta)
{
        struct ena_eth_io_tx_meta_desc *meta_desc;

        meta_desc = get_sq_desc(io_sq);
        if (unlikely(!meta_desc))
                return ENA_COM_FAULT;

        memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

        /* bits 0-9 of the mss */
        meta_desc->word2 |= (ena_meta->mss <<
                ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
        /* bits 10-13 of the mss */
        meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
                ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

        /* Extended meta desc */
        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
        meta_desc->len_ctrl |= (io_sq->phase <<
                ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

        meta_desc->word2 |= ena_meta->l3_hdr_len &
                ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
        meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
                ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

        meta_desc->word2 |= (ena_meta->l4_hdr_len <<
                ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

        return ena_com_sq_update_tail(io_sq);
}

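/* Emit a TX meta descriptor when one is needed: unconditionally when meta
 * caching is disabled, otherwise only when the meta differs from the
 * cached copy (which is then refreshed). *have_meta reports whether a meta
 * descriptor was created.
 */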
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
                                                 struct ena_com_tx_ctx *ena_tx_ctx,
                                                 bool *have_meta)
{
        struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

        /* When disable meta caching is set, don't bother to save the meta and
         * compare it to the stored version, just create the meta
         */
        if (io_sq->disable_meta_caching) {
                if (unlikely(!ena_tx_ctx->meta_valid))
                        return ENA_COM_INVAL;

                *have_meta = true;
                return ena_com_create_meta(io_sq, ena_meta);
        } else if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
                *have_meta = true;
                /* Cache the meta desc */
                memcpy(&io_sq->cached_tx_meta, ena_meta,
                       sizeof(struct ena_com_tx_meta));
                return ena_com_create_meta(io_sq, ena_meta);
        } else {
                *have_meta = false;
                return ENA_COM_OK;
        }
}

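/* Extract the L3/L4 protocol, checksum, hash and fragmentation flags from
 * an RX completion descriptor into the RX context.
 */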
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
                                        struct ena_eth_io_rx_cdesc_base *cdesc)
{
        ena_rx_ctx->l3_proto = cdesc->status &
                ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
        ena_rx_ctx->l4_proto =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
        ena_rx_ctx->l3_csum_err =
                !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
        ena_rx_ctx->l4_csum_err =
                !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
        ena_rx_ctx->l4_csum_checked =
                !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
        ena_rx_ctx->hash = cdesc->hash;
        ena_rx_ctx->frag =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

        ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %u frag: %d cdesc_status: %x\n",
                    ena_rx_ctx->l3_proto,
                    ena_rx_ctx->l4_proto,
                    ena_rx_ctx->l3_csum_err,
                    ena_rx_ctx->l4_csum_err,
                    ena_rx_ctx->hash,
                    ena_rx_ctx->frag,
                    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

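/* Prepare a TX packet for submission: verify queue space and header size,
 * push the header (mandatory for LLQ queues), emit a meta descriptor when
 * needed, then emit one TX descriptor per buffer. On return, *nb_hw_desc
 * holds the number of hardware descriptors that were consumed.
 */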
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
                       struct ena_com_tx_ctx *ena_tx_ctx,
                       int *nb_hw_desc)
{
        struct ena_eth_io_tx_desc *desc = NULL;
        struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
        void *buffer_to_push = ena_tx_ctx->push_header;
        u16 header_len = ena_tx_ctx->header_len;
        u16 num_bufs = ena_tx_ctx->num_bufs;
        u16 start_tail = io_sq->tail;
        int i, rc;
        bool have_meta;
        u64 addr_hi;

        ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
                 "wrong Q type");

        /* num_bufs + 1 for a potential meta desc */
        if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
                ena_trc_dbg("Not enough space in the TX queue\n");
                return ENA_COM_NO_MEM;
        }

        if (unlikely(header_len > io_sq->tx_max_header_size)) {
                ena_trc_err("Header size is too large %d max header: %d\n",
                            header_len, io_sq->tx_max_header_size);
                return ENA_COM_INVAL;
        }

        if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
                     && !buffer_to_push))
                return ENA_COM_INVAL;

        rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
        if (unlikely(rc))
                return rc;

        rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
        if (unlikely(rc)) {
                ena_trc_err("Failed to create and store tx meta desc\n");
                return rc;
        }

        /* If the caller doesn't want to send packets */
        if (unlikely(!num_bufs && !header_len)) {
                rc = ena_com_close_bounce_buffer(io_sq);
                *nb_hw_desc = io_sq->tail - start_tail;
                return rc;
        }

        desc = get_sq_desc(io_sq);
        if (unlikely(!desc))
                return ENA_COM_FAULT;
        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

        /* Set first desc when we don't have meta descriptor */
        if (!have_meta)
                desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

        desc->buff_addr_hi_hdr_sz |= (header_len <<
                ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
                ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
        desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
                ENA_ETH_IO_TX_DESC_PHASE_MASK;

        desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

        /* Bits 0-9 */
        desc->meta_ctrl |= (ena_tx_ctx->req_id <<
                ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
                ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

        desc->meta_ctrl |= (ena_tx_ctx->df <<
                ENA_ETH_IO_TX_DESC_DF_SHIFT) &
                ENA_ETH_IO_TX_DESC_DF_MASK;

        /* Bits 10-15 */
        desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
                ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
                ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

        if (ena_tx_ctx->meta_valid) {
                desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
                        ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
                desc->meta_ctrl |= ena_tx_ctx->l3_proto &
                        ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
                        ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
                        ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
                        ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
                        ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
        }

        for (i = 0; i < num_bufs; i++) {
                /* The first buffer shares its descriptor with the header */
                if (likely(i != 0)) {
                        rc = ena_com_sq_update_tail(io_sq);
                        if (unlikely(rc))
                                return rc;

                        desc = get_sq_desc(io_sq);
                        if (unlikely(!desc))
                                return ENA_COM_FAULT;

                        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

                        desc->len_ctrl |= (io_sq->phase <<
                                ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
                                ENA_ETH_IO_TX_DESC_PHASE_MASK;
                }

                desc->len_ctrl |= ena_bufs->len &
                        ENA_ETH_IO_TX_DESC_LENGTH_MASK;

                addr_hi = ((ena_bufs->paddr &
                        GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

                desc->buff_addr_lo = (u32)ena_bufs->paddr;
                desc->buff_addr_hi_hdr_sz |= addr_hi &
                        ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
                ena_bufs++;
        }

        /* set the last desc indicator */
        desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

        rc = ena_com_sq_update_tail(io_sq);
        if (unlikely(rc))
                return rc;

        rc = ena_com_close_bounce_buffer(io_sq);

        *nb_hw_desc = io_sq->tail - start_tail;
        return rc;
}

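/* Retrieve one received packet from the CQ: fill ena_rx_ctx->ena_bufs with
 * the length and req_id of each of the packet's completion descriptors,
 * set the RX flags from the last descriptor and advance next_to_comp on
 * the matching SQ. ena_rx_ctx->descs is set to 0 when no packet is ready.
 */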
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
                   struct ena_com_io_sq *io_sq,
                   struct ena_com_rx_ctx *ena_rx_ctx)
{
        struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
        struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
        u16 cdesc_idx = 0;
        u16 nb_hw_desc;
        u16 i = 0;

        ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
                 "wrong Q type");

        nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
        if (nb_hw_desc == 0) {
                ena_rx_ctx->descs = nb_hw_desc;
                return 0;
        }

        ena_trc_dbg("Fetch rx packet: queue %d completed desc: %d\n",
                    io_cq->qid, nb_hw_desc);

        if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
                ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
                            nb_hw_desc, ena_rx_ctx->max_bufs);
                return ENA_COM_NO_SPACE;
        }

        cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
        ena_rx_ctx->pkt_offset = cdesc->offset;

        do {
                ena_buf->len = cdesc->length;
                ena_buf->req_id = cdesc->req_id;
                ena_buf++;
        } while ((++i < nb_hw_desc) && (cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i)));

        /* Update SQ head ptr */
        io_sq->next_to_comp += nb_hw_desc;

        ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
                    io_sq->qid, io_sq->next_to_comp);

        /* Get rx flags from the last cdesc */
        ena_com_rx_set_flags(ena_rx_ctx, cdesc);

        ena_rx_ctx->descs = nb_hw_desc;
        return 0;
}

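/* Post a single RX buffer to the SQ as a standalone (first and last)
 * descriptor and advance the tail.
 */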
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
                               struct ena_com_buf *ena_buf,
                               u16 req_id)
{
        struct ena_eth_io_rx_desc *desc;

        ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
                 "wrong Q type");

        if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
                return ENA_COM_NO_SPACE;

        desc = get_sq_desc(io_sq);
        if (unlikely(!desc))
                return ENA_COM_FAULT;

        memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

        desc->length = ena_buf->len;

        desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
                ENA_ETH_IO_RX_DESC_LAST_MASK |
                (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
                ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

        desc->req_id = req_id;

        desc->buff_addr_lo = (u32)ena_buf->paddr;
        desc->buff_addr_hi =
                ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

        return ena_com_sq_update_tail(io_sq);
}

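/* Return true when the CQ holds no unhandled completion descriptor */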
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
        struct ena_eth_io_rx_cdesc_base *cdesc;

        cdesc = ena_com_get_next_rx_cdesc(io_cq);

        return cdesc == NULL;
}