net/ena/base: improve style and comments
drivers/net/ena/base/ena_eth_com.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include "ena_eth_com.h"

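/* Peek at the completion descriptor at the current CQ head. A descriptor
 * is owned by the driver only when its phase bit matches the phase the
 * driver expects; return NULL if the device hasn't written it yet.
 */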
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        struct ena_com_io_cq *io_cq)
{
        struct ena_eth_io_rx_cdesc_base *cdesc;
        u16 expected_phase, head_masked;
        u16 desc_phase;

        head_masked = io_cq->head & (io_cq->q_depth - 1);
        expected_phase = io_cq->phase;

        cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
                        + (head_masked * io_cq->cdesc_entry_size_in_bytes));

        desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
                        ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

        if (desc_phase != expected_phase)
                return NULL;

        /* Make sure we read the rest of the descriptor after the phase bit
         * has been read
         */
        dma_rmb();

        return cdesc;
}

static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
        u16 tail_masked;
        u32 offset;

        tail_masked = io_sq->tail & (io_sq->q_depth - 1);

        offset = tail_masked * io_sq->desc_entry_size;

        return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

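/* Copy one completed bounce-buffer line into the device-side LLQ memory
 * and advance the SQ tail, toggling the phase bit on wrap-around. Respects
 * the tx burst limit when the device exposes one.
 */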
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
                                              u8 *bounce_buffer)
{
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        u16 dst_tail_mask;
        u32 dst_offset;

        dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
        dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

        if (is_llq_max_tx_burst_exists(io_sq)) {
                if (unlikely(!io_sq->entries_in_tx_burst_left)) {
                        ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                                    "Error: trying to send more packets than tx burst allows\n");
                        return ENA_COM_NO_SPACE;
                }

                io_sq->entries_in_tx_burst_left--;
                ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
                            "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
                            io_sq->qid, io_sq->entries_in_tx_burst_left);
        }

        /* Make sure everything was written into the bounce buffer before
         * writing the bounce buffer to the device
         */
        wmb();

        /* The line is completed. Copy it to dev */
        ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
                                bounce_buffer,
                                llq_info->desc_list_entry_size);

        io_sq->tail++;

        /* Switch phase bit in case of wrap around */
        if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
                io_sq->phase ^= 1;

        return ENA_COM_OK;
}

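/* Copy the packet header into the current bounce buffer, right after the
 * descriptors that precede it. Only meaningful for device placement (LLQ)
 * queues; a no-op for host placement.
 */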
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
                                          u8 *header_src,
                                          u16 header_len)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
        u16 header_offset;

        if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
                return 0;

        header_offset =
                llq_info->descs_num_before_header * io_sq->desc_entry_size;

        if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
                ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                            "Trying to write header larger than llq entry can accommodate\n");
                return ENA_COM_FAULT;
        }

        if (unlikely(!bounce_buffer)) {
                ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                            "Bounce buffer is NULL\n");
                return ENA_COM_FAULT;
        }

        memcpy(bounce_buffer + header_offset, header_src, header_len);

        return 0;
}

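/* Return the next free descriptor slot inside the current bounce buffer
 * and update the per-line bookkeeping (LLQ mode only).
 */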
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        u8 *bounce_buffer;
        void *sq_desc;

        bounce_buffer = pkt_ctrl->curr_bounce_buf;

        if (unlikely(!bounce_buffer)) {
                ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                            "Bounce buffer is NULL\n");
                return NULL;
        }

        sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
        pkt_ctrl->idx++;
        pkt_ctrl->descs_left_in_line--;

        return sq_desc;
}

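/* Flush a partially filled bounce buffer to the device and start a fresh
 * one. No-op for host placement (non-LLQ) queues.
 */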
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        int rc;

        if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
                return ENA_COM_OK;

        /* bounce buffer was used, so write it and get a new one */
        if (pkt_ctrl->idx) {
                rc = ena_com_write_bounce_buffer_to_dev(io_sq,
                                                        pkt_ctrl->curr_bounce_buf);
                if (unlikely(rc)) {
                        ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                                    "Failed to write bounce buffer to device\n");
                        return rc;
                }

                pkt_ctrl->curr_bounce_buf =
                        ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
                memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
                       0x0, llq_info->desc_list_entry_size);
        }

        pkt_ctrl->idx = 0;
        pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
        return ENA_COM_OK;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                return get_sq_desc_llq(io_sq);

        return get_sq_desc_regular_queue(io_sq);
}

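/* Advance the LLQ tail: when the current bounce-buffer line is full,
 * write it to the device and reset the per-line bookkeeping.
 */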
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        int rc;

        if (!pkt_ctrl->descs_left_in_line) {
                rc = ena_com_write_bounce_buffer_to_dev(io_sq,
                                                        pkt_ctrl->curr_bounce_buf);
                if (unlikely(rc)) {
                        ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                                    "Failed to write bounce buffer to device\n");
                        return rc;
                }

                pkt_ctrl->curr_bounce_buf =
                        ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
                memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
                       0x0, llq_info->desc_list_entry_size);

                pkt_ctrl->idx = 0;
                if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
                        pkt_ctrl->descs_left_in_line = 1;
                else
                        pkt_ctrl->descs_left_in_line =
                                llq_info->desc_list_entry_size / io_sq->desc_entry_size;
        }

        return ENA_COM_OK;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                return ena_com_sq_update_llq_tail(io_sq);

        io_sq->tail++;

        /* Switch phase bit in case of wrap around */
        if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
                io_sq->phase ^= 1;

        return ENA_COM_OK;
}

static struct ena_eth_io_rx_cdesc_base *
        ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
        idx &= (io_cq->q_depth - 1);
        return (struct ena_eth_io_rx_cdesc_base *)
                ((uintptr_t)io_cq->cdesc_addr.virt_addr +
                idx * io_cq->cdesc_entry_size_in_bytes);
}

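/* Count the completion descriptors belonging to the next received packet,
 * i.e. up to and including the descriptor with the LAST bit set. Returns 0
 * when the packet is not fully received yet; partial counts are accumulated
 * in cur_rx_pkt_cdesc_count until the last descriptor arrives.
 */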
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
                                    u16 *first_cdesc_idx)
{
        struct ena_eth_io_rx_cdesc_base *cdesc;
        u16 count = 0, head_masked;
        u32 last = 0;

        do {
                cdesc = ena_com_get_next_rx_cdesc(io_cq);
                if (!cdesc)
                        break;

                ena_com_cq_inc_head(io_cq);
                count++;
                last = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
                        ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
        } while (!last);

        if (last) {
                *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
                count += io_cq->cur_rx_pkt_cdesc_count;

                head_masked = io_cq->head & (io_cq->q_depth - 1);

                io_cq->cur_rx_pkt_cdesc_count = 0;
                io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

                ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
                            "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
                            io_cq->qid, *first_cdesc_idx, count);
        } else {
                io_cq->cur_rx_pkt_cdesc_count += count;
                count = 0;
        }

        return count;
}

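/* Build a TX metadata descriptor (MSS, L3/L4 header layout) in the SQ and
 * advance the tail.
 */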
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
                               struct ena_com_tx_meta *ena_meta)
{
        struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

        meta_desc = get_sq_desc(io_sq);
        if (unlikely(!meta_desc))
                return ENA_COM_FAULT;

        memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

        /* bits 0-9 of the mss */
        meta_desc->word2 |= ((u32)ena_meta->mss <<
                ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
        /* bits 10-13 of the mss */
        meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
                ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

        /* Extended meta desc */
        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
        meta_desc->len_ctrl |= ((u32)io_sq->phase <<
                ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

        meta_desc->word2 |= ena_meta->l3_hdr_len &
                ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
        meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
                ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

        meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
                ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

        return ena_com_sq_update_tail(io_sq);
}

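/* Create a metadata descriptor only when needed: always when meta caching
 * is disabled, otherwise only when the meta differs from the cached copy.
 * *have_meta tells the caller whether a descriptor was actually created.
 */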
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
                                                 struct ena_com_tx_ctx *ena_tx_ctx,
                                                 bool *have_meta)
{
        struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

        /* When meta caching is disabled, don't bother to save the meta and
         * compare it to the stored version; just create the meta
         */
        if (io_sq->disable_meta_caching) {
                if (unlikely(!ena_tx_ctx->meta_valid))
                        return ENA_COM_INVAL;

                *have_meta = true;
                return ena_com_create_meta(io_sq, ena_meta);
        }

        if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
                *have_meta = true;
                /* Cache the meta desc */
                memcpy(&io_sq->cached_tx_meta, ena_meta,
                       sizeof(struct ena_com_tx_meta));
                return ena_com_create_meta(io_sq, ena_meta);
        }

        *have_meta = false;
        return ENA_COM_OK;
}

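/* Translate the completion descriptor status bits into the RX context
 * (protocol indices, checksum results, hash and fragmentation flag).
 */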
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
                                 struct ena_com_rx_ctx *ena_rx_ctx,
                                 struct ena_eth_io_rx_cdesc_base *cdesc)
{
        ena_rx_ctx->l3_proto = cdesc->status &
                ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
        ena_rx_ctx->l4_proto =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
        ena_rx_ctx->l3_csum_err =
                !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
        ena_rx_ctx->l4_csum_err =
                !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
        ena_rx_ctx->l4_csum_checked =
                !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
        ena_rx_ctx->hash = cdesc->hash;
        ena_rx_ctx->frag =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

        ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
                    "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
                    ena_rx_ctx->l3_proto,
                    ena_rx_ctx->l4_proto,
                    ena_rx_ctx->l3_csum_err,
                    ena_rx_ctx->l4_csum_err,
                    ena_rx_ctx->hash,
                    ena_rx_ctx->frag,
                    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

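/* Convert a TX context into hardware descriptors: optionally push the
 * header into the bounce buffer (LLQ), emit a metadata descriptor when
 * required, then one descriptor per buffer. *nb_hw_desc returns the number
 * of descriptors consumed.
 */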
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
                       struct ena_com_tx_ctx *ena_tx_ctx,
                       int *nb_hw_desc)
{
        struct ena_eth_io_tx_desc *desc = NULL;
        struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
        void *buffer_to_push = ena_tx_ctx->push_header;
        u16 header_len = ena_tx_ctx->header_len;
        u16 num_bufs = ena_tx_ctx->num_bufs;
        u16 start_tail = io_sq->tail;
        int i, rc;
        bool have_meta;
        u64 addr_hi;

        ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
                 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

        /* num_bufs + 1 for a potential meta descriptor */
        if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
                ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
                            "Not enough space in the tx queue\n");
                return ENA_COM_NO_MEM;
        }

        if (unlikely(header_len > io_sq->tx_max_header_size)) {
                ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                            "Header size is too large %d max header: %d\n",
                            header_len, io_sq->tx_max_header_size);
                return ENA_COM_INVAL;
        }

        if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
                     && !buffer_to_push)) {
                ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                            "Push header wasn't provided on LLQ mode\n");
                return ENA_COM_INVAL;
        }

        rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
        if (unlikely(rc))
                return rc;

        rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
        if (unlikely(rc)) {
                ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                            "Failed to create and store tx meta desc\n");
                return rc;
        }

        /* If the caller doesn't want to send packets */
        if (unlikely(!num_bufs && !header_len)) {
                rc = ena_com_close_bounce_buffer(io_sq);
                if (rc)
                        ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                                    "Failed to write buffers to LLQ\n");
                *nb_hw_desc = io_sq->tail - start_tail;
                return rc;
        }

        desc = get_sq_desc(io_sq);
        if (unlikely(!desc))
                return ENA_COM_FAULT;
        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

        /* Set the first desc when there is no meta descriptor */
        if (!have_meta)
                desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

        desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
                ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
                ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
        desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
                ENA_ETH_IO_TX_DESC_PHASE_MASK;

        desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

        /* Bits 0-9 */
        desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
                ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
                ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

        desc->meta_ctrl |= (ena_tx_ctx->df <<
                ENA_ETH_IO_TX_DESC_DF_SHIFT) &
                ENA_ETH_IO_TX_DESC_DF_MASK;

        /* Bits 10-15 */
        desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
                ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
                ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

        if (ena_tx_ctx->meta_valid) {
                desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
                        ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
                desc->meta_ctrl |= ena_tx_ctx->l3_proto &
                        ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
                        ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
                        ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
                        ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
                        ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
        }

        for (i = 0; i < num_bufs; i++) {
                /* The first desc shares the same desc as the header */
                if (likely(i != 0)) {
                        rc = ena_com_sq_update_tail(io_sq);
                        if (unlikely(rc)) {
                                ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                                            "Failed to update sq tail\n");
                                return rc;
                        }

                        desc = get_sq_desc(io_sq);
                        if (unlikely(!desc))
                                return ENA_COM_FAULT;

                        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

                        desc->len_ctrl |= ((u32)io_sq->phase <<
                                ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
                                ENA_ETH_IO_TX_DESC_PHASE_MASK;
                }

                desc->len_ctrl |= ena_bufs->len &
                        ENA_ETH_IO_TX_DESC_LENGTH_MASK;

                addr_hi = ((ena_bufs->paddr &
                        GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

                desc->buff_addr_lo = (u32)ena_bufs->paddr;
                desc->buff_addr_hi_hdr_sz |= addr_hi &
                        ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
                ena_bufs++;
        }

        /* Set the last desc indicator */
        desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

        rc = ena_com_sq_update_tail(io_sq);
        if (unlikely(rc)) {
                ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                            "Failed to update sq tail of the last descriptor\n");
                return rc;
        }

        rc = ena_com_close_bounce_buffer(io_sq);
        if (rc)
                ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
                            "Failed when closing bounce buffer\n");

        *nb_hw_desc = io_sq->tail - start_tail;
        return rc;
}

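/* Fetch one received packet from the CQ: gather its completion descriptors
 * into ena_rx_ctx->ena_bufs, validate each req_id against the queue depth,
 * and extract the per-packet flags from the last descriptor.
 * ena_rx_ctx->descs is 0 when no complete packet is available.
 */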
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
                   struct ena_com_io_sq *io_sq,
                   struct ena_com_rx_ctx *ena_rx_ctx)
{
        struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
        struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
        u16 q_depth = io_cq->q_depth;
        u16 cdesc_idx = 0;
        u16 nb_hw_desc;
        u16 i = 0;

        ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
                 ena_com_io_cq_to_ena_dev(io_cq), "wrong Q type");

        nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
        if (nb_hw_desc == 0) {
                ena_rx_ctx->descs = nb_hw_desc;
                return 0;
        }

        ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
                    "Fetch rx packet: queue %d completed desc: %d\n",
                    io_cq->qid, nb_hw_desc);

        if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
                ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
                            "Too many RX cdescs (%d) > MAX(%d)\n",
                            nb_hw_desc, ena_rx_ctx->max_bufs);
                return ENA_COM_NO_SPACE;
        }

        cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
        ena_rx_ctx->pkt_offset = cdesc->offset;

        do {
                ena_buf[i].len = cdesc->length;
                ena_buf[i].req_id = cdesc->req_id;
                if (unlikely(ena_buf[i].req_id >= q_depth))
                        return ENA_COM_EIO;

                if (++i >= nb_hw_desc)
                        break;

                cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
        } while (1);

        /* Update SQ head ptr */
        io_sq->next_to_comp += nb_hw_desc;

        ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
                    "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
                    io_sq->qid, io_sq->next_to_comp);

        /* Get rx flags from the last pkt */
        ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

        ena_rx_ctx->descs = nb_hw_desc;

        return 0;
}

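/* Post a single RX buffer to the SQ as a standalone (FIRST and LAST)
 * descriptor and advance the tail.
 */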
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
                               struct ena_com_buf *ena_buf,
                               u16 req_id)
{
        struct ena_eth_io_rx_desc *desc;

        ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
                 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

        if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
                return ENA_COM_NO_SPACE;

        desc = get_sq_desc(io_sq);
        if (unlikely(!desc))
                return ENA_COM_FAULT;

        memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

        desc->length = ena_buf->len;

        desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
                     ENA_ETH_IO_RX_DESC_LAST_MASK |
                     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
                     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

        desc->req_id = req_id;

        ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
                    "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
                    __func__, io_sq->qid, req_id);

        desc->buff_addr_lo = (u32)ena_buf->paddr;
        desc->buff_addr_hi =
                ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

        return ena_com_sq_update_tail(io_sq);
}

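/* A CQ is empty when the descriptor at the head does not carry the
 * expected phase bit, i.e. the device has not completed anything new.
 */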
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
        return ena_com_get_next_rx_cdesc(io_cq) == NULL;
}