/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include "ena_eth_com.h"

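/* Peek at the completion descriptor at the current CQ head. New entries are
 * detected with a phase bit: the device flips the phase value it writes on
 * every wrap-around of the queue, so a cdesc whose phase matches
 * io_cq->phase is new, while a mismatch means the slot is stale. Returns
 * NULL when no new completion is ready.
 */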
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u8 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

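/* Write one completed descriptor line from the bounce buffer to the device
 * (LLQ/device placement only). When the device advertises a maximum TX
 * burst, every line written consumes one unit of the remaining burst
 * budget; the budget is restored when the SQ doorbell is written (see
 * ena_com_write_sq_doorbell() in ena_eth_com.h).
 */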
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Error: trying to send more packets than tx burst allows\n");
			return ENA_COM_NO_SPACE;
		}

		io_sq->entries_in_tx_burst_left--;
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			    io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

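/* Copy the packet header into the current bounce buffer so it is pushed
 * inline with the descriptors. The header lands right after the descriptor
 * slots of the line (descs_num_before_header descriptors in); host-placement
 * queues keep headers in host memory, so this is a no-op for them.
 */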
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Trying to write header larger than llq entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

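/* Flush the current, possibly partially filled, bounce buffer line to the
 * device and start a fresh one. Called once per prepared packet so that a
 * packet never leaves descriptors behind in an unflushed line.
 */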
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return ENA_COM_OK;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return ENA_COM_OK;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

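/* LLQ variant of the tail update: instead of bumping the tail index, advance
 * within the bounce buffer line. Once the line has no descriptor slots left
 * it is written out and descs_left_in_line is reset according to the stride
 * (a single descriptor per entry, or as many descriptors as fit in a line).
 */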
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return ENA_COM_OK;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
			idx * io_cq->cdesc_entry_size_in_bytes);
}

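/* Count how many completion descriptors the next RX packet occupies by
 * walking the CQ from the current head until a cdesc with the LAST bit is
 * found. For a partially completed packet (no LAST bit seen yet) the count
 * so far is parked in the io_cq state and 0 is returned, so the caller can
 * retry once the rest of the packet completes.
 */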
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
			    "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

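/* Build a TX meta descriptor carrying the TSO/checksum offload parameters.
 * Note how the MSS is split across the descriptor: bits 0-9 go into word2
 * and bits 10-13 into len_ctrl, hence the two shift-and-mask steps below.
 */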
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return ENA_COM_FAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

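/* Decide whether a meta descriptor must precede this packet's data
 * descriptors. With meta caching enabled, an unchanged meta desc (as
 * reported by ena_com_meta_desc_changed()) is skipped; otherwise it is
 * created and cached for the next comparison. *have_meta tells the caller
 * whether a meta descriptor was actually placed.
 */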
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		if (unlikely(!ena_tx_ctx->meta_valid))
			return ENA_COM_INVAL;

		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return ENA_COM_OK;
}

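/* Translate the raw cdesc status bits into the ena_com_rx_ctx fields the
 * upper layer consumes: L3/L4 protocol indices, checksum verdicts, RSS
 * hash and the IPv4 fragment flag.
 */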
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

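/* Prepare a TX packet for submission: an optional meta descriptor followed
 * by one data descriptor per buffer, where the first data descriptor also
 * carries the header length. On success *nb_hw_desc holds the number of HW
 * descriptors consumed (the tail delta), which the caller needs for
 * completion accounting. The doorbell is NOT written here.
 *
 * A minimal, hypothetical caller sketch (bufs, nb_bufs, hdr, hdr_len and
 * req_id are placeholders owned by the real datapath):
 *
 *	struct ena_com_tx_ctx ctx = {0};
 *	int nb_hw_desc = 0, rc;
 *
 *	ctx.ena_bufs = bufs;		// array of struct ena_com_buf
 *	ctx.num_bufs = nb_bufs;
 *	ctx.push_header = hdr;		// mandatory in LLQ mode
 *	ctx.header_len = hdr_len;
 *	ctx.req_id = req_id;
 *	rc = ena_com_prepare_tx(io_sq, &ctx, &nb_hw_desc);
 *	if (rc == ENA_COM_OK)
 *		rc = ena_com_write_sq_doorbell(io_sq);
 */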
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	/* num_bufs +1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
		     && !buffer_to_push)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Push header wasn't provided on LLQ mode\n");
		return ENA_COM_INVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 of the req_id */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 of the req_id */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same entry as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
					    "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* Set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);
	if (rc)
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed when closing bounce buffer\n");

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

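/* Fetch one received packet from the CQ: gather its completion descriptors,
 * copy the per-buffer lengths and req_ids into ena_rx_ctx->ena_bufs, advance
 * the RX SQ's next_to_comp so the freed buffers can be resubmitted, and
 * decode the offload flags from the last cdesc. ena_rx_ctx->descs is 0 when
 * no complete packet is ready yet.
 */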
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_cq_to_ena_dev(io_cq), "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "Fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return ENA_COM_EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

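/* Post a single RX buffer to the submission queue. A typical refill loop
 * (hypothetical sketch; phys, len and req_id come from driver-specific
 * buffer allocation) might look like:
 *
 *	while (ena_com_sq_have_enough_space(io_sq, 1)) {
 *		struct ena_com_buf ebuf = { .paddr = phys, .len = len };
 *
 *		if (ena_com_add_single_rx_desc(io_sq, &ebuf, req_id))
 *			break;
 *	}
 *	ena_com_write_sq_doorbell(io_sq);
 */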
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
		    __func__, io_sq->qid, req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

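/* Report whether no new completion is pending at the current CQ head, i.e.
 * the phase check in ena_com_get_next_rx_cdesc() fails. Nothing is consumed
 * from the queue, so this is a cheap way to poll for pending work.
 */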
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}