/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include "ena_eth_com.h"
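
/* Return a pointer to the RX completion descriptor at the current CQ head,
 * or NULL if the device has not written it yet (phase bit mismatch).
 */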
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u32 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
		+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();
	return cdesc;
}
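
/* Return the next free descriptor slot of a host-memory (regular) SQ. */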
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
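
/* Copy the completed descriptor line from the bounce buffer into device
 * memory (LLQ) and advance the SQ tail, honoring the TX burst limit when
 * the device exposes one.
 */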
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			ena_trc_err("Error: trying to send more packets than tx burst allows\n");
			return ENA_COM_NO_SPACE;
		}

		io_sq->entries_in_tx_burst_left--;
		ena_trc_dbg("decreasing entries_in_tx_burst_left of queue %d to %d\n",
			    io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}
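
/* For LLQ, copy the packet header into the current bounce buffer, right
 * after the descriptor area. No-op for host-memory (regular) queues.
 */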
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err("trying to write header larger than llq entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);
	return 0;
}
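
/* Return the next descriptor slot inside the current LLQ bounce buffer. */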
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;
	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;
	return sq_desc;
}
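
/* Flush a partially used bounce buffer to the device and start a fresh one.
 * No-op for host-memory (regular) queues.
 */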
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return ENA_COM_OK;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return ENA_COM_OK;
}
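
/* Pick the LLQ or regular-queue descriptor getter according to the queue's
 * placement policy.
 */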
static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}
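
/* Advance the LLQ tail: once the current descriptor line is full, write it
 * to the device and set up a fresh bounce buffer line.
 */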
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return ENA_COM_OK;
}
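
/* Advance the SQ tail: host-memory queues simply bump the tail and flip the
 * phase bit on wrap-around; LLQ queues go through the bounce-buffer path.
 */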
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}
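
/* Translate a completion descriptor index into a pointer within the CQ ring. */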
static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}
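
/* Count the completion descriptors that belong to the next received packet.
 * Returns 0 if the packet is not fully completed yet; otherwise returns the
 * number of descriptors and reports the index of the packet's first one.
 */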
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);
		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}
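
/* Build an extended TX meta descriptor carrying the offload parameters
 * (MSS, header lengths and offsets), cache it in the SQ for later
 * comparison, and advance the SQ tail.
 */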
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	meta_desc = get_sq_desc(io_sq);
	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	/* Cache the meta desc */
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	return ena_com_sq_update_tail(io_sq);
}
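
/* Extract L3/L4 protocol, checksum and fragmentation flags from the last
 * completion descriptor into the RX context.
 */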
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %d frag: %d cdesc_status: %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/***************************** API **********************************/
/*****************************************************************************/
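
/* Prepare the SQ descriptors for one TX packet: an optional meta descriptor,
 * the pushed header for LLQ, and one descriptor per data buffer. On success
 * *nb_hw_desc holds the number of hardware descriptors that were consumed.
 */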
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 "wrong Q type");

	/* num_bufs +1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		ena_trc_dbg("Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err("header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
		     && !buffer_to_push))
		return ENA_COM_INVAL;

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta) {
		rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
		if (unlikely(rc))
			return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 of the req_id */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 of the req_id */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc))
				return rc;

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc))
		return rc;

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}
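
/* Retrieve one received packet from the CQ: fill ena_rx_ctx with the buffer
 * lengths, request ids and RX flags, and account the freed descriptors on
 * the matching RX SQ.
 */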
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	} while ((++i < nb_hw_desc) && (cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i)));

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}
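
/* Post a single RX buffer descriptor to the RX submission queue. */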
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		ENA_ETH_IO_RX_DESC_LAST_MASK |
		(io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
		ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}
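
/* Return true if the CQ holds no completion descriptor ready for processing. */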
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}