/*
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 #include "ena_eth_com.h"
static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)
		((unsigned char *)io_cq->cdesc_addr.virt_addr
		+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	return cdesc;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	u32 offset = tail_masked * io_sq->desc_entry_size;

	return (unsigned char *)io_sq->desc_addr.virt_addr + offset;
}

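/* For LLQ (device-memory) placement, mirror the descriptor at the current
 * tail into the device memory window; host-memory queues skip the copy,
 * since the device fetches their descriptors from host memory directly.
 */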
static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	u32 offset = tail_masked * io_sq->desc_entry_size;

	/* In case this queue isn't a LLQ */
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return;

	memcpy_toio((unsigned char *)io_sq->desc_addr.pbuf_dev_addr + offset,
		    (unsigned char *)io_sq->desc_addr.virt_addr + offset,
		    io_sq->desc_entry_size);
}

static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;
}

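/* Copy the packet header into the push buffer slot that belongs to the
 * current tail. Only meaningful for LLQ; for host-memory placement this is
 * a no-op.
 */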
static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
				       u8 *head_src, u16 header_len)
{
	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	u8 __iomem *dev_head_addr =
		io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	if (unlikely(!io_sq->header_addr)) {
		ena_trc_err("Push buffer header ptr is NULL\n");
		return ENA_COM_INVAL;
	}

	memcpy_toio(dev_head_addr, head_src, header_len);

	return 0;
}

static inline struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((unsigned char *)io_cq->cdesc_addr.virt_addr +
			idx * io_cq->cdesc_entry_size_in_bytes);
}

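/* Advance over the completion descriptors of the next RX packet, stopping at
 * the descriptor with the LAST bit set. Returns the number of descriptors
 * making up the packet (and its first index via first_cdesc_idx), or 0 when
 * the packet isn't fully completed yet.
 */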
static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

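/* A TX meta descriptor only needs to be generated when the packet's meta
 * data differs from the copy cached in the SQ.
 */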
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	int rc;

	if (ena_tx_ctx->meta_valid) {
		rc = memcmp(&io_sq->cached_tx_meta,
			    &ena_tx_ctx->ena_meta,
			    sizeof(struct ena_com_tx_meta));
		if (unlikely(rc != 0))
			return true;
	}

	return false;
}

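/* Build a TX meta descriptor (MSS and L3/L4 header layout) in the current
 * tail slot, cache it for future ena_com_meta_desc_changed() checks, and
 * advance the queue.
 */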
static inline void ena_com_create_and_store_tx_meta_desc(
	struct ena_com_io_sq *io_sq,
	struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	meta_desc = get_sq_desc(io_sq);
	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	/* Cache the meta desc for later comparison */
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	ena_com_copy_curr_sq_desc_to_dev(io_sq);
	ena_com_sq_update_tail(io_sq);
}

static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
					struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = (enum ena_eth_io_l3_proto_index)(cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK);
	ena_rx_ctx->l4_proto = (enum ena_eth_io_l4_proto_index)
		((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT);
	ena_rx_ctx->l3_csum_err =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
	ena_rx_ctx->l4_csum_err =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

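/* Prepare a TX packet: push the header (LLQ only), emit a meta descriptor if
 * the meta data changed, then build one TX descriptor per buffer, setting
 * FIRST/LAST and the phase bit as required. On success, *nb_hw_desc holds
 * the number of hardware descriptors consumed.
 */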
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *push_header = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	int total_desc, i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX,
		   "wrong Q type");

	/* num_bufs +1 for potential meta desc */
	if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
		ena_trc_err("Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err("header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	/* start with pushing the header (if needed) */
	rc = ena_com_write_header(io_sq, push_header, header_len);
	if (unlikely(rc))
		return rc;

	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta)
		ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		*nb_hw_desc = have_meta ? 0 : 1;
		return 0;
	}

	desc = get_sq_desc(io_sq);
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 of the req_id */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 of the req_id */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first buffer reuses the desc set up above for the header */
		if (likely(i != 0)) {
			ena_com_copy_curr_sq_desc_to_dev(io_sq);
			ena_com_sq_update_tail(io_sq);

			desc = get_sq_desc(io_sq);
			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	ena_com_copy_curr_sq_desc_to_dev(io_sq);

	ena_com_sq_update_tail(io_sq);

	total_desc = ENA_MAX16(num_bufs, 1);
	total_desc += have_meta ? 1 : 0;

	*nb_hw_desc = total_desc;

	return 0;
}

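/* Fetch the next received packet from the CQ. Fills ena_rx_ctx with the
 * length and req_id of every completed buffer, takes the RX flags from the
 * packet's last completion descriptor and advances the SQ consumer index.
 * ena_rx_ctx->descs is 0 when no complete packet is available.
 */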
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i;

	ENA_ASSERT(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX,
		   "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	for (i = 0; i < nb_hw_desc; i++) {
		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	}

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

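/* Post a single RX buffer to the submission queue. Every RX buffer gets its
 * own descriptor, so both FIRST and LAST are set.
 */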
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX,
		   "wrong Q type");

	if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi = ((ena_buf->paddr &
		GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	ena_com_sq_update_tail(io_sq);

	return 0;
}

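/* Poll one TX completion. Returns ENA_COM_TRY_AGAIN while the descriptor at
 * the CQ head still carries the stale phase bit (the device hasn't completed
 * it yet); otherwise consumes the descriptor and returns its req_id.
 */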
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((unsigned char *)io_cq->cdesc_addr.virt_addr
		+ (masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected phase, it means that the device still didn't update
	 * this completion.
	 */
	cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	ena_com_cq_inc_head(io_cq);

	*req_id = cdesc->req_id;

	return 0;
}
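
/*
 * Illustrative usage sketch (not part of the driver): a TX clean-up path
 * would typically poll ena_com_tx_comp_req_id_get() until it reports
 * ENA_COM_TRY_AGAIN. The free_tx_buffer() helper below is hypothetical;
 * a real caller would unmap and release whatever it associated with req_id.
 *
 *	u16 req_id;
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0)
 *		free_tx_buffer(tx_ring, req_id);
 */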