/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#if defined(__cplusplus)
extern "C" {
#endif

#include "ena_com.h"
/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4
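/* e.g. with q_depth = 1024 the completion queue head doorbell is written
 * only once more than 1024 / 4 = 256 completions are unreported (see
 * ena_com_update_dev_comp_head() below).
 */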
struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;
	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};
struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
};
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
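/*
 * A minimal sketch of an RX poll built on the prototypes above (assumed
 * usage; ena_example_rx_poll and the fixed 4-segment budget are
 * hypothetical, only the ena_com_* types and calls come from this API).
 */
static inline int ena_example_rx_poll(struct ena_com_io_cq *io_cq,
				      struct ena_com_io_sq *io_sq)
{
	struct ena_com_rx_buf_info bufs[4]; /* illustrative segment budget */
	struct ena_com_rx_ctx rx_ctx;
	int rc;

	rx_ctx.ena_bufs = bufs;
	rx_ctx.max_bufs = 4;

	rc = ena_com_rx_pkt(io_cq, io_sq, &rx_ctx);
	if (unlikely(rc))
		return rc;

	if (rx_ctx.descs == 0)
		return 0; /* completion queue is empty, nothing to do */

	/* bufs[0..rx_ctx.descs - 1] now describe one received packet; a real
	 * driver would hand it to the stack and re-post fresh buffers with
	 * ena_com_add_single_rx_desc().
	 */
	return rx_ctx.descs;
}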
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}
static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}
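/* Worked example: tail and next_to_comp are free-running u16 counters, so
 * with q_depth = 1024, tail = 5 and next_to_comp = 65533 the subtraction
 * wraps to cnt = 8 in-flight descriptors, leaving 1024 - 1 - 8 = 1015
 * free entries (one slot is always held back).
 */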
/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. So to reduce
	 * the calculation overhead just subtract 2 lines from the free descs
	 * (one for the header line and one to compensate for the division
	 * rounding down).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}
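/* Worked example for the LLQ branch: with descs_per_entry = 2 and
 * required_buffers = 5, temp = 5 / 2 + 2 = 4, so the function only
 * returns true while more than 4 entries are free - a deliberately
 * conservative estimate that avoids exact per-entry accounting.
 */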
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}
static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}
static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	int num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	/* A changed meta descriptor consumes one extra descriptor */
	if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	ena_trc_dbg("queue: %d num_descs: %d num_entries_needed: %d\n",
		    io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
		    io_sq->qid, tail);

	ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		ena_trc_dbg("reset available entries in tx burst for queue %d to %d\n",
			    io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}
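/*
 * A minimal sketch of the TX send path these helpers compose (assumed
 * usage; ena_example_xmit and its error handling are hypothetical, only
 * the ena_com_* calls come from this API).
 */
static inline int ena_example_xmit(struct ena_com_io_sq *io_sq,
				   struct ena_com_tx_ctx *tx_ctx)
{
	int nb_hw_desc = 0;
	int rc;

	/* +1 leaves room for a meta descriptor in case the metadata changed */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, tx_ctx->num_bufs + 1)))
		return ENA_COM_NO_SPACE;

	/* For LLQ, flush the current burst before it would overflow */
	if (ena_com_is_doorbell_needed(io_sq, tx_ctx))
		ena_com_write_sq_doorbell(io_sq);

	rc = ena_com_prepare_tx(io_sq, tx_ctx, &nb_hw_desc);
	if (unlikely(rc))
		return rc;

	/* A real driver batches several packets per doorbell write */
	ena_com_write_sq_doorbell(io_sq);

	return 0;
}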
static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	if (unlikely(io_cq->cq_head_db_reg)) {
		head = io_cq->head;
		unreported_comp = head - io_cq->last_head_update;
		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

		if (unlikely(need_update)) {
			ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
				    io_cq->qid, head);
			ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
			io_cq->last_head_update = head;
		}
	}

	return 0;
}
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}
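/* Illustrative phase sequence: with q_depth = 256 and an initial phase of 1,
 * completions 0..255 are valid once their phase bit reads 1, completions
 * 256..511 once it reads 0, and so on - the flip above keeps the expected
 * phase in sync with what the device writes on each pass over the ring.
 */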
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected, it means that the device still didn't update
	 * this completion.
	 */
	cdesc_phase = READ_ONCE16(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	/* Make sure the descriptor body is read only after the phase check */
	dma_rmb();

	*req_id = READ_ONCE16(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		ena_trc_err("Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}
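/*
 * A minimal sketch of a TX completion reap loop (assumed usage;
 * ena_example_tx_cleanup is hypothetical and pretends every request
 * consumed exactly one descriptor, while a real driver tracks the
 * per-req_id descriptor count).
 */
static inline int ena_example_tx_cleanup(struct ena_com_io_cq *io_cq,
					 struct ena_com_io_sq *io_sq)
{
	u16 req_id;
	int reaped = 0;

	/* Harvest until the device stops handing back completed req_ids */
	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
		/* free the buffers tied to req_id here, then release its
		 * descriptors back to the submission queue
		 */
		ena_com_comp_ack(io_sq, 1);
		reaped++;
	}

	/* Report the new CQ head; a no-op until enough completions pile up */
	ena_com_update_dev_comp_head(io_cq);

	return reaped;
}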
#if defined(__cplusplus)
}
#endif

#endif /* ENA_ETH_COM_H_ */