/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_
#if defined(__cplusplus)
extern "C" {
#endif
#include "ena_com.h"
/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4
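
/* Worked example (illustrative numbers, not from this file): with a CQ depth
 * of 1024, ena_com_update_dev_comp_head() below writes the head doorbell only
 * once more than 1024 / ENA_COMP_HEAD_THRESH = 256 completions have
 * accumulated since the last update, trading MMIO writes for a slightly
 * staler device-side view of the head pointer.
 */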
struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For a regular queue, the size of the header;
	 * for LLQ, the size of the pushed buffer.
	 */
	u16 header_len;
	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};
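
/* Minimal TX-context sketch (hedged; "bufs", "nb_segs", "id" and "hdr_len"
 * are hypothetical caller state, not defined in this file):
 *
 *	struct ena_com_tx_ctx ctx = {0};
 *
 *	ctx.ena_bufs = bufs;       // scatter-gather list, num_bufs entries
 *	ctx.num_bufs = nb_segs;
 *	ctx.req_id = id;           // echoed back in the TX completion
 *	ctx.header_len = hdr_len;  // pushed-buffer size when using LLQ
 */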
struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
};
bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
				struct ena_com_tx_ctx *ena_tx_ctx);
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);
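
/* Usage sketch for the TX path (hedged example, not driver code; "tx_ring"
 * and the errno are hypothetical, "ctx" is an ena_com_tx_ctx as sketched
 * above):
 *
 *	int nb_hw_desc = 0;
 *	int rc;
 *
 *	if (!ena_com_sq_have_enough_space(tx_ring->io_sq, nb_segs + 1))
 *		return -ENOSPC;
 *	rc = ena_com_prepare_tx(tx_ring->io_sq, &ctx, &nb_hw_desc);
 *	if (rc)
 *		return rc;
 *	// nb_hw_desc now holds the number of HW descriptors consumed
 */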
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);
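
/* RX polling sketch (hedged; "rx_ring" and its fields are hypothetical
 * driver state):
 *
 *	struct ena_com_rx_ctx rx_ctx = {0};
 *
 *	rx_ctx.ena_bufs = rx_ring->ena_bufs;
 *	rx_ctx.max_bufs = rx_ring->ring_size;
 *	rc = ena_com_rx_pkt(rx_ring->io_cq, rx_ring->io_sq, &rx_ctx);
 *	if (rc)
 *		goto error;
 *	if (rx_ctx.descs == 0)   // no packet ready yet
 *		return 0;
 *	// rx_ctx.descs buffers now describe one received packet
 */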
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}
static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}
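
/* Note: tail and next_to_comp are free-running u16 counters, so the
 * subtraction stays correct across wrap-around. Illustrative numbers:
 * tail = 0x0005 and next_to_comp = 0xFFFE give cnt = 7, and with
 * q_depth = 1024 the queue still has 1023 - 7 = 1016 free descriptors.
 */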
/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_desc(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate, so to reduce
	 * the calculation overhead just subtract 2 lines from the free
	 * descs: one for the header line and one to compensate for the
	 * division rounding down.
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_desc(io_sq) > temp;
}
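
/* Illustrative numbers: with descs_per_entry = 2, a request for
 * required_buffers = 5 yields temp = 5 / 2 + 2 = 4, so the check passes
 * only when more than 4 LLQ entries are free - a cheap over-estimate that
 * errs on the safe side.
 */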
static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 tail = io_sq->tail;
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;

	ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
		    io_sq->qid, tail);

	ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		ena_trc_dbg("reset available entries in tx burst for queue %d to %d\n",
			    io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}
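
/* Doorbell pairing sketch (hedged; the burst loop and "tx_ring" are
 * hypothetical driver code):
 *
 *	// for each packet in the burst:
 *	if (ena_com_is_doorbell_needed(tx_ring->io_sq, &ctx))
 *		ena_com_write_sq_doorbell(tx_ring->io_sq); // flush LLQ burst
 *	ena_com_prepare_tx(tx_ring->io_sq, &ctx, &nb_hw_desc);
 *
 *	// after the burst, kick the device once:
 *	ena_com_write_sq_doorbell(tx_ring->io_sq);
 */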
static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	head = io_cq->head;
	unreported_comp = head - io_cq->last_head_update;
	need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

	if (io_cq->cq_head_db_reg && need_update) {
		ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
			    io_cq->qid, head);
		ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
		io_cq->last_head_update = head;
	}

	return 0;
}
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}
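
/* Typically invoked when the CPU servicing the queue changes, e.g. after an
 * IRQ-affinity change (hedged; "rx_ring" and "node" are hypothetical):
 *
 *	ena_com_update_numa_node(rx_ring->io_cq, node);
 */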
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}
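
/* Phase-bit example (illustrative): with q_depth = 8, every pass of head
 * over index 7 back to 0 flips the expected phase, so a descriptor left over
 * from the previous lap (whose phase bit was written on that lap) no longer
 * matches and is correctly treated as not yet completed.
 */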
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected phase, it means the device hasn't updated this completion
	 * yet.
	 */
	cdesc_phase = READ_ONCE16(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	/* Make sure req_id is read only after the phase was validated */
	dma_rmb();

	*req_id = READ_ONCE16(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		ena_trc_err("Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}
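
/* TX-completion reaping sketch (hedged; "tx_ring" and its per-request info
 * array are hypothetical driver state):
 *
 *	u16 req_id;
 *
 *	while (ena_com_tx_comp_req_id_get(tx_ring->io_cq, &req_id) == 0) {
 *		// release the buffers recorded for req_id, then ack the
 *		// descriptors that request consumed:
 *		ena_com_comp_ack(tx_ring->io_sq, tx_ring->info[req_id].descs);
 *	}
 *	ena_com_update_dev_comp_head(tx_ring->io_cq); // maybe ring CQ doorbell
 */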
#if defined(__cplusplus)
}
#endif

#endif /* ENA_ETH_COM_H_ */