1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 */
9 #include <rte_cycles.h>
11 #include <rte_bus_pci.h>
12 #include <rte_timer.h>
/* PCI BAR index that exposes the device register space */
16 #define ENA_REGS_BAR 0

19 #define ENA_MAX_NUM_QUEUES 128
20 #define ENA_MIN_FRAME_LEN 64
/* Buffer for the adapter name, see ena_adapter.name below */
21 #define ENA_NAME_MAX_LEN 20
/* Upper bound on scatter-gather segments per packet, sizes the
 * ena_com_buf/ena_com_rx_buf_info arrays in the ring structures.
 */
22 #define ENA_PKT_MAX_BUFS 17
23 #define ENA_RX_BUF_MIN_SIZE 1400
24 #define ENA_DEFAULT_RING_SIZE 1024

26 #define ENA_MIN_MTU 128

28 #define ENA_MMIO_DISABLE_REG_READ BIT(0)

/* Watchdog period; ENA_DEVICE_KALIVE_TIMEOUT converts it to timer ticks
 * via rte_get_timer_hz() (note: evaluated at each use, not a constant).
 */
30 #define ENA_WD_TIMEOUT_SEC 3
31 #define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
33 /* While processing submitted and completed descriptors (Rx and Tx path
34 * respectively) in a loop it is desired to:
35 * - perform batch submissions while populating the submission queue
36 * - avoid blocking transmission of other packets during the cleanup phase
37 * Hence a utilization ratio of 1/8 of the queue size is used, capped at a
38 * maximum value when the size of the ring is very big - like 8k Rx rings.
 */
/* Cleanup/refill burst threshold: ring_size / DIVIDER, capped at
 * ENA_REFILL_THRESH_PACKET (see the comment block above).
 */
40 #define ENA_REFILL_THRESH_DIVIDER 8
41 #define ENA_REFILL_THRESH_PACKET 256

/* Ring index arithmetic with wrap-around via bitmask.
 * NOTE(review): presumably mask == ring_size - 1 with a power-of-two ring
 * size (see ena_ring.size_mask) — confirm against the ring setup code.
 */
43 #define ENA_IDX_NEXT_MASKED(idx, mask) (((idx) + 1) & (mask))
44 #define ENA_IDX_ADD_MASKED(idx, n, mask) (((idx) + (n)) & (mask))
/* Per-slot Tx context: the mbuf being sent plus its device buffer list.
 * NOTE(review): the closing brace of this struct is not visible in this
 * listing (lines appear to have been dropped).
 */
53 struct ena_tx_buffer {
	/* mbuf (chain head) submitted in this Tx slot; NULL when free */
54 struct rte_mbuf *mbuf;
	/* NOTE(review): presumably the number of device descriptors this
	 * packet consumed — confirm against the Tx path.
	 */
55 unsigned int tx_descs;
	/* Number of valid entries in bufs[] below */
56 unsigned int num_of_bufs;
	/* Scatter-gather buffer descriptors handed to ena_com */
57 struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
60 /* Rx buffer holds only pointer to the mbuf - may be expanded in the future */
61 struct ena_rx_buffer {
	/* mbuf posted to the device for this Rx slot */
62 struct rte_mbuf *mbuf;
	/* ena_com descriptor (address/length) for the mbuf above */
63 struct ena_com_buf ena_buf;
/* In/out context for computing the maximum Rx/Tx queue sizes from the
 * device feature set. max_*_queue_size are outputs.
 */
66 struct ena_calc_queue_size_ctx {
67 struct ena_com_dev_get_features_ctx *get_feat_ctx;
68 struct ena_com_dev *ena_dev;
69 u32 max_rx_queue_size;
70 u32 max_tx_queue_size;
/* NOTE(review): interior of struct ena_ring — the opening declaration line
 * is outside this view. Fields below are shared by the Rx and Tx paths;
 * `type` distinguishes which role a given ring instance plays.
 */
101 enum ena_ring_type type;
102 enum ena_admin_placement_policy_type tx_mem_queue_type;

104 /* Indicate there are Tx packets pushed to the device and wait for db */
105 bool pkts_without_db;

107 /* Holds the empty requests for TX/RX OOO completions */
109 uint16_t *empty_tx_reqs;
110 uint16_t *empty_rx_reqs;

114 struct ena_tx_buffer *tx_buffer_info; /* context of tx packet */
115 struct ena_rx_buffer *rx_buffer_info; /* context of rx packet */

	/* Staging array of mbufs used when bulk-refilling the Rx ring */
117 struct rte_mbuf **rx_refill_buffer;

118 unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
	/* NOTE(review): presumably ring_size - 1, used with the
	 * ENA_IDX_*_MASKED macros — implies a power-of-two ring size.
	 */
119 unsigned int size_mask;

	/* ena_com completion/submission queue handles for this ring */
121 struct ena_com_io_cq *ena_com_io_cq;
122 struct ena_com_io_sq *ena_com_io_sq;

	/* NOTE(review): this declaration has no terminator here — the
	 * continuation line (likely an alignment attribute and ';') appears
	 * to be missing from this listing.
	 */
124 struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]

	/* Mempool that Rx mbufs are allocated from */
127 struct rte_mempool *mb_pool;
128 unsigned int port_id;

130 /* Max length PMD can push to device for LLQ */
131 uint8_t tx_max_header_size;

	/* Bounce buffer used when pushing Tx headers for LLQ */
134 uint8_t *push_buf_intermediate_buf;

	/* Back-pointer to the owning adapter */
136 struct ena_adapter *adapter;

140 bool disable_meta_caching;

	/* Per-ring software statistics */
143 struct ena_stats_rx rx_stats;
144 struct ena_stats_tx tx_stats;

	/* NUMA node the ring's memory was allocated on */
147 unsigned int numa_socket_id;
148 } __rte_cache_aligned;
/* Lifecycle state of the adapter, tracked in ena_adapter.state */
150 enum ena_adapter_state {
151 ENA_ADAPTER_STATE_FREE = 0,
152 ENA_ADAPTER_STATE_INIT = 1,
153 ENA_ADAPTER_STATE_RUNNING = 2,
154 ENA_ADAPTER_STATE_STOPPED = 3,
155 ENA_ADAPTER_STATE_CONFIG = 4,
156 ENA_ADAPTER_STATE_CLOSED = 5,
/* Driver-level error counters; atomics, so they can be bumped from
 * multiple lcores without a lock.
 */
159 struct ena_driver_stats {
160 rte_atomic64_t ierrors;
161 rte_atomic64_t oerrors;
	/* Rx mbuf allocation failures */
162 rte_atomic64_t rx_nombuf;
/* Device-level statistics.
 * NOTE(review): the field list of this struct is not visible in this
 * listing; only the tx-drops commentary survives.
 */
166 struct ena_stats_dev {
171 * Tx drops cannot be reported as the driver statistic, because DPDK
172 * rte_eth_stats structure isn't providing appropriate field for that.
173 * As a workaround it is being published as an extended statistic.
/* ENI (Elastic Network Interface) traffic-shaping counters reported by
 * the device: packets dropped/shaped due to exceeded allowances.
 */
178 struct ena_stats_eni {
	/*
180 * The number of packets shaped due to inbound aggregate BW
181 * allowance being exceeded
	 */
183 uint64_t bw_in_allowance_exceeded;
	/*
185 * The number of packets shaped due to outbound aggregate BW
186 * allowance being exceeded
	 */
188 uint64_t bw_out_allowance_exceeded;
189 /* The number of packets shaped due to PPS allowance being exceeded */
190 uint64_t pps_allowance_exceeded;
	/*
192 * The number of packets shaped due to connection tracking
193 * allowance being exceeded and leading to failure in establishment
	 * of new connections
	 */
196 uint64_t conntrack_allowance_exceeded;
	/*
198 * The number of packets shaped due to linklocal packet rate
199 * allowance being exceeded
	 */
201 uint64_t linklocal_allowance_exceeded;
/* Checksum-offload capabilities discovered from the device features */
204 struct ena_offloads {
206 bool tx_csum_supported;
207 bool rx_csum_supported;
210 /* board specific private data structure */
/* NOTE(review): interior of struct ena_adapter — its opening and closing
 * lines are outside this view.
 */
212 /* OS defined structs */
213 struct rte_pci_device *pdev;
214 struct rte_eth_dev_data *rte_eth_dev_data;
215 struct rte_eth_dev *rte_dev;

	/* ena_com (HW abstraction) device context */
217 struct ena_com_dev ena_dev __rte_cache_aligned;

	/* Per-queue ring state; bounded by ENA_MAX_NUM_QUEUES */
220 struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
221 u32 max_tx_ring_size;
225 struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
226 u32 max_rx_ring_size;
229 u32 max_num_io_queues;

231 struct ena_offloads offloads;

233 /* The admin queue isn't protected by the lock and is used to
234 * retrieve statistics from the device. As there is no guarantee that
235 * application won't try to get statistics from multiple threads, it is
236 * safer to lock the queue to avoid admin queue failure.
 */
238 rte_spinlock_t admin_lock;

241 char name[ENA_NAME_MAX_LEN];
242 u8 mac_addr[RTE_ETHER_ADDR_LEN];

247 struct ena_driver_stats *drv_stats;
248 enum ena_adapter_state state;

	/* Offload bitmaps: what the device supports vs. what the
	 * application configured.
	 */
250 uint64_t tx_supported_offloads;
251 uint64_t tx_selected_offloads;
252 uint64_t rx_supported_offloads;
253 uint64_t rx_selected_offloads;

257 enum ena_regs_reset_reason_types reset_reason;

	/* Watchdog: rte_timer plus the last keep-alive timestamp and the
	 * timeout it is compared against (see ENA_DEVICE_KALIVE_TIMEOUT).
	 */
259 struct rte_timer timer_wd;
260 uint64_t timestamp_wd;
261 uint64_t keep_alive_timeout;

263 struct ena_stats_dev dev_stats;
264 struct ena_stats_eni eni_stats;

270 bool use_large_llq_hdr;
273 #endif /* _ENA_ETHDEV_H_ */