/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#ifndef _ENA_ETHDEV_H_
#define _ENA_ETHDEV_H_

#include <rte_atomic.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_timer.h>

#include "ena_com.h"

#define ENA_REGS_BAR	0

#define ENA_MAX_NUM_QUEUES	128
#define ENA_MIN_FRAME_LEN	64
#define ENA_NAME_MAX_LEN	20
#define ENA_PKT_MAX_BUFS	17
#define ENA_RX_BUF_MIN_SIZE	1400
#define ENA_DEFAULT_RING_SIZE	1024

#define ENA_MIN_MTU	128

#define ENA_MMIO_DISABLE_REG_READ	BIT(0)

#define ENA_WD_TIMEOUT_SEC	3
#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
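
/*
 * Illustrative sketch of the intended use (the real check lives in the
 * driver sources; ENA_REGS_RESET_KEEP_ALIVE_TO is assumed to come from
 * ena_regs_defs.h): treat the device as hung once no keep-alive event
 * arrived within the timeout:
 *
 *	if (rte_get_timer_cycles() - adapter->timestamp_wd >
 *			adapter->keep_alive_timeout)
 *		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
 */
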
/* While processing submitted and completed descriptors (Rx and Tx path,
 * respectively) in a loop, it is desired to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during the cleanup phase
 * Hence a utilization ratio of 1/8 of the queue size is used, capped at
 * ENA_REFILL_THRESH_PACKET for rings that are very big - like 8k Rx rings.
 */
#define ENA_REFILL_THRESH_DIVIDER	8
#define ENA_REFILL_THRESH_PACKET	256
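
/*
 * Sketch of the resulting threshold (illustrative only): 1/8 of the ring
 * size, bounded above by ENA_REFILL_THRESH_PACKET:
 *
 *	unsigned int thresh = RTE_MIN(ring_size / ENA_REFILL_THRESH_DIVIDER,
 *		(unsigned int)ENA_REFILL_THRESH_PACKET);
 */
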
#define ENA_IDX_NEXT_MASKED(idx, mask) (((idx) + 1) & (mask))
#define ENA_IDX_ADD_MASKED(idx, n, mask) (((idx) + (n)) & (mask))
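
/*
 * These helpers assume the ring size is a power of two, so that
 * mask == ring_size - 1 wraps the index without a modulo. E.g. for a
 * 1024-entry ring (mask 0x3FF):
 *
 *	ENA_IDX_NEXT_MASKED(1023, 0x3FF)    evaluates to 0
 *	ENA_IDX_ADD_MASKED(1020, 8, 0x3FF)  evaluates to 4
 */
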
#define ENA_RX_RSS_TABLE_LOG_SIZE	7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)

#define ENA_HASH_KEY_SIZE	40

#define ENA_ALL_RSS_HF (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
			ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP)

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* Reversed version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	(((q) - 1) / 2)
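
/*
 * Tx and Rx rings of queue pair q are interleaved within a single IO queue
 * space: q maps to 2q (Tx) and 2q + 1 (Rx), and ENA_IO_RXQ_IDX_REV()
 * recovers q from an Rx index. E.g.:
 *
 *	ENA_IO_TXQ_IDX(3)	evaluates to 6
 *	ENA_IO_RXQ_IDX(3)	evaluates to 7
 *	ENA_IO_RXQ_IDX_REV(7)	evaluates to 3
 */
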
extern struct ena_shared_data *ena_shared_data;

struct ena_adapter;

enum ena_ring_type {
	ENA_RING_TYPE_RX = 1,
	ENA_RING_TYPE_TX = 2,
};

struct ena_tx_buffer {
	struct rte_mbuf *mbuf;
	unsigned int tx_descs;
	unsigned int num_of_bufs;
	struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
};

/* Rx buffer holds only a pointer to the mbuf - may be expanded in the future */
struct ena_rx_buffer {
	struct rte_mbuf *mbuf;
	struct ena_com_buf ena_buf;
};

struct ena_calc_queue_size_ctx {
	struct ena_com_dev_get_features_ctx *get_feat_ctx;
	struct ena_com_dev *ena_dev;
	u32 max_rx_queue_size;
	u32 max_tx_queue_size;
};

struct ena_stats_tx {
	u64 cnt;
	u64 bytes;
	u64 linearize;
	u64 linearize_failed;
};

struct ena_stats_rx {
	u64 cnt;
	u64 bytes;
};

struct ena_ring {
	u16 next_to_use;
	u16 next_to_clean;

	enum ena_ring_type type;
	enum ena_admin_placement_policy_type tx_mem_queue_type;

	/* Indicates there are Tx packets pushed to the device that await a doorbell */
	bool pkts_without_db;

	/* Holds the empty requests for Tx/Rx out-of-order (OOO) completions */
	union {
		uint16_t *empty_tx_reqs;
		uint16_t *empty_rx_reqs;
	};

	union {
		struct ena_tx_buffer *tx_buffer_info; /* context of Tx packet */
		struct ena_rx_buffer *rx_buffer_info; /* context of Rx packet */
	};
	struct rte_mbuf **rx_refill_buffer;
	unsigned int ring_size; /* number of tx/rx_buffer_info entries */
	unsigned int size_mask; /* ring_size - 1; the ring size is a power of two */

	struct ena_com_io_cq *ena_com_io_cq;
	struct ena_com_io_sq *ena_com_io_sq;

	struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]
		__rte_cache_aligned;

	struct rte_mempool *mb_pool;
	unsigned int port_id;

	/* Max length PMD can push to device for LLQ */
	uint8_t tx_max_header_size;

	uint8_t *push_buf_intermediate_buf;

	struct ena_adapter *adapter;

	bool disable_meta_caching;

	union {
		struct ena_stats_rx rx_stats;
		struct ena_stats_tx tx_stats;
	};

	unsigned int numa_socket_id;
} __rte_cache_aligned;

enum ena_adapter_state {
	ENA_ADAPTER_STATE_FREE = 0,
	ENA_ADAPTER_STATE_INIT = 1,
	ENA_ADAPTER_STATE_RUNNING = 2,
	ENA_ADAPTER_STATE_STOPPED = 3,
	ENA_ADAPTER_STATE_CONFIG = 4,
	ENA_ADAPTER_STATE_CLOSED = 5,
};

struct ena_driver_stats {
	rte_atomic64_t ierrors;
	rte_atomic64_t oerrors;
	rte_atomic64_t rx_nombuf;
};
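
/*
 * These counters are shared across threads, hence the atomics. A minimal
 * sketch (illustrative only) of accounting a failed Rx mbuf allocation:
 *
 *	rte_atomic64_inc(&adapter->drv_stats->rx_nombuf);
 */
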
struct ena_stats_dev {
	/*
	 * Tx drops cannot be reported as a driver statistic, because the DPDK
	 * rte_eth_stats structure doesn't provide an appropriate field for
	 * that. As a workaround, it is published as an extended statistic.
	 */
	u64 tx_drops;
};

struct ena_stats_eni {
	/*
	 * The number of packets shaped due to inbound aggregate BW
	 * allowance being exceeded
	 */
	uint64_t bw_in_allowance_exceeded;
	/*
	 * The number of packets shaped due to outbound aggregate BW
	 * allowance being exceeded
	 */
	uint64_t bw_out_allowance_exceeded;
	/* The number of packets shaped due to PPS allowance being exceeded */
	uint64_t pps_allowance_exceeded;
	/*
	 * The number of packets shaped due to connection tracking
	 * allowance being exceeded and leading to failure in establishment
	 * of new connections
	 */
	uint64_t conntrack_allowance_exceeded;
	/*
	 * The number of packets shaped due to linklocal packet rate
	 * allowance being exceeded
	 */
	uint64_t linklocal_allowance_exceeded;
};

struct ena_offloads {
	bool tx_csum_supported;
	bool rx_csum_supported;
	bool rss_hash_supported;
};

/* Board specific private data structure */
struct ena_adapter {
	/* OS defined structs */
	struct rte_eth_dev_data *edev_data;

	struct ena_com_dev ena_dev __rte_cache_aligned;

	struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
	u32 max_tx_ring_size;

	struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
	u32 max_rx_ring_size;

	u32 max_num_io_queues;

	struct ena_offloads offloads;

	/*
	 * The admin queue isn't protected by the lock and is used to
	 * retrieve statistics from the device. As there is no guarantee that
	 * the application won't try to get statistics from multiple threads,
	 * it is safer to lock the queue to avoid admin queue failure.
	 */
	rte_spinlock_t admin_lock;
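
	/*
	 * Sketch of the locking pattern described above (illustrative only;
	 * the actual calls live in the driver sources, and the basic-stats
	 * helper name is taken from ena_com.h):
	 *
	 *	rte_spinlock_lock(&adapter->admin_lock);
	 *	rc = ena_com_get_dev_basic_stats(&adapter->ena_dev, &stats);
	 *	rte_spinlock_unlock(&adapter->admin_lock);
	 */
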
	char name[ENA_NAME_MAX_LEN];
	u8 mac_addr[RTE_ETHER_ADDR_LEN];

	struct ena_driver_stats *drv_stats;
	enum ena_adapter_state state;

	uint64_t tx_supported_offloads;
	uint64_t tx_selected_offloads;
	uint64_t rx_supported_offloads;
	uint64_t rx_selected_offloads;

	enum ena_regs_reset_reason_types reset_reason;

	struct rte_timer timer_wd;
	uint64_t timestamp_wd;
	uint64_t keep_alive_timeout;

	struct ena_stats_dev dev_stats;
	struct ena_stats_eni eni_stats;

	bool use_large_llq_hdr;
};

int ena_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
int ena_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size);
int ena_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf);
int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf);
int ena_rss_configure(struct ena_adapter *adapter);
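
/*
 * Usage sketch (illustrative): these back the generic rte_eth RSS ops.
 * E.g. selecting all hash functions this device supports while keeping the
 * current 40-byte hash key (rss_key == NULL leaves the key unchanged):
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,
 *		.rss_key_len = ENA_HASH_KEY_SIZE,
 *		.rss_hf = ENA_ALL_RSS_HF,
 *	};
 *	int rc = ena_rss_hash_update(dev, &rss_conf);
 */
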
#endif /* _ENA_ETHDEV_H_ */