#define ENA_MEM_BAR 2
#define ENA_MAX_NUM_QUEUES 128
-#define ENA_DEFAULT_RING_SIZE (1024)
#define ENA_MIN_FRAME_LEN 64
#define ENA_NAME_MAX_LEN 20
#define ENA_PKT_MAX_BUFS 17
+#define ENA_MIN_MTU 128
+
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
#define ENA_WD_TIMEOUT_SEC 3
struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
};
+/*
+ * Carries queue-sizing inputs and results in one place:
+ * device handle and feature descriptor in; per-direction ring depths
+ * and maximum scatter-gather list lengths out.
+ * NOTE(review): presumably populated by a queue-size calculation
+ * helper in the .c file — the caller is not visible in this chunk.
+ */
+struct ena_calc_queue_size_ctx {
+ struct ena_com_dev_get_features_ctx *get_feat_ctx;
+ struct ena_com_dev *ena_dev;
+ u16 rx_queue_size;
+ u16 tx_queue_size;
+ u16 max_tx_sgl_size;
+ u16 max_rx_sgl_size;
+};
+
+/*
+ * Per-TX-queue statistics counters (embedded in struct ena_ring's
+ * stats union). Field names describe the counted event; the exact
+ * increment sites live in the datapath .c file, not visible here.
+ */
+struct ena_stats_tx {
+ u64 cnt;
+ u64 bytes;
+ u64 prepare_ctx_err;
+ u64 linearize;
+ u64 linearize_failed;
+ u64 tx_poll;
+ u64 doorbells;
+ u64 bad_req_id;
+ u64 available_desc;
+};
+
+/*
+ * Per-RX-queue statistics counters (embedded in struct ena_ring's
+ * stats union). Field names describe the counted event; increment
+ * sites are in the datapath .c file, not visible here.
+ */
+struct ena_stats_rx {
+ u64 cnt;
+ u64 bytes;
+ u64 refill_partial;
+ u64 bad_csum;
+ u64 mbuf_alloc_fail;
+ u64 bad_desc_num;
+ u64 bad_req_id;
+};
+
struct ena_ring {
u16 next_to_use;
u16 next_to_clean;
enum ena_ring_type type;
enum ena_admin_placement_policy_type tx_mem_queue_type;
- /* Holds the empty requests for TX OOO completions */
- uint16_t *empty_tx_reqs;
+ /* Holds the empty requests for TX/RX OOO completions */
+ union {
+ uint16_t *empty_tx_reqs;
+ uint16_t *empty_rx_reqs;
+ };
+
union {
struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */
struct rte_mbuf **rx_buffer_info; /* contex of rx packet */
};
+ struct rte_mbuf **rx_refill_buffer;
unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
struct ena_com_io_cq *ena_com_io_cq;
/* Max length PMD can push to device for LLQ */
uint8_t tx_max_header_size;
int configured;
+
+ uint8_t *push_buf_intermediate_buf;
+
struct ena_adapter *adapter;
uint64_t offloads;
+ u16 sgl_size;
+
+ union {
+ struct ena_stats_rx rx_stats;
+ struct ena_stats_tx tx_stats;
+ };
} __rte_cache_aligned;
enum ena_adapter_state {
rte_atomic64_t ierrors;
rte_atomic64_t oerrors;
rte_atomic64_t rx_nombuf;
+ rte_atomic64_t rx_drops;
};
struct ena_stats_dev {
- u64 tx_timeout;
- u64 io_suspend;
- u64 io_resume;
u64 wd_expired;
- u64 interface_up;
- u64 interface_down;
- u64 admin_q_pause;
-};
-
-struct ena_stats_tx {
- u64 cnt;
- u64 bytes;
- u64 queue_stop;
- u64 prepare_ctx_err;
- u64 queue_wakeup;
- u64 dma_mapping_err;
- u64 linearize;
- u64 linearize_failed;
- u64 tx_poll;
- u64 doorbells;
- u64 missing_tx_comp;
- u64 bad_req_id;
-};
-
-struct ena_stats_rx {
- u64 cnt;
- u64 bytes;
- u64 refil_partial;
- u64 bad_csum;
- u64 page_alloc_fail;
- u64 skb_alloc_fail;
- u64 dma_mapping_err;
- u64 bad_desc_num;
- u64 small_copy_len_pkt;
+ /* Device start/stop invocation counters — NOTE(review): confirm
+  * the increment sites in the .c file; not visible in this chunk.
+  */
+ u64 dev_start;
+ u64 dev_stop;
};
/* board specific private data structure */
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
int tx_ring_size;
+ u16 max_tx_sgl_size;
/* RX */
struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
int rx_ring_size;
+ u16 max_rx_sgl_size;
u16 num_queues;
u16 max_mtu;
uint64_t timestamp_wd;
uint64_t keep_alive_timeout;
+ struct ena_stats_dev dev_stats;
+
bool trigger_reset;
bool wd_state;