u16 max_rx_sgl_size;
};
+/* Per-Tx-queue statistics counters.
+ * NOTE(review): updated lock-free on the datapath — presumably each
+ * queue is serviced by a single lcore; confirm against the Tx path.
+ */
+struct ena_stats_tx {
+ u64 cnt;
+ u64 bytes;
+ u64 prepare_ctx_err;
+ /* Mbuf-chain linearization attempts and failures (chains longer
+  * than the device SGL limit).
+  */
+ u64 linearize;
+ u64 linearize_failed;
+ u64 tx_poll;
+ u64 doorbells;
+ u64 bad_req_id;
+ u64 available_desc;
+};
+
+/* Per-Rx-queue statistics counters.
+ * NOTE(review): like ena_stats_tx, these appear to be updated without
+ * locking from the queue's polling context — confirm.
+ */
+struct ena_stats_rx {
+ u64 cnt;
+ u64 bytes;
+ /* Refill attempts that could not allocate a full batch of buffers. */
+ u64 refill_partial;
+ u64 bad_csum;
+ u64 mbuf_alloc_fail;
+ u64 bad_desc_num;
+ u64 bad_req_id;
+};
+
struct ena_ring {
u16 next_to_use;
u16 next_to_clean;
/* Max length PMD can push to device for LLQ */
uint8_t tx_max_header_size;
int configured;
+
+ /* Intermediate bounce buffer for building LLQ push descriptors
+  * before writing them to the device.
+  * NOTE(review): allocation/lifetime not visible in this hunk — confirm
+  * it is sized for tx_max_header_size and freed on queue release.
+  */
+ uint8_t *push_buf_intermediate_buf;
+
struct ena_adapter *adapter;
uint64_t offloads;
u16 sgl_size;
+
+ /* A ring is either Rx or Tx, never both, so the two counter sets
+  * share storage via an anonymous union (C11, already used by DPDK).
+  */
+ union {
+ struct ena_stats_rx rx_stats;
+ struct ena_stats_tx tx_stats;
+ };
} __rte_cache_aligned;
enum ena_adapter_state {
rte_atomic64_t ierrors;
rte_atomic64_t oerrors;
rte_atomic64_t rx_nombuf;
+ rte_atomic64_t rx_drops;
};
struct ena_stats_dev {
- u64 tx_timeout;
- u64 io_suspend;
- u64 io_resume;
 u64 wd_expired;
- u64 interface_up;
- u64 interface_down;
- u64 admin_q_pause;
-};
-
-struct ena_stats_tx {
- u64 cnt;
- u64 bytes;
- u64 queue_stop;
- u64 prepare_ctx_err;
- u64 queue_wakeup;
- u64 dma_mapping_err;
- u64 linearize;
- u64 linearize_failed;
- u64 tx_poll;
- u64 doorbells;
- u64 missing_tx_comp;
- u64 bad_req_id;
+ /* Counts of device start/stop events.
+  * NOTE(review): presumably bumped in the dev_start/dev_stop ops —
+  * confirm against the .c file of this patch.
+  */
+ u64 dev_start;
+ u64 dev_stop;
};
-struct ena_stats_rx {
- u64 cnt;
- u64 bytes;
- u64 refil_partial;
- u64 bad_csum;
- u64 page_alloc_fail;
- u64 skb_alloc_fail;
- u64 dma_mapping_err;
- u64 bad_desc_num;
- u64 small_copy_len_pkt;
+/* Offload capabilities of the device, cached as booleans.
+ * NOTE(review): presumably filled from the device feature query at
+ * init time — confirm where these are set.
+ */
+struct ena_offloads {
+ bool tso4_supported;
+ bool tx_csum_supported;
+ bool rx_csum_supported;
+};
/* board specific private data structure */
u16 num_queues;
u16 max_mtu;
- u8 tso4_supported;
+ /* Replaces the single tso4_supported flag with the grouped
+  * capability set (see struct ena_offloads).
+  */
+ struct ena_offloads offloads;
int id_number;
char name[ENA_NAME_MAX_LEN];
uint64_t timestamp_wd;
uint64_t keep_alive_timeout;
+ /* Device-wide statistics (watchdog expiry, start/stop counts). */
+ struct ena_stats_dev dev_stats;
+
bool trigger_reset;
bool wd_state;