#define ENA_WD_TIMEOUT_SEC 3
#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
+/* While processing submitted and completed descriptors (rx and tx path
+ * respectively) in a loop it is desired to:
+ * - perform batch submissions while populating the submission queue
+ * - avoid blocking transmission of other packets during cleanup phase
+ * Hence a threshold of 1/8 of the queue size is used, capped at a fixed
+ * maximum value if the size of the ring is very big - like 8k Rx rings.
+ */
+#define ENA_REFILL_THRESH_DIVIDER 8
+#define ENA_REFILL_THRESH_PACKET 256
+
+#define ENA_IDX_NEXT_MASKED(idx, mask) (((idx) + 1) & (mask))
+#define ENA_IDX_ADD_MASKED(idx, n, mask) (((idx) + (n)) & (mask))
+
struct ena_adapter;
enum ena_ring_type {
struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
};
+/* Rx buffer holds only pointer to the mbuf - may be expanded in the future */
+struct ena_rx_buffer {
+ struct rte_mbuf *mbuf;
+ struct ena_com_buf ena_buf;
+};
+
struct ena_calc_queue_size_ctx {
struct ena_com_dev_get_features_ctx *get_feat_ctx;
struct ena_com_dev *ena_dev;
enum ena_ring_type type;
enum ena_admin_placement_policy_type tx_mem_queue_type;
+
+	/* Indicate there are Tx packets pushed to the device and wait for the
+	 * doorbell
+	 */
+ bool pkts_without_db;
+
/* Holds the empty requests for TX/RX OOO completions */
union {
uint16_t *empty_tx_reqs;
union {
struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */
- struct rte_mbuf **rx_buffer_info; /* contex of rx packet */
+	struct ena_rx_buffer *rx_buffer_info; /* context of rx packet */
};
struct rte_mbuf **rx_refill_buffer;
unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
+ unsigned int size_mask;
struct ena_com_io_cq *ena_com_io_cq;
struct ena_com_io_sq *ena_com_io_sq;
uint64_t offloads;
u16 sgl_size;
+ bool disable_meta_caching;
+
union {
struct ena_stats_rx rx_stats;
struct ena_stats_tx tx_stats;
u64 tx_drops;
};
+struct ena_stats_eni {
+ /*
+ * The number of packets shaped due to inbound aggregate BW
+ * allowance being exceeded
+ */
+ uint64_t bw_in_allowance_exceeded;
+ /*
+ * The number of packets shaped due to outbound aggregate BW
+ * allowance being exceeded
+ */
+ uint64_t bw_out_allowance_exceeded;
+ /* The number of packets shaped due to PPS allowance being exceeded */
+ uint64_t pps_allowance_exceeded;
+ /*
+ * The number of packets shaped due to connection tracking
+ * allowance being exceeded and leading to failure in establishment
+ * of new connections
+ */
+ uint64_t conntrack_allowance_exceeded;
+ /*
+ * The number of packets shaped due to linklocal packet rate
+ * allowance being exceeded
+ */
+ uint64_t linklocal_allowance_exceeded;
+};
+
struct ena_offloads {
bool tso4_supported;
bool tx_csum_supported;
/* board specific private data structure */
struct ena_adapter {
/* OS defined structs */
- struct rte_pci_device *pdev;
- struct rte_eth_dev_data *rte_eth_dev_data;
- struct rte_eth_dev *rte_dev;
+ struct rte_eth_dev_data *edev_data;
struct ena_com_dev ena_dev __rte_cache_aligned;
u16 max_mtu;
struct ena_offloads offloads;
+ /* The admin queue isn't protected by the lock and is used to
+ * retrieve statistics from the device. As there is no guarantee that
+ * application won't try to get statistics from multiple threads, it is
+ * safer to lock the queue to avoid admin queue failure.
+ */
+ rte_spinlock_t admin_lock;
+
int id_number;
char name[ENA_NAME_MAX_LEN];
u8 mac_addr[RTE_ETHER_ADDR_LEN];
uint64_t keep_alive_timeout;
struct ena_stats_dev dev_stats;
+ struct ena_stats_eni eni_stats;
bool trigger_reset;