#define ENA_WD_TIMEOUT_SEC 3
#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
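The keep-alive timeout above is expressed in timer cycles, so it can be compared directly against deltas of rte_get_timer_cycles(). A minimal sketch of that check, with the surrounding watchdog wiring assumed rather than taken from this patch:

	#include <stdbool.h>
	#include <rte_cycles.h>

	bool trigger_reset = false;
	uint64_t timestamp = rte_get_timer_cycles(); /* refreshed on every keep-alive event */

	/* later, from the watchdog timer: */
	if ((rte_get_timer_cycles() - timestamp) >= ENA_DEVICE_KALIVE_TIMEOUT)
		trigger_reset = true; /* device missed its keep-alive window */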
+/* While processing submitted and completed descriptors (Rx and Tx path,
+ * respectively) in a loop, it is desirable to:
+ * - perform batch submissions while populating the submission queue,
+ * - avoid blocking transmission of other packets during the cleanup phase.
+ * Hence the threshold is 1/8 of the queue size, capped at
+ * ENA_REFILL_THRESH_PACKET for very big rings (like 8k Rx rings).
+ */
+#define ENA_REFILL_THRESH_DIVIDER 8
+#define ENA_REFILL_THRESH_PACKET 256
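A sketch of how these two constants combine into a per-burst budget; the helper name here is hypothetical, and the driver may equally compute the value inline where it is needed:

	#include <rte_common.h> /* RTE_MIN */

	/* 1/8 of the ring, but never more than 256 packets per burst */
	static inline unsigned int
	ena_refill_threshold(unsigned int ring_size)
	{
		return RTE_MIN(ring_size / ENA_REFILL_THRESH_DIVIDER,
			       (unsigned int)ENA_REFILL_THRESH_PACKET);
	}

For an 8k Rx ring this yields min(8192/8, 256) = 256, so a very large ring no longer monopolizes the submission or cleanup loop.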
+
+#define ENA_IDX_NEXT_MASKED(idx, mask) (((idx) + 1) & (mask))
+#define ENA_IDX_ADD_MASKED(idx, n, mask) (((idx) + (n)) & (mask))
+
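These masked-index helpers replace modulo arithmetic and assume a power-of-2 ring size with mask == ring_size - 1 (see size_mask below). An illustrative ring walk, with a hypothetical 1024-entry ring:

	uint16_t next_to_use = 0;
	const uint16_t mask = 1024 - 1; /* size_mask of a 1024-entry ring */

	/* advance one descriptor, wrapping at the end of the ring */
	next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, mask);
	/* advance past a burst of 32 descriptors in one step */
	next_to_use = ENA_IDX_ADD_MASKED(next_to_use, 32, mask);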
struct ena_adapter;
enum ena_ring_type {
struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
};
+/* Rx buffer holds the mbuf and its ena_com_buf descriptor - may be
+ * expanded in the future
+ */
+struct ena_rx_buffer {
+ struct rte_mbuf *mbuf;
+ struct ena_com_buf ena_buf;
+};
+
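A sketch of how an Rx refill path might populate one of these entries; the function name is hypothetical, and the headroom handling assumes the default rte_mbuf layout:

	#include <rte_mbuf.h>

	static void
	ena_rx_buffer_fill(struct ena_rx_buffer *rx_info, struct rte_mbuf *mbuf)
	{
		rx_info->mbuf = mbuf;
		/* point the device at the data area, past the headroom */
		rx_info->ena_buf.paddr = rte_mbuf_data_iova_default(mbuf);
		rx_info->ena_buf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
	}

Keeping the ena_com_buf next to the mbuf pointer lets the refill path hand the descriptor straight to ena_com without rebuilding it per packet.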
struct ena_calc_queue_size_ctx {
struct ena_com_dev_get_features_ctx *get_feat_ctx;
struct ena_com_dev *ena_dev;
union {
struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */
- struct rte_mbuf **rx_buffer_info; /* contex of rx packet */
+ struct ena_rx_buffer *rx_buffer_info; /* context of rx packet */
};
struct rte_mbuf **rx_refill_buffer;
unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
+ unsigned int size_mask; /* ring_size - 1; ring size must be a power of 2 */
struct ena_com_io_cq *ena_com_io_cq;
struct ena_com_io_sq *ena_com_io_sq;