/*reverse version of ENA_IO_RXQ_IDX*/
#define ENA_IO_RXQ_IDX_REV(q) ((q - 1) / 2)
-/* While processing submitted and completed descriptors (rx and tx path
- * respectively) in a loop it is desired to:
- * - perform batch submissions while populating sumbissmion queue
- * - avoid blocking transmission of other packets during cleanup phase
- * Hence the utilization ratio of 1/8 of a queue size.
- */
-#define ENA_RING_DESCS_RATIO(ring_size) (ring_size / 8)
-
#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))
struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
unsigned int ring_size = rx_ring->ring_size;
unsigned int ring_mask = ring_size - 1;
- unsigned int refill_required;
+ unsigned int free_queue_entries;
+ unsigned int refill_threshold;
uint16_t next_to_clean = rx_ring->next_to_clean;
uint16_t descs_in_use;
struct rte_mbuf *mbuf;
rx_ring->rx_stats.cnt += completed;
rx_ring->next_to_clean = next_to_clean;
- refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
+ free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
+ refill_threshold =
+ RTE_MIN(ring_size / ENA_REFILL_THRESH_DIVIDER,
+ (unsigned int)ENA_REFILL_THRESH_PACKET);
+
/* Burst refill to save doorbells, memory barriers, const interval */
- if (refill_required > ENA_RING_DESCS_RATIO(ring_size)) {
+ if (free_queue_entries > refill_threshold) {
ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
- ena_populate_rx_queue(rx_ring, refill_required);
+ ena_populate_rx_queue(rx_ring, free_queue_entries);
}
return completed;
uint16_t seg_len;
unsigned int ring_size = tx_ring->ring_size;
unsigned int ring_mask = ring_size - 1;
+ unsigned int cleanup_budget;
struct ena_com_tx_ctx ena_tx_ctx;
struct ena_tx_buffer *tx_info;
struct ena_com_buf *ebuf;
/* Put back descriptor to the ring for reuse */
tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
next_to_clean++;
+ cleanup_budget =
+ RTE_MIN(ring_size / ENA_REFILL_THRESH_DIVIDER,
+ (unsigned int)ENA_REFILL_THRESH_PACKET);
/* If too many descs to clean, leave it for another run */
- if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
+ if (unlikely(total_tx_descs > cleanup_budget))
break;
}
tx_ring->tx_stats.available_desc =
#define ENA_WD_TIMEOUT_SEC 3
#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
+/* While processing submitted and completed descriptors (rx and tx path
+ * respectively) in a loop it is desired to:
+ * - perform batch submissions while populating the submission queue
+ * - avoid blocking transmission of other packets during the cleanup phase
+ * Hence the utilization ratio of 1/8 of a queue size, capped at a fixed
+ * maximum for very big rings (like 8k Rx rings).
+ */
+#define ENA_REFILL_THRESH_DIVIDER 8
+#define ENA_REFILL_THRESH_PACKET 256
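+/* For illustration, with the values above: a 1024-entry ring yields a
+ * refill/cleanup threshold of 1024 / 8 = 128 descriptors, while an
+ * 8192-entry ring would yield 1024 and is therefore capped at
+ * ENA_REFILL_THRESH_PACKET (256).
+ */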
+
struct ena_adapter;
enum ena_ring_type {