net/ena: limit refill threshold by fixed value
author Michal Krawczyk <mk@semihalf.com>
Wed, 8 Apr 2020 08:29:16 +0000 (10:29 +0200)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 21 Apr 2020 11:57:07 +0000 (13:57 +0200)
The divider used for both the Tx and Rx cleanup/refill thresholds can
cause too big a delay for really big rings - for example, with an 8k Rx
ring, the refill won't trigger until the threshold of 1024 free
descriptors is reached. It will also cause the driver to try to
allocate that many descriptors in a single call.

Capping it at a fixed value - 256 in this case - limits the maximum
time spent in the repopulate function.
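
As a rough illustration (not part of the patch; the helper name and the
example ring sizes below are hypothetical), the capped threshold
behaves like this standalone C sketch:

    #include <stdio.h>

    #define ENA_REFILL_THRESH_DIVIDER 8
    #define ENA_REFILL_THRESH_PACKET  256

    /* Hypothetical helper mirroring the RTE_MIN() expression from the
     * patch: 1/8 of the ring size, capped at 256 descriptors.
     */
    static unsigned int ena_refill_threshold(unsigned int ring_size)
    {
            unsigned int ratio = ring_size / ENA_REFILL_THRESH_DIVIDER;

            return ratio < ENA_REFILL_THRESH_PACKET ?
                    ratio : (unsigned int)ENA_REFILL_THRESH_PACKET;
    }

    int main(void)
    {
            printf("%u\n", ena_refill_threshold(1024)); /* 128: below the cap */
            printf("%u\n", ena_refill_threshold(8192)); /* 256: capped, was 1024 */
            return 0;
    }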

Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Guy Tzalik <gtzalik@amazon.com>
drivers/net/ena/ena_ethdev.c
drivers/net/ena/ena_ethdev.h

index 9d76ebb..7804a5c 100644
 /*reverse version of ENA_IO_RXQ_IDX*/
 #define ENA_IO_RXQ_IDX_REV(q)  ((q - 1) / 2)
 
-/* While processing submitted and completed descriptors (rx and tx path
- * respectively) in a loop it is desired to:
- *  - perform batch submissions while populating sumbissmion queue
- *  - avoid blocking transmission of other packets during cleanup phase
- * Hence the utilization ratio of 1/8 of a queue size.
- */
-#define ENA_RING_DESCS_RATIO(ring_size)        (ring_size / 8)
-
 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
 #define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))
 
@@ -2146,7 +2138,8 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
        unsigned int ring_size = rx_ring->ring_size;
        unsigned int ring_mask = ring_size - 1;
-       unsigned int refill_required;
+       unsigned int free_queue_entries;
+       unsigned int refill_threshold;
        uint16_t next_to_clean = rx_ring->next_to_clean;
        uint16_t descs_in_use;
        struct rte_mbuf *mbuf;
@@ -2215,11 +2208,15 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        rx_ring->rx_stats.cnt += completed;
        rx_ring->next_to_clean = next_to_clean;
 
-       refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
+       free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
+       refill_threshold =
+               RTE_MIN(ring_size / ENA_REFILL_THRESH_DIVIDER,
+               (unsigned int)ENA_REFILL_THRESH_PACKET);
+
        /* Burst refill to save doorbells, memory barriers, const interval */
-       if (refill_required > ENA_RING_DESCS_RATIO(ring_size)) {
+       if (free_queue_entries > refill_threshold) {
                ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
-               ena_populate_rx_queue(rx_ring, refill_required);
+               ena_populate_rx_queue(rx_ring, free_queue_entries);
        }
 
        return completed;
@@ -2358,6 +2355,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t seg_len;
        unsigned int ring_size = tx_ring->ring_size;
        unsigned int ring_mask = ring_size - 1;
+       unsigned int cleanup_budget;
        struct ena_com_tx_ctx ena_tx_ctx;
        struct ena_tx_buffer *tx_info;
        struct ena_com_buf *ebuf;
@@ -2515,9 +2513,12 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                /* Put back descriptor to the ring for reuse */
                tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
                next_to_clean++;
+               cleanup_budget =
+                       RTE_MIN(ring_size / ENA_REFILL_THRESH_DIVIDER,
+                       (unsigned int)ENA_REFILL_THRESH_PACKET);
 
                /* If too many descs to clean, leave it for another run */
-               if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
+               if (unlikely(total_tx_descs > cleanup_budget))
                        break;
        }
        tx_ring->tx_stats.available_desc =
index 6bcca08..13d87d4 100644
 #define ENA_WD_TIMEOUT_SEC     3
 #define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
 
+/* While processing submitted and completed descriptors (Rx and Tx path
+ * respectively) in a loop, it is desired to:
+ *  - perform batch submissions while populating the submission queue
+ *  - avoid blocking transmission of other packets during cleanup phase
+ * Hence the utilization ratio of 1/8 of a queue size, capped at a fixed
+ * value for very big rings - like 8k Rx rings.
+ */
+#define ENA_REFILL_THRESH_DIVIDER      8
+#define ENA_REFILL_THRESH_PACKET       256
+
 struct ena_adapter;
 
 enum ena_ring_type {