net/hns3: report Rx free threshold
author Wei Hu (Xavier) <xavier.huwei@huawei.com>
Wed, 9 Sep 2020 09:23:32 +0000 (17:23 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Mon, 21 Sep 2020 16:05:38 +0000 (18:05 +0200)
This patch reports the default .rx_free_thresh value in the .dev_infos_get
ops implementation functions, hns3_dev_infos_get and hns3vf_dev_infos_get.
In addition, a member variable of struct hns3_rx_queue is renamed and
comments are added to improve code readability.
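
For context, a minimal usage sketch (not part of this patch) of how an
application can consume the reported default: read it back via
rte_eth_dev_info_get() and reuse it when setting up an Rx queue. The port id,
queue size and mempool below are placeholder values.

#include <rte_ethdev.h>

/* Query the PMD-reported Rx defaults and reuse them for queue 0. */
static int
setup_rx_queue_with_defaults(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* With this patch, hns3 reports rx_free_thresh = 32 and rx_drop_en = 1. */
	rxconf = dev_info.default_rxconf;

	return rte_eth_rx_queue_setup(port_id, 0, 1024,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mb_pool);
}

Passing a NULL rx_conf to rte_eth_rx_queue_setup() has the same effect, since
ethdev falls back to dev_info.default_rxconf in that case.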

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
drivers/net/hns3/hns3_ethdev.c
drivers/net/hns3/hns3_ethdev_vf.c
drivers/net/hns3/hns3_rxtx.c
drivers/net/hns3/hns3_rxtx.h

index b6ae69f..09d96c6 100644
@@ -2500,12 +2500,14 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
        };
 
        info->default_rxconf = (struct rte_eth_rxconf) {
+               .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
                /*
                 * If there are no available Rx buffer descriptors, incoming
                 * packets are always dropped by hardware based on hns3 network
                 * engine.
                 */
                .rx_drop_en = 1,
+               .offloads = 0,
        };
 
        info->vmdq_queue_num = 0;
index bea3695..c93d35e 100644
@@ -943,12 +943,14 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
        };
 
        info->default_rxconf = (struct rte_eth_rxconf) {
+               .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
                /*
                 * If there are no available Rx buffer descriptors, incoming
                 * packets are always dropped by hardware based on hns3 network
                 * engine.
                 */
                .rx_drop_en = 1,
+               .offloads = 0,
        };
 
        info->vmdq_queue_num = 0;
index 308d0a6..fe2a7a4 100644
@@ -652,8 +652,7 @@ hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
        }
 
        rxq->next_to_use = 0;
-       rxq->next_to_clean = 0;
-       rxq->nb_rx_hold = 0;
+       rxq->rx_free_hold = 0;
        hns3_init_rx_queue_hw(rxq);
 
        return 0;
@@ -667,8 +666,7 @@ hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 
        rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
        rxq->next_to_use = 0;
-       rxq->next_to_clean = 0;
-       rxq->nb_rx_hold = 0;
+       rxq->rx_free_hold = 0;
        hns3_init_rx_queue_hw(rxq);
 }
 
@@ -1303,10 +1301,8 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 
        rxq->hns = hns;
        rxq->mb_pool = mp;
-       if (conf->rx_free_thresh <= 0)
-               rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;
-       else
-               rxq->rx_free_thresh = conf->rx_free_thresh;
+       rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
+               conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
        rxq->rx_deferred_start = conf->rx_deferred_start;
 
        rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc;
@@ -1319,8 +1315,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        }
 
        rxq->next_to_use = 0;
-       rxq->next_to_clean = 0;
-       rxq->nb_rx_hold = 0;
+       rxq->rx_free_hold = 0;
        rxq->pkt_first_seg = NULL;
        rxq->pkt_last_seg = NULL;
        rxq->port_id = dev->data->port_id;
@@ -1656,11 +1651,11 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        nb_rx_bd = 0;
        rxq = rx_queue;
 
-       rx_id = rxq->next_to_clean;
+       rx_id = rxq->next_to_use;
        rx_ring = rxq->rx_ring;
+       sw_ring = rxq->sw_ring;
        first_seg = rxq->pkt_first_seg;
        last_seg = rxq->pkt_last_seg;
-       sw_ring = rxq->sw_ring;
 
        while (nb_rx < nb_pkts) {
                rxdp = &rx_ring[rx_id];
@@ -1843,16 +1838,15 @@ pkt_err:
                first_seg = NULL;
        }
 
-       rxq->next_to_clean = rx_id;
+       rxq->next_to_use = rx_id;
        rxq->pkt_first_seg = first_seg;
        rxq->pkt_last_seg = last_seg;
 
-       nb_rx_bd = nb_rx_bd + rxq->nb_rx_hold;
-       if (nb_rx_bd > rxq->rx_free_thresh) {
-               hns3_clean_rx_buffers(rxq, nb_rx_bd);
-               nb_rx_bd = 0;
+       rxq->rx_free_hold += nb_rx_bd;
+       if (rxq->rx_free_hold > rxq->rx_free_thresh) {
+               hns3_clean_rx_buffers(rxq, rxq->rx_free_hold);
+               rxq->rx_free_hold = 0;
        }
-       rxq->nb_rx_hold = nb_rx_bd;
 
        return nb_rx;
 }
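
The change above is the usual Rx free-threshold batching pattern: BDs consumed
by each burst are accumulated in rx_free_hold, and they are only handed back
to hardware once rx_free_thresh is exceeded, so the head register is written
once per batch instead of once per burst. A simplified, illustrative
restatement of what the end of hns3_recv_pkts() now does (this is not a
separate helper in the driver):

static inline void
rx_free_hold_update(struct hns3_rx_queue *rxq, uint16_t nb_rx_bd)
{
	/* Accumulate BDs consumed by this burst. */
	rxq->rx_free_hold += nb_rx_bd;

	/*
	 * Hand the BDs back to hardware only when the threshold is
	 * exceeded, limiting head-register writes to one per batch.
	 */
	if (rxq->rx_free_hold > rxq->rx_free_thresh) {
		hns3_clean_rx_buffers(rxq, rxq->rx_free_hold);
		rxq->rx_free_hold = 0;
	}
}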
index 4b3269b..a2d6514 100644
@@ -10,6 +10,7 @@
 #define HNS3_DEFAULT_RING_DESC  1024
 #define        HNS3_ALIGN_RING_DESC    32
 #define HNS3_RING_BASE_ALIGN   128
+#define HNS3_DEFAULT_RX_FREE_THRESH    32
 
 #define HNS3_512_BD_BUF_SIZE   512
 #define HNS3_1K_BD_BUF_SIZE    1024
@@ -243,12 +244,14 @@ struct hns3_rx_queue {
        uint16_t queue_id;
        uint16_t port_id;
        uint16_t nb_rx_desc;
-       uint16_t nb_rx_hold;
-       uint16_t rx_tail;
-       uint16_t next_to_clean;
        uint16_t next_to_use;
        uint16_t rx_buf_len;
+       /*
+        * Threshold for the number of BDs waiting to be passed to hardware.
+        * When the number exceeds the threshold, the driver passes these BDs
+        * to hardware.
+        */
        uint16_t rx_free_thresh;
+       uint16_t rx_free_hold;   /* num of BDs waiting to be passed to hardware */
 
        /*
         * port based vlan configuration state.