net/hns3: fix Rx buffer size
author     Wei Hu (Xavier) <xavier.huwei@huawei.com>
           Wed, 1 Jul 2020 11:54:40 +0000 (19:54 +0800)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Tue, 7 Jul 2020 21:38:26 +0000 (23:38 +0200)
Currently, the rx_buf_size of the hns3 PMD driver is fixed, and its value
depends on the firmware, which reduces the flexibility of the PMD.

The receive side mbufs are allocated from the mempool given by the upper
application when it calls the rte_eth_rx_queue_setup API function, so the
memory chunk used for net device DMA depends on the data room size of the
objects in this mempool. The hns3 PMD driver should therefore set
rx_buf_len no larger than the data room size of the mempool objects, and
the hardware only supports the following four specifications: 512, 1024,
2048 and 4096.
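
For illustration only (not part of this patch), below is a minimal sketch of
how an application-provided mempool bounds the Rx BD buffer size the PMD can
select; the pool name, pool sizes, port/queue ids and helper names are
hypothetical:

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/*
 * Hypothetical helper: with RTE_MBUF_DEFAULT_BUF_SIZE the data room is
 * 2048 bytes plus RTE_PKTMBUF_HEADROOM, so the PMD would round down to
 * the 2048-byte hardware specification.
 */
static struct rte_mempool *
create_rx_pool_example(int socket_id)
{
        return rte_pktmbuf_pool_create("rx_pool_example", 4096, 256, 0,
                                       RTE_MBUF_DEFAULT_BUF_SIZE,
                                       socket_id);
}

static int
setup_rx_queue_example(uint16_t port_id, uint16_t queue_id,
                       unsigned int socket_id)
{
        struct rte_mempool *mp = create_rx_pool_example((int)socket_id);

        if (mp == NULL)
                return -1;
        /*
         * The PMD derives rx_buf_len from
         * rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
         * a data room smaller than 512 bytes plus headroom is rejected.
         */
        return rte_eth_rx_queue_setup(port_id, queue_id, 1024, socket_id,
                                      NULL, mp);
}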

Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations")
Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
drivers/net/hns3/hns3_ethdev.c
drivers/net/hns3/hns3_ethdev.h
drivers/net/hns3/hns3_ethdev_vf.c
drivers/net/hns3/hns3_rxtx.c
drivers/net/hns3/hns3_rxtx.h

index b9868de..13ce324 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2450,7 +2450,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
        info->max_rx_queues = queue_num;
        info->max_tx_queues = hw->tqps_num;
        info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
-       info->min_rx_bufsize = hw->rx_buf_len;
+       info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
        info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
        info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
        info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
@@ -2848,7 +2848,6 @@ hns3_get_board_configuration(struct hns3_hw *hw)
        hw->mac.media_type = cfg.media_type;
        hw->rss_size_max = cfg.rss_size_max;
        hw->rss_dis_flag = false;
-       hw->rx_buf_len = cfg.rx_buf_len;
        memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
        hw->mac.phy_addr = cfg.phy_addr;
        hw->mac.default_addr_setted = false;
index c390263..3c991f4 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -375,7 +375,6 @@ struct hns3_hw {
        uint16_t tqps_num;          /* num task queue pairs of this function */
        uint16_t intr_tqps_num;     /* num queue pairs mapping interrupt */
        uint16_t rss_size_max;      /* HW defined max RSS task queue */
-       uint16_t rx_buf_len;
        uint16_t num_tx_desc;       /* desc num of per tx queue */
        uint16_t num_rx_desc;       /* desc num of per rx queue */
 
index 9c45ffa..3c5998a 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -902,7 +902,7 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
        info->max_rx_queues = q_num;
        info->max_tx_queues = hw->tqps_num;
        info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
-       info->min_rx_bufsize = hw->rx_buf_len;
+       info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
        info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
        info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
        info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
@@ -1096,8 +1096,6 @@ hns3vf_check_tqp_info(struct hns3_hw *hw)
                return -EINVAL;
        }
 
-       if (hw->rx_buf_len == 0)
-               hw->rx_buf_len = HNS3_DEFAULT_RX_BUF_LEN;
        hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, hw->tqps_num);
 
        return 0;
@@ -1162,7 +1160,6 @@ hns3vf_get_queue_info(struct hns3_hw *hw)
 
        memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
        memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
-       memcpy(&hw->rx_buf_len, &resp_msg[4], sizeof(uint16_t));
 
        return hns3vf_check_tqp_info(hw);
 }
index 0f9825f..931d89a 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -909,7 +909,7 @@ hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
        nb_rx_q = dev->data->nb_rx_queues;
        rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
                                (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
-       rxq->rx_buf_len = hw->rx_buf_len;
+       rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
 
        rte_spinlock_lock(&hw->lock);
        hw->fkq_data.rx_queues[idx] = rxq;
@@ -1185,6 +1185,48 @@ hns3_dev_release_mbufs(struct hns3_adapter *hns)
                }
 }
 
+static int
+hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
+{
+       uint16_t vld_buf_size;
+       uint16_t num_hw_specs;
+       uint16_t i;
+
+       /*
+        * The hns3 network engine only supports four typical BD buffer size
+        * specifications; the chosen buffer size affects the max packet
+        * length and the max number of segments when hardware GRO is
+        * enabled on the receive side. The relationship is as follows:
+        *      rx_buf_size     |  max_gro_pkt_len  |  max_gro_nb_seg
+        * ---------------------|-------------------|----------------
+        * HNS3_4K_BD_BUF_SIZE  |        60KB       |       15
+        * HNS3_2K_BD_BUF_SIZE  |        62KB       |       31
+        * HNS3_1K_BD_BUF_SIZE  |        63KB       |       63
+        * HNS3_512_BD_BUF_SIZE |      31.5KB       |       63
+        */
+       static const uint16_t hw_rx_buf_size[] = {
+               HNS3_4K_BD_BUF_SIZE,
+               HNS3_2K_BD_BUF_SIZE,
+               HNS3_1K_BD_BUF_SIZE,
+               HNS3_512_BD_BUF_SIZE
+       };
+
+       vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
+                       RTE_PKTMBUF_HEADROOM);
+
+       if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
+               return -EINVAL;
+
+       num_hw_specs = RTE_DIM(hw_rx_buf_size);
+       for (i = 0; i < num_hw_specs; i++) {
+               if (vld_buf_size >= hw_rx_buf_size[i]) {
+                       *rx_buf_len = hw_rx_buf_size[i];
+                       break;
+               }
+       }
+       return 0;
+}
+
 int
 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
                    unsigned int socket_id, const struct rte_eth_rxconf *conf,
@@ -1194,6 +1236,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        struct hns3_hw *hw = &hns->hw;
        struct hns3_queue_info q_info;
        struct hns3_rx_queue *rxq;
+       uint16_t rx_buf_size;
        int rx_entry_len;
 
        if (dev->data->dev_started) {
@@ -1218,6 +1261,15 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        q_info.nb_desc = nb_desc;
        q_info.type = "hns3 RX queue";
        q_info.ring_name = "rx_ring";
+
+       if (hns3_rx_buf_len_calc(mp, &rx_buf_size)) {
+               hns3_err(hw, "rxq mbufs' data room size:%u is not enough! "
+                               "minimal data room size:%u.",
+                               rte_pktmbuf_data_room_size(mp),
+                               HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
+               return -EINVAL;
+       }
+
        rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
        if (rxq == NULL) {
                hns3_err(hw,
@@ -1252,7 +1304,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        rxq->configured = true;
        rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
                                idx * HNS3_TQP_REG_SIZE);
-       rxq->rx_buf_len = hw->rx_buf_len;
+       rxq->rx_buf_len = rx_buf_size;
        rxq->l2_errors = 0;
        rxq->pkt_len_errors = 0;
        rxq->l3_csum_erros = 0;
index b85c64f..ccd508b 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
 #define        HNS3_ALIGN_RING_DESC    32
 #define HNS3_RING_BASE_ALIGN   128
 
+#define HNS3_512_BD_BUF_SIZE   512
+#define HNS3_1K_BD_BUF_SIZE    1024
+#define HNS3_2K_BD_BUF_SIZE    2048
+#define HNS3_4K_BD_BUF_SIZE    4096
+
+#define HNS3_MIN_BD_BUF_SIZE   HNS3_512_BD_BUF_SIZE
+#define HNS3_MAX_BD_BUF_SIZE   HNS3_4K_BD_BUF_SIZE
+
 #define HNS3_BD_SIZE_512_TYPE                  0
 #define HNS3_BD_SIZE_1024_TYPE                 1
 #define HNS3_BD_SIZE_2048_TYPE                 2