In the current hns3 PMD driver, there are some RSS related bugs in the
following scenarios:
1. Start the application with a single Rx queue (--rxq=1), then change
the number of Rx queues to a value greater than 1 during normal
operation. As a result, the upper application cannot receive packets
from the multiple Rx queues.
2. Start the testpmd application with RSS disabled and more than one
Rx queue (--disable-rss --rxq=N, N > 1). As a result, the upper
application still receives packets from multiple Rx queues. (Both
scenarios can be reproduced with the testpmd commands sketched below.)
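For reference, the two scenarios can be reproduced roughly as follows;
the command lines are only illustrative, the EAL options and the queue
count of 4 are arbitrary choices:
  Scenario 1:
    testpmd <EAL options> -- -i --rxq=1 --txq=1
    testpmd> port stop all
    testpmd> port config all rxq 4
    testpmd> port start all
    testpmd> start
  Scenario 2:
    testpmd <EAL options> -- -i --disable-rss --rxq=4 --txq=4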
The root cause is as follows:
there are erroneous configurations in the RSS indirection table written
to the hns3 network engine.
This patch fixes them with the following modifications.
1. When the RSS size is changed, the RSS redirection table maintained
by the driver needs to be updated and configured to hardware (see the
sketch after this list). Besides, during the entire reset process, the
RSS table information must not be overwritten, and it is configured
directly to the hardware in the RESET_STAGE_RESTORE stage of the reset
process.
2. When starting the testpmd application with the --disable-rss option,
there is no need to configure the RSS redirection table to hardware.
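The table update applied by this patch is simply a modulo spread of the
redirection entries over the real RSS size. Below is a minimal
standalone sketch of that mapping; the 16-entry table and
alloc_rss_size = 4 are illustrative values only, the driver itself uses
HNS3_RSS_IND_TBL_SIZE entries and hw->alloc_rss_size.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t tbl[16];            /* stands in for rss_indirection_tbl */
	uint16_t alloc_rss_size = 4; /* real RSS size after the queue change */
	int i;

	/* Spread the table entries over all queues of the real RSS size. */
	for (i = 0; i < 16; i++)
		tbl[i] = i % alloc_rss_size;

	for (i = 0; i < 16; i++)
		printf("%d ", tbl[i]);   /* prints 0 1 2 3 0 1 2 3 ... */
	printf("\n");

	return 0;
}

With every entry pointing to a valid queue, traffic is distributed over
all configured Rx queues.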
Fixes: c37ca66f2b27 ("net/hns3: support RSS")
Cc: stable@dpdk.org
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
void
hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
{
+ struct hns3_rss_conf *rss_cfg = &hw->rss_info;
uint16_t rx_qnum_per_tc;
+ int i;
rx_qnum_per_tc = nb_rx_q / hw->num_tc;
rx_qnum_per_tc = RTE_MIN(hw->rss_size_max, rx_qnum_per_tc);
hw->alloc_rss_size = rx_qnum_per_tc;
}
hw->used_rx_queues = hw->num_tc * hw->alloc_rss_size;
+
+ /*
+ * When rss size is changed, we need to update rss redirection table
+ * maintained by driver. Besides, during the entire reset process, we
+ * need to ensure that the rss table information is not overwritten
+ * and configured directly to the hardware in the RESET_STAGE_RESTORE
+ * stage of the reset process.
+ */
+ if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] =
+ i % hw->alloc_rss_size;
+ }
}
void
return ret;
}
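+/* Restore the RSS filter configuration when the port starts. */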
+static void
+hns3_restore_filter(struct rte_eth_dev *dev)
+{
+ hns3_restore_rss_filter(dev);
+}
+
static int
hns3_dev_start(struct rte_eth_dev *dev)
{
hns3_mp_req_start_rxtx(dev);
rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
+ hns3_restore_filter(dev);
+
hns3_info(hw, "hns3 dev start successful!");
return 0;
}
return ret;
}
+static void
+hns3vf_restore_filter(struct rte_eth_dev *dev)
+{
+ hns3_restore_rss_filter(dev);
+}
+
static int
hns3vf_dev_start(struct rte_eth_dev *dev)
{
hns3_mp_req_start_rxtx(dev);
rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
+ hns3vf_restore_filter(dev);
+
return ret;
}
return hns3_config_rss_filter(dev, &hw->rss_info, false);
}
+/* Restore the rss filter */
+int
+hns3_restore_rss_filter(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+
+ if (hw->rss_info.conf.queue_num == 0)
+ return 0;
+
+ return hns3_config_rss_filter(dev, &hw->rss_info, true);
+}
+
static int
hns3_flow_parse_rss(struct rte_eth_dev *dev,
const struct hns3_rss_conf *conf, bool add)
req->rss_set_bitmap = rte_cpu_to_le_16(HNS3_RSS_SET_BITMAP_MSK);
for (j = 0; j < HNS3_RSS_CFG_TBL_SIZE; j++) {
num = i * HNS3_RSS_CFG_TBL_SIZE + j;
- req->rss_result[j] = indir[num] % hw->alloc_rss_size;
+ req->rss_result[j] = indir[num];
}
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
shift = i % RTE_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] =
- rss_cfg->rss_indirection_tbl[i] % hw->alloc_rss_size;
+ rss_cfg->rss_indirection_tbl[i];
}
rte_spinlock_unlock(&hw->lock);
return 0;
enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
- /* When there is no open RSS, redirect the packet queue 0 */
+ /* When RSS is off, redirect packets to queue 0 */
if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) == 0)
hns3_rss_uninit(hns);
if (ret)
return ret;
- ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
- HNS3_RSS_IND_TBL_SIZE);
- if (ret)
- goto rss_tuple_uninit;
+ /*
+ * When RSS is off, there is no need to configure the rss redirection
+ * table to hardware.
+ */
+ if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+ ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
+ HNS3_RSS_IND_TBL_SIZE);
+ if (ret)
+ goto rss_tuple_uninit;
+ }
ret = hns3_set_rss_tc_mode(hw);
if (ret)
return ret;
rss_indir_table_uninit:
- ret1 = hns3_rss_reset_indir_table(hw);
- if (ret1 != 0)
- return ret;
+ if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+ ret1 = hns3_rss_reset_indir_table(hw);
+ if (ret1 != 0)
+ return ret;
+ }
rss_tuple_uninit:
hns3_rss_tuple_uninit(hw);
uint64_t rss_hf);
int hns3_set_rss_algo_key(struct hns3_hw *hw, uint8_t hash_algo,
const uint8_t *key);
+int hns3_restore_rss_filter(struct rte_eth_dev *dev);
+
#endif /* _HNS3_RSS_H_ */