Currently, Rx interrupts do not work after a reset (such as FLR, global
reset or IMP reset) when running the l3fwd-power application on the
hns3 network engine.
The root cause is that the hardware configuration for Rx interrupts is
not restored after the reset.
This patch fixes it with the following modifications:
1. The call to the internal static function named
hns3(vf)_init_ring_with_vector is moved from hns3_init_pf to
hns3(vf)_init_hardware, because hns3(vf)_init_hardware is called both
during initialization and in the RESET_STAGE_DEV_INIT stage of the
reset process.
2. The internal static function named hns3(vf)_restore_rx_interrupt is
added to hns3(vf)_restore_conf; it restores the hardware configuration
of the interrupt vectors of the Rx queues in the RESET_STAGE_DEV_INIT
stage of the reset process.
3. Calls to hns3_dev_all_rx_queue_intr_enable and hns3_enable_all_queues
are added to hns3(vf)_dev_start (which is called during
initialization), so that the driver is ready to work once the
rte_eth_dev_start API returns successfully.
4. The same calls to hns3_dev_all_rx_queue_intr_enable and
hns3_enable_all_queues are added to hns3(vf)_start_service (which is
called in the RESET_STAGE_DEV_INIT stage of the reset process), so
that the driver is ready to work after start_service completes.
Note:
1. Because FLR clears the queues' interrupt enable bits in the hardware
configuration, a call to hns3_dev_all_rx_queue_intr_enable is added to
enable the interrupts before the queues are enabled.
2. After initialization has finished, the queues are enabled to work by
calling the internal function named hns3_enable_all_queues (the
resulting call ordering is sketched below).
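The sketch below summarizes the resulting bring-up ordering. It is a
simplified, hypothetical excerpt (the wrapper name is illustrative, and
error unwinding, locking and the remaining start-up steps are elided);
the actual code is in the diff, and the same ordering applies to the VF
variants and to hns3(vf)_start_service on the reset recovery path:

    /* Hypothetical sketch of the start ordering after this patch. */
    static int
    hns3_dev_start_sketch(struct hns3_adapter *hns, struct rte_eth_dev *dev)
    {
            struct hns3_hw *hw = &hns->hw;
            int ret;

            /* Bind each Rx queue to its interrupt vector. */
            ret = hns3_map_rx_interrupt(dev);
            if (ret)
                    return ret;

            /* Re-enable the per-queue interrupt bits that FLR cleared. */
            hns3_dev_all_rx_queue_intr_enable(hw, true);

            /* Only now let the queues receive and transmit packets. */
            hns3_enable_all_queues(hw, true);

            return 0;
    }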
Fixes: 02a7b55657b2 ("net/hns3: support Rx interrupt")
Cc: stable@dpdk.org
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
goto err_mac_init;
}
+
+ /*
+ * During initialization, all the hardware mapping relationships
+ * between queues and interrupt vectors must be cleared, so that errors
+ * caused by residual configurations, such as unexpected interrupts,
+ * can be avoided.
+ */
+ ret = hns3_init_ring_with_vector(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
+ goto err_mac_init;
+ }
+
return 0;
err_mac_init:
goto err_fdir;
}
- /*
- * In the initialization clearing the all hardware mapping relationship
- * configurations between queues and interrupt vectors is needed, so
- * some error caused by the residual configurations, such as the
- * unexpected interrupt, can be avoid.
- */
- ret = hns3_init_ring_with_vector(hw);
- if (ret)
- goto err_fdir;
-
return 0;
err_fdir:
return ret;
}
+static int
+hns3_restore_rx_interrupt(struct hns3_hw *hw)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint16_t q_id;
+ int ret;
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return 0;
+
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+ ret = hns3_bind_ring_with_vector(hw,
+ intr_handle->intr_vec[q_id], true,
+ HNS3_RING_TYPE_RX, q_id);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void
hns3_restore_filter(struct rte_eth_dev *dev)
{
rte_spinlock_unlock(&hw->lock);
return ret;
}
+ ret = hns3_map_rx_interrupt(dev);
+ if (ret) {
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
hw->adapter_state = HNS3_NIC_STARTED;
rte_spinlock_unlock(&hw->lock);
- ret = hns3_map_rx_interrupt(dev);
- if (ret)
- return ret;
hns3_set_rxtx_function(dev);
hns3_mp_req_start_rxtx(dev);
rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
hns3_restore_filter(dev);
+ /* Enable the interrupt of all Rx queues before enabling the queues */
+ hns3_dev_all_rx_queue_intr_enable(hw, true);
+ /*
+ * After the initialization has finished, enable the queues to receive
+ * and transmit packets.
+ */
+ hns3_enable_all_queues(hw, true);
+
hns3_info(hw, "hns3 dev start successful!");
return 0;
}
rte_spinlock_lock(&hw->lock);
if (rte_atomic16_read(&hw->reset.resetting) == 0) {
hns3_do_stop(hns);
+ hns3_unmap_rx_interrupt(dev);
hns3_dev_release_mbufs(hns);
hw->adapter_state = HNS3_NIC_CONFIGURED;
}
rte_eal_alarm_cancel(hns3_service_handler, dev);
rte_spinlock_unlock(&hw->lock);
- hns3_unmap_rx_interrupt(dev);
}
static void
eth_dev = &rte_eth_devices[hw->data->port_id];
hns3_set_rxtx_function(eth_dev);
hns3_mp_req_start_rxtx(eth_dev);
- if (hw->adapter_state == HNS3_NIC_STARTED)
+ if (hw->adapter_state == HNS3_NIC_STARTED) {
hns3_service_handler(eth_dev);
+ /* Enable the interrupt of all Rx queues before enabling the queues */
+ hns3_dev_all_rx_queue_intr_enable(hw, true);
+ /*
+ * After the initialization has finished, enable the queues to
+ * receive and transmit packets.
+ */
+ hns3_enable_all_queues(hw, true);
+ }
+
return 0;
}
if (ret)
goto err_promisc;
+ ret = hns3_restore_rx_interrupt(hw);
+ if (ret)
+ goto err_promisc;
+
if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
ret = hns3_do_start(hns, false);
if (ret)
goto err_init_hardware;
}
+ /*
+ * During initialization, all the hardware mapping relationships
+ * between queues and interrupt vectors must be cleared, so that errors
+ * caused by residual configurations, such as unexpected interrupts,
+ * can be avoided.
+ */
+ ret = hns3vf_init_ring_with_vector(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
+ goto err_init_hardware;
+ }
+
ret = hns3vf_set_alive(hw, true);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret);
hns3_set_default_rss_args(hw);
- /*
- * In the initialization clearing the all hardware mapping relationship
- * configurations between queues and interrupt vectors is needed, so
- * some error caused by the residual configurations, such as the
- * unexpected interrupt, can be avoid.
- */
- ret = hns3vf_init_ring_with_vector(hw);
- if (ret)
- goto err_get_config;
-
return 0;
err_get_config:
rte_spinlock_lock(&hw->lock);
if (rte_atomic16_read(&hw->reset.resetting) == 0) {
hns3vf_do_stop(hns);
+ hns3vf_unmap_rx_interrupt(dev);
hns3_dev_release_mbufs(hns);
hw->adapter_state = HNS3_NIC_CONFIGURED;
}
rte_eal_alarm_cancel(hns3vf_service_handler, dev);
rte_spinlock_unlock(&hw->lock);
-
- hns3vf_unmap_rx_interrupt(dev);
}
static void
return ret;
}
+static int
+hns3vf_restore_rx_interrupt(struct hns3_hw *hw)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint16_t q_id;
+ int ret;
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return 0;
+
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+ ret = hns3vf_bind_ring_with_vector(hw,
+ intr_handle->intr_vec[q_id], true,
+ HNS3_RING_TYPE_RX, q_id);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void
hns3vf_restore_filter(struct rte_eth_dev *dev)
{
rte_spinlock_unlock(&hw->lock);
return ret;
}
+ ret = hns3vf_map_rx_interrupt(dev);
+ if (ret) {
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
hw->adapter_state = HNS3_NIC_STARTED;
rte_spinlock_unlock(&hw->lock);
- ret = hns3vf_map_rx_interrupt(dev);
- if (ret)
- return ret;
hns3_set_rxtx_function(dev);
hns3_mp_req_start_rxtx(dev);
rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
hns3vf_restore_filter(dev);
+ /* Enable the interrupt of all Rx queues before enabling the queues */
+ hns3_dev_all_rx_queue_intr_enable(hw, true);
+ /*
+ * After the initialization has finished, enable the queues to receive
+ * and transmit packets.
+ */
+ hns3_enable_all_queues(hw, true);
+
return ret;
}
eth_dev = &rte_eth_devices[hw->data->port_id];
hns3_set_rxtx_function(eth_dev);
hns3_mp_req_start_rxtx(eth_dev);
- if (hw->adapter_state == HNS3_NIC_STARTED)
+ if (hw->adapter_state == HNS3_NIC_STARTED) {
hns3vf_service_handler(eth_dev);
+ /* Enable the interrupt of all Rx queues before enabling the queues */
+ hns3_dev_all_rx_queue_intr_enable(hw, true);
+ /*
+ * After the initialization has finished, enable the queues to
+ * receive and transmit packets.
+ */
+ hns3_enable_all_queues(hw, true);
+ }
+
return 0;
}
if (ret)
goto err_vlan_table;
+ ret = hns3vf_restore_rx_interrupt(hw);
+ if (ret)
+ goto err_vlan_table;
+
if (hw->adapter_state == HNS3_NIC_STARTED) {
ret = hns3vf_do_start(hns, false);
if (ret)
hw->reset.attempts = 0;
hw->reset.stats.success_cnt++;
hw->reset.stage = RESET_STAGE_NONE;
+ rte_spinlock_lock(&hw->lock);
hw->reset.ops->start_service(hns);
+ rte_spinlock_unlock(&hw->lock);
gettimeofday(&tv, NULL);
timersub(&tv, &hw->reset.start_time, &tv_delta);
hns3_warn(hw, "%s reset done fail_cnt:%" PRIx64
HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
}
-static void
+void
hns3_enable_all_queues(struct hns3_hw *hw, bool en)
{
uint16_t nb_rx_q = hw->data->nb_rx_queues;
hns3_write_dev(hw, addr, value);
}
+/*
+ * Enable the interrupt of all Rx queues when in interrupt Rx mode.
+ * This API must be called before enabling queue Rx and Tx (in both the
+ * normal start and the reset recovery scenarios); it fixes the issue
+ * that the hardware Rx queue interrupt enable bits are cleared by FLR.
+ */
+void
+hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+ uint16_t nb_rx_q = hw->data->nb_rx_queues;
+ int i;
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return;
+
+ for (i = 0; i < nb_rx_q; i++)
+ hns3_queue_intr_enable(hw, i, en);
+}
+
int
hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
hns3_init_tx_ring_tc(hns);
}
+/*
+ * Start all queues.
+ * Note: this function only initializes and sets up the queues; it does
+ * not enable queue Rx and Tx.
+ */
int
hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
{
}
hns3_start_tx_queues(hns);
- hns3_enable_all_queues(hw, true);
return 0;
}
void hns3_dev_tx_queue_release(void *queue);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_queues(struct hns3_adapter *hns);
+void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
int hns3_start_queues(struct hns3_adapter *hns, bool reset_queue);
int hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);