This patch adds reset-related processing for the hns3 PMD driver.
The following three scenarios trigger the reset process, and the
driver settings are restored after the reset completes
successfully:
1. Receive a reset interrupt
2. PF receives a hardware error interrupt
3. VF is notified by PF to reset
Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
#include "hns3_ethdev.h"
#include "hns3_regs.h"
+#include "hns3_intr.h"
#include "hns3_logs.h"
#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)
head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
if (!is_valid_csq_clean_head(csq, head)) {
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ uint32_t global;
+ uint32_t fun_rst;
hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
csq->next_to_use, csq->next_to_clean);
rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ if (hns->is_vf) {
+ global = hns3_read_dev(hw, HNS3_VF_RST_ING);
+ fun_rst = hns3_read_dev(hw, HNS3_FUN_RST_ING);
+ hns3_err(hw, "Delayed VF reset global: %x fun_rst: %x",
+ global, fun_rst);
+ hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
+ } else {
+ global = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
+ fun_rst = hns3_read_dev(hw, HNS3_FUN_RST_ING);
+ hns3_err(hw, "Delayed IMP reset global: %x fun_rst: %x",
+ global, fun_rst);
+ hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
+ }
+
+ hns3_schedule_delayed_reset(hns);
+
return -EIO;
}
static int hns3_cmd_poll_reply(struct hns3_hw *hw)
{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
uint32_t timeout = 0;
do {
return -EBUSY;
}
+ if (is_reset_pending(hns)) {
+ hns3_err(hw, "Don't wait for reply because of reset pending");
+ return -EIO;
+ }
+
rte_delay_us(1);
timeout++;
} while (timeout < hw->cmq.tx_timeout);
rte_spinlock_unlock(&hw->cmq.crq.lock);
rte_spinlock_unlock(&hw->cmq.csq.lock);
+ /*
+ * Check if there is new reset pending, because the higher level
+ * reset may happen when lower level reset is being processed.
+ */
+ if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
+ PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
+ ret = -EBUSY;
+ goto err_cmd_init;
+ }
rte_atomic16_clear(&hw->reset.disable_cmd);
ret = hns3_cmd_query_firmware_version(hw, &hw->fw_version);
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
+#include <rte_atomic.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_cycles.h>
#define HNS3_FILTER_FE_INGRESS (HNS3_FILTER_FE_NIC_INGRESS_B \
| HNS3_FILTER_FE_ROCE_INGRESS_B)
+/* Reset related Registers */
+#define HNS3_GLOBAL_RESET_BIT 0
+#define HNS3_CORE_RESET_BIT 1
+#define HNS3_IMP_RESET_BIT 2
+#define HNS3_FUN_RST_ING_B 0
+
+#define HNS3_VECTOR0_IMP_RESET_INT_B 1
+
+#define HNS3_RESET_WAIT_MS 100
+#define HNS3_RESET_WAIT_CNT 200
+
int hns3_logtype_init;
int hns3_logtype_driver;
HNS3_VECTOR0_EVENT_OTHER,
};
+static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
+ uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
int on);
* from H/W just for the mailbox.
*/
if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
+ rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
+ if (clearval) {
+ hw->reset.stats.imp_cnt++;
+ hns3_warn(hw, "IMP reset detected, clear reset status");
+ } else {
+ hns3_schedule_delayed_reset(hns);
+ hns3_warn(hw, "IMP reset detected, don't clear reset status");
+ }
+
ret = HNS3_VECTOR0_EVENT_RST;
goto out;
}
/* Global reset */
if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
+ rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
+ if (clearval) {
+ hw->reset.stats.global_cnt++;
+ hns3_warn(hw, "Global reset detected, clear reset status");
+ } else {
+ hns3_schedule_delayed_reset(hns);
+ hns3_warn(hw, "Global reset detected, don't clear reset status");
+ }
+
ret = HNS3_VECTOR0_EVENT_RST;
goto out;
}
event_cause = hns3_check_event_cause(hns, &clearval);
+ /* vector 0 interrupt is shared with reset and mailbox source events. */
+ if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
+ hns3_handle_msix_error(hns, &hw->reset.request);
+ hns3_schedule_reset(hns);
+ } else if (event_cause == HNS3_VECTOR0_EVENT_RST)
+ hns3_schedule_reset(hns);
+ else
+ hns3_err(hw, "Received unknown event");
+
hns3_clear_event_cause(hw, event_cause, clearval);
/* Enable interrupt if it is not cause by reset */
hns3_pf_enable_irq0(hw);
LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
}
+/*
+ * Replay the user VLAN filter table into hardware after a reset.
+ * If a port based VLAN (PVID) was enabled before the reset, only the
+ * PVID configuration is restored; otherwise every entry that was
+ * present in the hardware table (hd_tbl_status set) is re-added.
+ * Returns 0 on success or the first failing helper's error code.
+ */
+static int
+hns3_restore_vlan_table(struct hns3_adapter *hns)
+{
+	struct hns3_user_vlan_table *vlan_entry;
+	struct hns3_pf *pf = &hns->pf;
+	uint16_t vlan_id;
+	int ret = 0;
+
+	if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) {
+		ret = hns3_vlan_pvid_configure(hns, pf->port_base_vlan_cfg.pvid,
+					       1);
+		return ret;
+	}
+
+	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
+		if (vlan_entry->hd_tbl_status) {
+			vlan_id = vlan_entry->vlan_id;
+			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
+
static int
hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
return hns3_default_vlan_config(hns);
}
+/*
+ * Re-apply the saved Rx and Tx VLAN tag offload configuration
+ * (pf->vtag_config) to hardware after a reset.
+ * Returns 0 on success, the failing helper's error code otherwise.
+ */
+static int
+hns3_restore_vlan_conf(struct hns3_adapter *hns)
+{
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
+	if (ret) {
+		hns3_err(hw, "hns3 restore vlan rx conf fail, ret =%d", ret);
+		return ret;
+	}
+
+	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
+	if (ret)
+		hns3_err(hw, "hns3 restore vlan tx conf fail, ret =%d", ret);
+
+	return ret;
+}
+
static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
return ret;
}
+/*
+ * Restore the promiscuous / all-multicast state recorded in the
+ * ethdev data (hw->data) into hardware after a reset.
+ */
+static int
+hns3_dev_promisc_restore(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	bool en_mc_pmc;
+	bool en_uc_pmc;
+
+	en_uc_pmc = (hw->data->promiscuous == 1) ? true : false;
+	en_mc_pmc = (hw->data->all_multicast == 1) ? true : false;
+
+	return hns3_set_promisc_mode(hw, en_uc_pmc, en_mc_pmc);
+}
+
static int
hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed)
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- hns3_update_speed_duplex(eth_dev);
- hns3_update_link_status(hw);
+ if (!hns3_is_reset_pending(hns)) {
+ hns3_update_speed_duplex(eth_dev);
+ hns3_update_link_status(hw);
+ } else
+ hns3_warn(hw, "Cancel the query when reset is pending");
rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
}
int ret;
PMD_INIT_FUNC_TRACE();
-
+ if (rte_atomic16_read(&hw->reset.resetting))
+ return -EBUSY;
rte_spinlock_lock(&hw->lock);
hw->adapter_state = HNS3_NIC_STARTING;
return ret;
hw->mac.link_status = ETH_LINK_DOWN;
- hns3_configure_all_mac_addr(hns, true);
- reset_queue = true;
+ if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+ hns3_configure_all_mac_addr(hns, true);
+ reset_queue = true;
+ } else
+ reset_queue = false;
hw->mac.default_addr_setted = false;
return hns3_stop_queues(hns, reset_queue);
}
hns3_set_rxtx_function(eth_dev);
rte_spinlock_lock(&hw->lock);
-
- hns3_do_stop(hns);
- hns3_dev_release_mbufs(hns);
- hw->adapter_state = HNS3_NIC_CONFIGURED;
+ if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ hns3_do_stop(hns);
+ hns3_dev_release_mbufs(hns);
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ }
rte_spinlock_unlock(&hw->lock);
}
hns3_dev_stop(eth_dev);
hw->adapter_state = HNS3_NIC_CLOSING;
+ hns3_reset_abort(hns);
+ hw->adapter_state = HNS3_NIC_CLOSED;
rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
hns3_configure_all_mc_mac_addr(hns, true);
hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
hns3_uninit_pf(eth_dev);
hns3_free_all_queues(eth_dev);
+ rte_free(hw->reset.wait_data);
rte_free(eth_dev->process_private);
eth_dev->process_private = NULL;
- hw->adapter_state = HNS3_NIC_CLOSED;
hns3_warn(hw, "Close port %d finished", hw->data->port_id);
}
return 0;
}
+/*
+ * Re-initialize the PF device after a reset: bring the command queue
+ * back up, reset all Rx/Tx queues, re-run the hardware init sequence
+ * and re-enable hardware error interrupts.
+ * On failure the command queue is torn down again so the device is
+ * left in a consistent "not initialized" state.
+ */
+static int
+hns3_reinit_dev(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3_cmd_init(hw);
+	if (ret) {
+		hns3_err(hw, "Failed to init cmd: %d", ret);
+		return ret;
+	}
+
+	ret = hns3_reset_all_queues(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to reset all queues: %d", ret);
+		goto err_init;
+	}
+
+	ret = hns3_init_hardware(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to init hardware: %d", ret);
+		goto err_init;
+	}
+
+	ret = hns3_enable_hw_error_intr(hns, true);
+	if (ret) {
+		hns3_err(hw, "fail to enable hw error interrupts: %d",
+			     ret);
+		goto err_mac_init;
+	}
+	hns3_info(hw, "Reset done, driver initialization finished.");
+
+	return 0;
+
+err_mac_init:
+	/* Unwind the UMV space set up by hns3_init_hardware above. */
+	hns3_uninit_umv_space(hw);
+err_init:
+	hns3_cmd_uninit(hw);
+
+	return ret;
+}
+
+/*
+ * Poll-callback: report whether the hardware has finished the reset
+ * currently being processed (hw->reset.level). The per-level "reset
+ * in progress" bit being clear means the reset is done.
+ * Unsupported levels (e.g. FLR) are reported as done so the state
+ * machine does not wait forever.
+ */
+static bool
+is_pf_reset_done(struct hns3_hw *hw)
+{
+	uint32_t val, reg, reg_bit;
+
+	switch (hw->reset.level) {
+	case HNS3_IMP_RESET:
+		reg = HNS3_GLOBAL_RESET_REG;
+		reg_bit = HNS3_IMP_RESET_BIT;
+		break;
+	case HNS3_GLOBAL_RESET:
+		reg = HNS3_GLOBAL_RESET_REG;
+		reg_bit = HNS3_GLOBAL_RESET_BIT;
+		break;
+	case HNS3_FUNC_RESET:
+		reg = HNS3_FUN_RST_ING;
+		reg_bit = HNS3_FUN_RST_ING_B;
+		break;
+	case HNS3_FLR_RESET:
+	default:
+		hns3_err(hw, "Wait for unsupported reset level: %d",
+			 hw->reset.level);
+		return true;
+	}
+	val = hns3_read_dev(hw, reg);
+	if (hns3_get_bit(val, reg_bit))
+		return false;
+	else
+		return true;
+}
+
+/*
+ * Return true when a reset of a HIGHER level than the one currently
+ * being handled is pending or requested. hns3_check_event_cause() is
+ * called first (with clearval == NULL, i.e. without clearing status)
+ * to pick up any reset interrupt that may not have been serviced yet.
+ */
+bool
+hns3_is_reset_pending(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset;
+
+	hns3_check_event_cause(hns, NULL);
+	reset = hns3_get_reset_level(hns, &hw->reset.pending);
+	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
+		hns3_warn(hw, "High level reset %d is pending", reset);
+		return true;
+	}
+	reset = hns3_get_reset_level(hns, &hw->reset.request);
+	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
+		hns3_warn(hw, "High level reset %d is request", reset);
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Non-blocking wait for hardware reset completion, driven by the EAL
+ * alarm. State machine on wait_data->result:
+ *   HNS3_WAIT_SUCCESS -> 0       (hardware ready)
+ *   HNS3_WAIT_TIMEOUT -> -ETIME  (gave up after WAIT_CNT * WAIT_MS)
+ *   HNS3_WAIT_REQUEST -> -EAGAIN (alarm already armed, poll later)
+ * First call arms the alarm with is_pf_reset_done as the completion
+ * check and returns -EAGAIN so the caller re-enters later.
+ */
+static int
+hns3_wait_hardware_ready(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_wait_data *wait_data = hw->reset.wait_data;
+	struct timeval tv;
+
+	if (wait_data->result == HNS3_WAIT_SUCCESS)
+		return 0;
+	else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
+		gettimeofday(&tv, NULL);
+		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		return -ETIME;
+	} else if (wait_data->result == HNS3_WAIT_REQUEST)
+		return -EAGAIN;
+
+	wait_data->hns = hns;
+	wait_data->check_completion = is_pf_reset_done;
+	wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
+				     HNS3_RESET_WAIT_MS + get_timeofday_ms();
+	wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
+	wait_data->count = HNS3_RESET_WAIT_CNT;
+	wait_data->result = HNS3_WAIT_REQUEST;
+	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
+	return -EAGAIN;
+}
+
+/*
+ * Ask the firmware to perform a function reset for the given function
+ * id (0 selects the PF itself; other values select a VF).
+ */
+static int
+hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
+{
+	struct hns3_cmd_desc desc;
+	struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
+	hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
+	req->fun_reset_vfid = func_id;
+
+	return hns3_cmd_send(hw, &desc, 1);
+}
+
+/*
+ * Request an IMP (management processor) reset from the firmware.
+ * NOTE(review): 0xFFFE / 0xeedd appear to be firmware-defined magic
+ * values for the IMP-reset trigger — confirm against the hardware
+ * spec and consider naming them with macros.
+ */
+static int
+hns3_imp_reset_cmd(struct hns3_hw *hw)
+{
+	struct hns3_cmd_desc desc;
+
+	hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
+	desc.data[0] = 0xeedd;
+
+	return hns3_cmd_send(hw, &desc, 1);
+}
+
+/*
+ * Trigger the reset requested through the MSI-X error path. If the
+ * hardware is already in the middle of a reset (global or function
+ * reset in progress), skip triggering a new one — the ongoing reset
+ * will recover the device. On success the serviced level is cleared
+ * from hw->reset.request.
+ */
+static void
+hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct timeval tv;
+	uint32_t val;
+
+	gettimeofday(&tv, NULL);
+	if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
+	    hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
+		hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		return;
+	}
+
+	switch (reset_level) {
+	case HNS3_IMP_RESET:
+		hns3_imp_reset_cmd(hw);
+		hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		break;
+	case HNS3_GLOBAL_RESET:
+		val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
+		hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
+		hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
+		hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		break;
+	case HNS3_FUNC_RESET:
+		hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		/* schedule again to check later */
+		hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
+		hns3_schedule_reset(hns);
+		break;
+	default:
+		hns3_warn(hw, "Unsupported reset level: %d", reset_level);
+		return;
+	}
+	hns3_atomic_clear_bit(reset_level, &hw->reset.request);
+}
+
+/*
+ * Pick the highest-priority reset level set in the given bitmap
+ * (IMP > GLOBAL > FUNC > FLR). While a reset is already being
+ * processed, a level lower than the current one is reported as
+ * HNS3_NONE_RESET so it does not preempt the ongoing handling.
+ */
+static enum hns3_reset_level
+hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+{
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset_level = HNS3_NONE_RESET;
+
+	/* Return the highest priority reset level amongst all */
+	if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
+		reset_level = HNS3_IMP_RESET;
+	else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
+		reset_level = HNS3_GLOBAL_RESET;
+	else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
+		reset_level = HNS3_FUNC_RESET;
+	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
+		reset_level = HNS3_FLR_RESET;
+
+	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
+		return HNS3_NONE_RESET;
+
+	return reset_level;
+}
+
+/*
+ * Prepare for the reset about to be processed. For a function reset
+ * the firmware is asked to perform it and the command queue is then
+ * disabled; for an IMP reset only the IMP-reset interrupt source is
+ * re-enabled so its completion can be observed. Other levels need no
+ * preparation.
+ */
+static int
+hns3_prepare_reset(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	uint32_t reg_val;
+	int ret;
+
+	switch (hw->reset.level) {
+	case HNS3_FUNC_RESET:
+		ret = hns3_func_reset_cmd(hw, 0);
+		if (ret)
+			return ret;
+
+		/*
+		 * After performing pf reset, it is not necessary to do the
+		 * mailbox handling or send any command to firmware, because
+		 * any mailbox handling or command to firmware is only valid
+		 * after hns3_cmd_init is called.
+		 */
+		rte_atomic16_set(&hw->reset.disable_cmd, 1);
+		hw->reset.stats.request_cnt++;
+		break;
+	case HNS3_IMP_RESET:
+		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
+		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
+			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Tell the firmware that the PF has finished its reset handling.
+ * NOTE(review): req aliases desc.data and is OR-ed after
+ * hns3_cmd_setup_basic_desc() — this assumes setup zero-fills the
+ * descriptor data; confirm, otherwise use '=' instead of '|='.
+ */
+static int
+hns3_set_rst_done(struct hns3_hw *hw)
+{
+	struct hns3_pf_rst_done_cmd *req;
+	struct hns3_cmd_desc desc;
+
+	req = (struct hns3_pf_rst_done_cmd *)desc.data;
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
+	req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
+	return hns3_cmd_send(hw, &desc, 1);
+}
+
+/*
+ * Quiesce the PF before reset processing: cancel the periodic service
+ * alarm, mark the link down, switch Rx/Tx to the dummy burst
+ * functions, and stop the datapath if it was running. mbuf release is
+ * deferred (mbuf_deferred_free) until after the reset completes.
+ * Multicast MAC entries are removed via software only while the
+ * command queue is still usable.
+ */
+static int
+hns3_stop_service(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_dev *eth_dev;
+
+	eth_dev = &rte_eth_devices[hw->data->port_id];
+	rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
+	hw->mac.link_status = ETH_LINK_DOWN;
+
+	hns3_set_rxtx_function(eth_dev);
+
+	rte_spinlock_lock(&hw->lock);
+	if (hw->adapter_state == HNS3_NIC_STARTED ||
+	    hw->adapter_state == HNS3_NIC_STOPPING) {
+		hns3_do_stop(hns);
+		hw->reset.mbuf_deferred_free = true;
+	} else
+		hw->reset.mbuf_deferred_free = false;
+
+	/*
+	 * It is cumbersome for hardware to pick-and-choose entries for
+	 * deletion from table space. Hence, for function reset software
+	 * intervention is required to delete the entries.
+	 */
+	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+		hns3_configure_all_mc_mac_addr(hns, true);
+	rte_spinlock_unlock(&hw->lock);
+
+	return 0;
+}
+
+/*
+ * Resume service after the reset: for IMP/global resets notify the
+ * firmware that the PF has finished (hns3_set_rst_done), then restore
+ * the real Rx/Tx burst functions and re-arm the periodic service
+ * handler (which also re-schedules itself).
+ */
+static int
+hns3_start_service(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_dev *eth_dev;
+
+	if (hw->reset.level == HNS3_IMP_RESET ||
+	    hw->reset.level == HNS3_GLOBAL_RESET)
+		hns3_set_rst_done(hw);
+	eth_dev = &rte_eth_devices[hw->data->port_id];
+	hns3_set_rxtx_function(eth_dev);
+	hns3_service_handler(eth_dev);
+	return 0;
+}
+
+/*
+ * Restore all software-tracked configuration to hardware after a
+ * reset: unicast/multicast MAC tables, promiscuous mode, VLAN filter
+ * table and VLAN offloads, flow-director rules, and finally restart
+ * the datapath if the port was started before the reset. On failure
+ * the MAC tables already programmed are rolled back.
+ */
+static int
+hns3_restore_conf(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3_configure_all_mac_addr(hns, false);
+	if (ret)
+		return ret;
+
+	ret = hns3_configure_all_mc_mac_addr(hns, false);
+	if (ret)
+		goto err_mc_mac;
+
+	ret = hns3_dev_promisc_restore(hns);
+	if (ret)
+		goto err_promisc;
+
+	ret = hns3_restore_vlan_table(hns);
+	if (ret)
+		goto err_promisc;
+
+	ret = hns3_restore_vlan_conf(hns);
+	if (ret)
+		goto err_promisc;
+
+	ret = hns3_restore_all_fdir_filter(hns);
+	if (ret)
+		goto err_promisc;
+
+	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
+		ret = hns3_do_start(hns, false);
+		if (ret)
+			goto err_promisc;
+		hns3_info(hw, "hns3 dev restart successful!");
+	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+	return 0;
+
+err_promisc:
+	hns3_configure_all_mc_mac_addr(hns, true);
+err_mc_mac:
+	hns3_configure_all_mac_addr(hns, true);
+	return ret;
+}
+
+/*
+ * PF reset service, run from the alarm/interrupt thread. Recovers a
+ * possibly lost interrupt (deferred schedule), drives the reset state
+ * machine for any pending hardware reset, logs a warning when one
+ * pass exceeds HNS3_RESET_PROCESS_MS, and finally honors any newly
+ * requested (MSI-X error driven) reset.
+ */
+static void
+hns3_reset_service(void *param)
+{
+	struct hns3_adapter *hns = (struct hns3_adapter *)param;
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset_level;
+	struct timeval tv_delta;
+	struct timeval tv_start;
+	struct timeval tv;
+	uint64_t msec;
+	int ret;
+
+	/*
+	 * The interrupt is not triggered within the delay time.
+	 * The interrupt may have been lost. It is necessary to handle
+	 * the interrupt to recover from the error.
+	 */
+	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
+		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+		hns3_err(hw, "Handling interrupts in delayed tasks");
+		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
+	}
+	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
+
+	/*
+	 * Check if there is any ongoing reset in the hardware. This status can
+	 * be checked from reset_pending. If there is then, we need to wait for
+	 * hardware to complete reset.
+	 * a. If we are able to figure out in reasonable time that hardware
+	 *    has fully reset then, we can proceed with driver, client
+	 *    reset.
+	 * b. else, we can come back later to check this status so re-sched
+	 *    now.
+	 */
+	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
+	if (reset_level != HNS3_NONE_RESET) {
+		gettimeofday(&tv_start, NULL);
+		ret = hns3_reset_process(hns, reset_level);
+		gettimeofday(&tv, NULL);
+		timersub(&tv, &tv_start, &tv_delta);
+		msec = tv_delta.tv_sec * MSEC_PER_SEC +
+		       tv_delta.tv_usec / USEC_PER_MSEC;
+		/* Log the elapsed time in decimal ms (PRIu64, not hex). */
+		if (msec > HNS3_RESET_PROCESS_MS)
+			hns3_err(hw, "%d handle long time delta %" PRIu64
+				     " ms time=%ld.%.6ld",
+				     hw->reset.level, msec,
+				     tv.tv_sec, tv.tv_usec);
+		if (ret == -EAGAIN)
+			return;
+	}
+
+	/* Check if we got any *new* reset requests to be honored */
+	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
+	if (reset_level != HNS3_NONE_RESET)
+		hns3_msix_process(hns, reset_level);
+}
+
static const struct eth_dev_ops hns3_eth_dev_ops = {
.dev_start = hns3_dev_start,
.dev_stop = hns3_dev_stop,
.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
};
+/* PF callbacks plugged into the generic hns3 reset state machine. */
+static const struct hns3_reset_ops hns3_reset_ops = {
+	.reset_service = hns3_reset_service,
+	.stop_service = hns3_stop_service,
+	.prepare_reset = hns3_prepare_reset,
+	.wait_hardware_ready = hns3_wait_hardware_ready,
+	.reinit_dev = hns3_reinit_dev,
+	.restore_conf = hns3_restore_conf,
+	.start_service = hns3_start_service,
+};
+
static int
hns3_dev_init(struct rte_eth_dev *eth_dev)
{
*/
hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;
+ ret = hns3_reset_init(hw);
+ if (ret)
+ goto err_init_reset;
+ hw->reset.ops = &hns3_reset_ops;
+
ret = hns3_init_pf(eth_dev);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
*/
eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+ if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
+ hns3_err(hw, "Reschedule reset service after dev_init");
+ hns3_schedule_reset(hns);
+ } else {
+ /* IMP will wait ready flag before reset */
+ hns3_notify_reset_ready(hw, false);
+ }
+
rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
hns3_info(hw, "hns3 dev initialization successful!");
return 0;
hns3_uninit_pf(eth_dev);
err_init_pf:
+ rte_free(hw->reset.wait_data);
+err_init_reset:
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
int hns3_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg);
+bool hns3_is_reset_pending(struct hns3_adapter *hns);
+bool hns3vf_is_reset_pending(struct hns3_adapter *hns);
+
+/*
+ * Dispatch the reset-pending check to the VF or PF implementation
+ * depending on which kind of device this adapter represents.
+ */
+static inline bool
+is_reset_pending(struct hns3_adapter *hns)
+{
+	bool ret;
+	if (hns->is_vf)
+		ret = hns3vf_is_reset_pending(hns);
+	else
+		ret = hns3_is_reset_pending(hns);
+	return ret;
+}
#endif /* _HNS3_ETHDEV_H_ */
#define HNS3VF_RESET_WAIT_MS 20
#define HNS3VF_RESET_WAIT_CNT 2000
+/* Reset related Registers */
+#define HNS3_GLOBAL_RESET_BIT 0
+#define HNS3_CORE_RESET_BIT 1
+#define HNS3_IMP_RESET_BIT 2
+#define HNS3_FUN_RST_ING_B 0
+
enum hns3vf_evt_cause {
HNS3VF_VECTOR0_EVENT_RST,
HNS3VF_VECTOR0_EVENT_MBX,
HNS3VF_VECTOR0_EVENT_OTHER,
};
+static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
+ uint64_t *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
return -EBUSY;
}
+ if (rte_atomic16_read(&hw->reset.resetting)) {
+ hns3_err(hw, "Failed to set mtu during resetting");
+ return -EIO;
+ }
+
rte_spinlock_lock(&hw->lock);
ret = hns3vf_config_mtu(hw, mtu);
if (ret) {
struct hns3_hw *hw = &hns->hw;
enum hns3vf_evt_cause ret;
uint32_t cmdq_stat_reg;
+ uint32_t rst_ing_reg;
uint32_t val;
/* Fetch the events from their corresponding regs */
cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
+ if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
+ rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
+ hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
+ hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
+ rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ val = hns3_read_dev(hw, HNS3_VF_RST_ING);
+ hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
+ val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
+ if (clearval) {
+ hw->reset.stats.global_cnt++;
+ hns3_warn(hw, "Global reset detected, clear reset status");
+ } else {
+ hns3_schedule_delayed_reset(hns);
+ hns3_warn(hw, "Global reset detected, don't clear reset status");
+ }
+
+ ret = HNS3VF_VECTOR0_EVENT_RST;
+ goto out;
+ }
+
/* Check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
event_cause = hns3vf_check_event_cause(hns, &clearval);
switch (event_cause) {
+ case HNS3VF_VECTOR0_EVENT_RST:
+ hns3_schedule_reset(hns);
+ break;
case HNS3VF_VECTOR0_EVENT_MBX:
hns3_dev_handle_mbx_msg(hw);
break;
struct hns3_hw *hw = &hns->hw;
int ret;
+ if (rte_atomic16_read(&hw->reset.resetting)) {
+ hns3_err(hw,
+ "vf set vlan id failed during resetting, vlan_id =%u",
+ vlan_id);
+ return -EIO;
+ }
rte_spinlock_lock(&hw->lock);
ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
rte_spinlock_unlock(&hw->lock);
return 0;
}
+/*
+ * Apply 'on' (add/remove) to every VLAN id recorded in the ethdev
+ * VLAN filter bitmap (vfc->ids: array of 64-bit words, one bit per
+ * VLAN id). The inner loop extracts set bits one at a time:
+ *   vbit = ~ids & (ids - 1)  -> mask of bits below the lowest set bit
+ *   ids ^= ... ^ vbit        -> clears the lowest set bit of ids
+ * then counting the bits in vbit yields that bit's index, so
+ * vlan_id = 64 * i + index.
+ */
+static int
+hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
+{
+	struct rte_vlan_filter_conf *vfc;
+	struct hns3_hw *hw = &hns->hw;
+	uint16_t vlan_id;
+	uint64_t vbit;
+	uint64_t ids;
+	int ret = 0;
+	uint32_t i;
+
+	vfc = &hw->data->vlan_filter_conf;
+	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
+		if (vfc->ids[i] == 0)
+			continue;
+		ids = vfc->ids[i];
+		while (ids) {
+			/*
+			 * 64 means the num bits of ids, one bit corresponds to
+			 * one vlan id
+			 */
+			vlan_id = 64 * i;
+			/* count trailing zeroes */
+			vbit = ~ids & (ids - 1);
+			/* clear least significant bit set */
+			ids ^= (ids ^ (ids - 1)) ^ vbit;
+			for (; vbit;) {
+				vbit >>= 1;
+				vlan_id++;
+			}
+			ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
+			if (ret) {
+				hns3_err(hw,
+					 "VF handle vlan table failed, ret =%d, on = %d",
+					 ret, on);
+				return ret;
+			}
+		}
+	}
+
+	return ret;
+}
+
+/* Remove every configured VLAN filter entry (used on device close). */
+static int
+hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
+{
+	return hns3vf_handle_all_vlan_table(hns, 0);
+}
+
+/*
+ * Re-apply the VLAN strip offload setting recorded in the ethdev
+ * configuration (dev_conf.rxmode.offloads) after a reset.
+ */
+static int
+hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_conf *dev_conf;
+	bool en;
+	int ret;
+
+	dev_conf = &hw->data->dev_conf;
+	en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
+								   : false;
+	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
+	if (ret)
+		hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
+			 ret);
+	return ret;
+}
+
static int
hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- hns3vf_request_link_info(hw);
+	/*
+	 * The link status query and the reset processing both run in the
+	 * interrupt thread. When an IMP reset occurs, IMP will not respond,
+	 * and the query operation will time out after 30ms. In the case of
+	 * multiple PF/VFs, each query failure timeout causes the IMP reset
+	 * interrupt to fail to respond within 100ms.
+	 * Before querying the link status, check whether there is a reset
+	 * pending, and if so, abandon the query.
+	 */
+ if (!hns3vf_is_reset_pending(hns))
+ hns3vf_request_link_info(hw);
+ else
+ hns3_warn(hw, "Cancel the query when reset is pending");
rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
eth_dev);
hns3vf_do_stop(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
+ bool reset_queue;
hw->mac.link_status = ETH_LINK_DOWN;
- hns3vf_configure_mac_addr(hns, true);
-
- return 0;
+ if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+ hns3vf_configure_mac_addr(hns, true);
+ reset_queue = true;
+ } else
+ reset_queue = false;
+ return hns3_stop_queues(hns, reset_queue);
}
static void
hns3_set_rxtx_function(eth_dev);
rte_spinlock_lock(&hw->lock);
- hns3vf_do_stop(hns);
- hns3_dev_release_mbufs(hns);
- hw->adapter_state = HNS3_NIC_CONFIGURED;
+ if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ hns3vf_do_stop(hns);
+ hns3_dev_release_mbufs(hns);
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ }
rte_spinlock_unlock(&hw->lock);
}
hns3vf_dev_stop(eth_dev);
hw->adapter_state = HNS3_NIC_CLOSING;
+ hns3_reset_abort(hns);
+ hw->adapter_state = HNS3_NIC_CLOSED;
rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
hns3vf_configure_all_mc_mac_addr(hns, true);
+ hns3vf_remove_all_vlan_table(hns);
hns3vf_uninit_vf(eth_dev);
hns3_free_all_queues(eth_dev);
+ rte_free(hw->reset.wait_data);
rte_free(eth_dev->process_private);
eth_dev->process_private = NULL;
- hw->adapter_state = HNS3_NIC_CLOSED;
hns3_warn(hw, "Close port %d finished", hw->data->port_id);
}
int ret;
PMD_INIT_FUNC_TRACE();
+ if (rte_atomic16_read(&hw->reset.resetting))
+ return -EBUSY;
rte_spinlock_lock(&hw->lock);
hw->adapter_state = HNS3_NIC_STARTING;
ret = hns3vf_do_start(hns, true);
return 0;
}
+/*
+ * Poll-callback: report whether the VF's pending reset has finished.
+ * For a VF reset the per-VF HNS3_VF_RST_ING bit is checked; for any
+ * other level the shared FUN_RST_ING register is checked.
+ * NOTE(review): HNS3_FUN_RST_ING_BITS is built from the VECTOR0
+ * interrupt-bit macros — assumes FUN_RST_ING uses the same bit
+ * layout; confirm against the hardware spec.
+ */
+static bool
+is_vf_reset_done(struct hns3_hw *hw)
+{
+#define HNS3_FUN_RST_ING_BITS \
+	(BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
+	 BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
+	 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
+	 BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
+
+	uint32_t val;
+
+	if (hw->reset.level == HNS3_VF_RESET) {
+		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
+		if (val & HNS3_VF_RST_ING_BIT)
+			return false;
+	} else {
+		val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
+		if (val & HNS3_FUN_RST_ING_BITS)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * Return true when a reset of a HIGHER level than the one currently
+ * being handled is pending. hns3vf_check_event_cause() is called
+ * first (clearval == NULL, status not cleared) to pick up any reset
+ * interrupt that has not been serviced yet.
+ */
+bool
+hns3vf_is_reset_pending(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset;
+
+	hns3vf_check_event_cause(hns, NULL);
+	reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
+	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
+		hns3_warn(hw, "High level reset %d is pending", reset);
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Non-blocking wait for VF reset completion, driven by the EAL alarm
+ * (same state machine as the PF variant but with VF timeouts and
+ * is_vf_reset_done as the completion check):
+ *   HNS3_WAIT_SUCCESS -> 0, HNS3_WAIT_TIMEOUT -> -ETIME,
+ *   HNS3_WAIT_REQUEST -> -EAGAIN (alarm armed, poll later).
+ */
+static int
+hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_wait_data *wait_data = hw->reset.wait_data;
+	struct timeval tv;
+
+	if (wait_data->result == HNS3_WAIT_SUCCESS)
+		return 0;
+	else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
+		gettimeofday(&tv, NULL);
+		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		return -ETIME;
+	} else if (wait_data->result == HNS3_WAIT_REQUEST)
+		return -EAGAIN;
+
+	wait_data->hns = hns;
+	wait_data->check_completion = is_vf_reset_done;
+	wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
+				     HNS3VF_RESET_WAIT_MS + get_timeofday_ms();
+	wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
+	wait_data->count = HNS3VF_RESET_WAIT_CNT;
+	wait_data->result = HNS3_WAIT_REQUEST;
+	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
+	return -EAGAIN;
+}
+
+/*
+ * Prepare the VF for the reset about to be processed. For a VF
+ * function reset, ask the PF via mailbox to perform it; in all cases
+ * disable the command queue, since commands are invalid until
+ * hns3_cmd_init() runs again after the reset.
+ */
+static int
+hns3vf_prepare_reset(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret = 0;
+
+	if (hw->reset.level == HNS3_VF_FUNC_RESET) {
+		ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
+					0, true, NULL, 0);
+	}
+	rte_atomic16_set(&hw->reset.disable_cmd, 1);
+
+	return ret;
+}
+
+/*
+ * Quiesce the VF before reset processing: cancel the periodic service
+ * alarm, mark the link down, switch Rx/Tx to the dummy burst
+ * functions, and stop the datapath if it was running. mbuf release is
+ * deferred (mbuf_deferred_free) until after the reset completes.
+ * Multicast MAC entries are removed via software only while the
+ * command queue is still usable.
+ */
+static int
+hns3vf_stop_service(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_dev *eth_dev;
+
+	eth_dev = &rte_eth_devices[hw->data->port_id];
+	rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
+	hw->mac.link_status = ETH_LINK_DOWN;
+
+	hns3_set_rxtx_function(eth_dev);
+
+	rte_spinlock_lock(&hw->lock);
+	if (hw->adapter_state == HNS3_NIC_STARTED ||
+	    hw->adapter_state == HNS3_NIC_STOPPING) {
+		hns3vf_do_stop(hns);
+		hw->reset.mbuf_deferred_free = true;
+	} else
+		hw->reset.mbuf_deferred_free = false;
+
+	/*
+	 * It is cumbersome for hardware to pick-and-choose entries for deletion
+	 * from table space. Hence, for function reset software intervention is
+	 * required to delete the entries.
+	 */
+	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+		hns3vf_configure_all_mc_mac_addr(hns, true);
+	rte_spinlock_unlock(&hw->lock);
+
+	return 0;
+}
+
+/*
+ * Resume VF service after the reset: restore the real Rx/Tx burst
+ * functions and re-arm the periodic service handler (which also
+ * re-schedules itself).
+ */
+static int
+hns3vf_start_service(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_dev *eth_dev;
+
+	eth_dev = &rte_eth_devices[hw->data->port_id];
+	hns3_set_rxtx_function(eth_dev);
+
+	hns3vf_service_handler(eth_dev);
+	return 0;
+}
+
+/*
+ * Restore all software-tracked VF configuration to hardware after a
+ * reset: unicast/multicast MAC tables and VLAN offload settings, then
+ * restart the datapath if the port was started before the reset.
+ * On failure the MAC tables already programmed are rolled back.
+ */
+static int
+hns3vf_restore_conf(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3vf_configure_mac_addr(hns, false);
+	if (ret)
+		return ret;
+
+	ret = hns3vf_configure_all_mc_mac_addr(hns, false);
+	if (ret)
+		goto err_mc_mac;
+
+	ret = hns3vf_restore_vlan_conf(hns);
+	if (ret)
+		goto err_vlan_table;
+
+	if (hw->adapter_state == HNS3_NIC_STARTED) {
+		ret = hns3vf_do_start(hns, false);
+		if (ret)
+			goto err_vlan_table;
+		hns3_info(hw, "hns3vf dev restart successful!");
+	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+	return 0;
+
+err_vlan_table:
+	hns3vf_configure_all_mc_mac_addr(hns, true);
+err_mc_mac:
+	hns3vf_configure_mac_addr(hns, true);
+	return ret;
+}
+
+/*
+ * Pick the highest-priority reset level set in the given bitmap
+ * (VF_RESET > VF_FULL > VF_PF_FUNC > VF_FUNC > FLR). While a reset is
+ * already being processed, a lower level is reported as
+ * HNS3_NONE_RESET so it does not preempt the ongoing handling.
+ */
+static enum hns3_reset_level
+hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
+{
+	enum hns3_reset_level reset_level;
+
+	/* return the highest priority reset level amongst all */
+	if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
+		reset_level = HNS3_VF_RESET;
+	else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
+		reset_level = HNS3_VF_FULL_RESET;
+	else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
+		reset_level = HNS3_VF_PF_FUNC_RESET;
+	else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
+		reset_level = HNS3_VF_FUNC_RESET;
+	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
+		reset_level = HNS3_FLR_RESET;
+	else
+		reset_level = HNS3_NONE_RESET;
+
+	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
+		return HNS3_NONE_RESET;
+
+	return reset_level;
+}
+
+/*
+ * VF reset service, run from the alarm/interrupt thread. Recovers a
+ * possibly lost interrupt (deferred schedule), then drives the reset
+ * state machine for any pending hardware reset, logging a warning
+ * when one pass exceeds HNS3_RESET_PROCESS_MS.
+ */
+static void
+hns3vf_reset_service(void *param)
+{
+	struct hns3_adapter *hns = (struct hns3_adapter *)param;
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset_level;
+	struct timeval tv_delta;
+	struct timeval tv_start;
+	struct timeval tv;
+	uint64_t msec;
+
+	/*
+	 * The interrupt is not triggered within the delay time.
+	 * The interrupt may have been lost. It is necessary to handle
+	 * the interrupt to recover from the error.
+	 */
+	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
+		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+		hns3_err(hw, "Handling interrupts in delayed tasks");
+		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
+	}
+	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
+
+	/*
+	 * Hardware reset has been notified, we now have to poll & check if
+	 * hardware has actually completed the reset sequence.
+	 */
+	reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
+	if (reset_level != HNS3_NONE_RESET) {
+		gettimeofday(&tv_start, NULL);
+		hns3_reset_process(hns, reset_level);
+		gettimeofday(&tv, NULL);
+		timersub(&tv, &tv_start, &tv_delta);
+		msec = tv_delta.tv_sec * MSEC_PER_SEC +
+		       tv_delta.tv_usec / USEC_PER_MSEC;
+		/* Log the elapsed time in decimal ms (PRIu64, not hex). */
+		if (msec > HNS3_RESET_PROCESS_MS)
+			hns3_err(hw, "%d handle long time delta %" PRIu64
+				     " ms time=%ld.%.6ld",
+				     hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
+	}
+}
+
+/*
+ * Re-initialize the VF device after a reset: bring the command queue
+ * back up, reset all Rx/Tx queues and re-run the VF hardware init
+ * sequence. On failure the command queue is torn down again so the
+ * device is left in a consistent "not initialized" state.
+ */
+static int
+hns3vf_reinit_dev(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	/* Firmware command initialize */
+	ret = hns3_cmd_init(hw);
+	if (ret) {
+		hns3_err(hw, "Failed to init cmd: %d", ret);
+		return ret;
+	}
+
+	ret = hns3_reset_all_queues(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to reset all queues: %d", ret);
+		goto err_init;
+	}
+
+	ret = hns3vf_init_hardware(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to init hardware: %d", ret);
+		goto err_init;
+	}
+
+	return 0;
+
+err_init:
+	hns3_cmd_uninit(hw);
+	return ret;
+}
+
static const struct eth_dev_ops hns3vf_eth_dev_ops = {
.dev_start = hns3vf_dev_start,
.dev_stop = hns3vf_dev_stop,
.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
};
+/*
+ * VF callbacks plugged into the generic reset framework (hns3_intr.c);
+ * the framework invokes them stage by stage from hns3_reset_process().
+ */
+static const struct hns3_reset_ops hns3vf_reset_ops = {
+ .reset_service = hns3vf_reset_service,
+ .stop_service = hns3vf_stop_service,
+ .prepare_reset = hns3vf_prepare_reset,
+ .wait_hardware_ready = hns3vf_wait_hardware_ready,
+ .reinit_dev = hns3vf_reinit_dev,
+ .restore_conf = hns3vf_restore_conf,
+ .start_service = hns3vf_start_service,
+};
+
static int
hns3vf_dev_init(struct rte_eth_dev *eth_dev)
{
hns->is_vf = true;
hw->data = eth_dev->data;
+ ret = hns3_reset_init(hw);
+ if (ret)
+ goto err_init_reset;
+ hw->reset.ops = &hns3vf_reset_ops;
+
ret = hns3vf_init_vf(eth_dev);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
*/
eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+ if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
+ hns3_err(hw, "Reschedule reset service after dev_init");
+ hns3_schedule_reset(hns);
+ } else {
+ /* IMP will wait ready flag before reset */
+ hns3_notify_reset_ready(hw, false);
+ }
rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
eth_dev);
rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
hns3vf_uninit_vf(eth_dev);
err_init_vf:
+ rte_free(hw->reset.wait_data);
+
+err_init_reset:
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
hw->reset.stats.merge_cnt++; \
} while (0)
+/*
+ * Human-readable names indexed by enum hns3_reset_level; must stay in
+ * sync with that enum (one entry per level up to HNS3_MAX_RESET).
+ */
+static const char *reset_string[HNS3_MAX_RESET] = {
+ "none", "vf_func", "vf_pf_func", "vf_full", "flr",
+ "vf_global", "pf_func", "global", "IMP",
+};
+
const struct hns3_hw_error mac_afifo_tnl_int[] = {
{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
.reset_level = HNS3_NONE_RESET },
out:
rte_free(desc);
}
+
+/*
+ * Initialize the reset state machine embedded in @hw and allocate the
+ * wait_data context used by the asynchronous wait callback.
+ *
+ * Returns 0 on success, -ENOMEM if wait_data cannot be allocated.
+ */
+int
+hns3_reset_init(struct hns3_hw *hw)
+{
+ rte_spinlock_init(&hw->lock);
+ hw->reset.level = HNS3_NONE_RESET;
+ hw->reset.stage = RESET_STAGE_NONE;
+ hw->reset.request = 0;
+ hw->reset.pending = 0;
+ rte_atomic16_init(&hw->reset.resetting);
+ rte_atomic16_init(&hw->reset.disable_cmd);
+ /* sizeof(*ptr) keeps the allocation size tied to the pointer type. */
+ hw->reset.wait_data = rte_zmalloc("wait_data",
+ sizeof(*hw->reset.wait_data), 0);
+ if (hw->reset.wait_data == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for wait_data");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * Ask the reset service to run soon (after SWITCH_CONTEXT_US).
+ *
+ * Before dev_init completes only the PENDING flag is recorded so that
+ * dev_init can reschedule the service itself; once the adapter is
+ * closed scheduling is ignored. An already-armed deferred alarm is
+ * cancelled and replaced by the immediate one.
+ */
+void
+hns3_schedule_reset(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+
+ /* Reschedule the reset process after successful initialization */
+ if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
+ rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_PENDING);
+ return;
+ }
+
+ /* No reset handling once the device is closed or closing. */
+ if (hw->adapter_state >= HNS3_NIC_CLOSED)
+ return;
+
+ /* Schedule restart alarm if it is not scheduled yet */
+ if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_REQUESTED)
+ return;
+ if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED)
+ rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
+ rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+
+ rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
+}
+
+/*
+ * Arm the reset service with a long (3 s) delay. Used when a reset is
+ * suspected (e.g. a delayed command-queue error) but the corresponding
+ * interrupt has not arrived yet. Does nothing if any schedule is
+ * already armed or the adapter is uninitialized/closed.
+ */
+void
+hns3_schedule_delayed_reset(struct hns3_adapter *hns)
+{
+#define DEFERRED_SCHED_US (3 * MSEC_PER_SEC * USEC_PER_MSEC)
+ struct hns3_hw *hw = &hns->hw;
+
+ /* Do nothing if it is uninited or closed */
+ if (hw->adapter_state == HNS3_NIC_UNINITIALIZED ||
+ hw->adapter_state >= HNS3_NIC_CLOSED) {
+ return;
+ }
+
+ /* An immediate or deferred schedule is already armed; keep it. */
+ if (rte_atomic16_read(&hns->hw.reset.schedule) != SCHEDULE_NONE)
+ return;
+ rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_DEFERRED);
+ rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
+}
+
+/*
+ * Alarm callback polling an asynchronous wait condition. It keeps
+ * re-arming itself every data->interval until data->check_completion()
+ * reports done, the retry budget (data->count) is exhausted, the
+ * deadline (data->end_ms) passes, a new reset becomes pending, or the
+ * port starts closing. The outcome is stored in data->result and the
+ * reset service is rescheduled to consume it.
+ */
+void
+hns3_wait_callback(void *param)
+{
+ struct hns3_wait_data *data = (struct hns3_wait_data *)param;
+ struct hns3_adapter *hns = data->hns;
+ struct hns3_hw *hw = &hns->hw;
+ uint64_t msec;
+ bool done;
+
+ data->count--;
+ if (data->check_completion) {
+ /*
+ * Check if the current time exceeds the deadline
+ * or a pending reset coming, or reset during close.
+ */
+ msec = get_timeofday_ms();
+ if (msec > data->end_ms || is_reset_pending(hns) ||
+ hw->adapter_state == HNS3_NIC_CLOSING) {
+ done = false;
+ data->count = 0; /* force the timeout path below */
+ } else
+ done = data->check_completion(hw);
+ } else
+ done = true; /* no predicate: one wait interval suffices */
+
+ if (!done && data->count > 0) {
+ /* Budget remains: poll again after another interval. */
+ rte_eal_alarm_set(data->interval, hns3_wait_callback, data);
+ return;
+ }
+ if (done)
+ data->result = HNS3_WAIT_SUCCESS;
+ else {
+ hns3_err(hw, "%s wait timeout at stage %d",
+ reset_string[hw->reset.level], hw->reset.stage);
+ data->result = HNS3_WAIT_TIMEOUT;
+ }
+ /* Wake the reset service so it can act on data->result. */
+ hns3_schedule_reset(hns);
+}
+
+/*
+ * Publish (enable == true) or withdraw the driver's "software ready
+ * for reset" flag in the command-queue depth register, which IMP
+ * firmware samples before proceeding with a hardware reset.
+ */
+void
+hns3_notify_reset_ready(struct hns3_hw *hw, bool enable)
+{
+ uint32_t val;
+
+ val = hns3_read_dev(hw, HNS3_CMDQ_TX_DEPTH_REG);
+ val = enable ? (val | HNS3_NIC_SW_RST_RDY) :
+ (val & ~HNS3_NIC_SW_RST_RDY);
+ hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, val);
+}
+
+/*
+ * Step 3 of the reset flow: after the prepare work, wait one sync
+ * interval through hns3_wait_callback() and then raise the ready flag
+ * so IMP may start the hardware reset.
+ *
+ * Returns -EAGAIN while the one-shot wait is in flight (the service
+ * will be re-entered by the callback), 0 once the flag has been raised.
+ */
+int
+hns3_reset_req_hw_reset(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+
+ if (hw->reset.wait_data->result == HNS3_WAIT_UNKNOWN) {
+ /* First entry: arm a single wait with no completion check. */
+ hw->reset.wait_data->hns = hns;
+ hw->reset.wait_data->check_completion = NULL;
+ hw->reset.wait_data->interval = HNS3_RESET_SYNC_US;
+ hw->reset.wait_data->count = 1;
+ hw->reset.wait_data->result = HNS3_WAIT_REQUEST;
+ rte_eal_alarm_set(hw->reset.wait_data->interval,
+ hns3_wait_callback, hw->reset.wait_data);
+ return -EAGAIN;
+ } else if (hw->reset.wait_data->result == HNS3_WAIT_REQUEST)
+ return -EAGAIN;
+
+ /* inform hardware that preparatory work is done */
+ hns3_notify_reset_ready(hw, true);
+ return 0;
+}
+
+/*
+ * Clear the just-handled reset level from @levels together with any
+ * lower levels it implicitly covers. Each implicitly-cleared level is
+ * accounted as a merged reset via HNS3_CHECK_MERGE_CNT, and a summary
+ * is logged when any merging happened.
+ */
+static void
+hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+{
+ uint64_t merge_cnt = hw->reset.stats.merge_cnt;
+ int64_t tmp;
+
+ switch (hw->reset.level) {
+ case HNS3_IMP_RESET:
+ hns3_atomic_clear_bit(HNS3_IMP_RESET, levels);
+ tmp = hns3_test_and_clear_bit(HNS3_GLOBAL_RESET, levels);
+ HNS3_CHECK_MERGE_CNT(tmp);
+ tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels);
+ HNS3_CHECK_MERGE_CNT(tmp);
+ break;
+ case HNS3_GLOBAL_RESET:
+ hns3_atomic_clear_bit(HNS3_GLOBAL_RESET, levels);
+ tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels);
+ HNS3_CHECK_MERGE_CNT(tmp);
+ break;
+ case HNS3_FUNC_RESET:
+ hns3_atomic_clear_bit(HNS3_FUNC_RESET, levels);
+ break;
+ case HNS3_VF_RESET:
+ hns3_atomic_clear_bit(HNS3_VF_RESET, levels);
+ tmp = hns3_test_and_clear_bit(HNS3_VF_PF_FUNC_RESET, levels);
+ HNS3_CHECK_MERGE_CNT(tmp);
+ tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels);
+ HNS3_CHECK_MERGE_CNT(tmp);
+ break;
+ case HNS3_VF_FULL_RESET:
+ hns3_atomic_clear_bit(HNS3_VF_FULL_RESET, levels);
+ tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels);
+ HNS3_CHECK_MERGE_CNT(tmp);
+ break;
+ case HNS3_VF_PF_FUNC_RESET:
+ hns3_atomic_clear_bit(HNS3_VF_PF_FUNC_RESET, levels);
+ tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels);
+ HNS3_CHECK_MERGE_CNT(tmp);
+ break;
+ case HNS3_VF_FUNC_RESET:
+ hns3_atomic_clear_bit(HNS3_VF_FUNC_RESET, levels);
+ break;
+ case HNS3_FLR_RESET:
+ hns3_atomic_clear_bit(HNS3_FLR_RESET, levels);
+ break;
+ case HNS3_NONE_RESET:
+ default:
+ return;
+ } /* no stray ';' after the switch: avoids an empty statement */
+ if (merge_cnt != hw->reset.stats.merge_cnt)
+ hns3_warn(hw,
+ "No need to do low-level reset after %s reset. "
+ "merge cnt: %" PRIx64 " total merge cnt: %" PRIx64,
+ reset_string[hw->reset.level],
+ hw->reset.stats.merge_cnt - merge_cnt,
+ hw->reset.stats.merge_cnt);
+}
+
+/*
+ * Decide whether a failed reset attempt should be retried.
+ *
+ * Returns true when the caller should schedule another attempt (a new
+ * higher-level reset is pending, or fewer than MAX_RESET_FAIL_CNT
+ * attempts have been made); false when the reset is abandoned, in
+ * which case the ready flag is raised so IMP stops waiting for us.
+ */
+static bool
+hns3_reset_err_handle(struct hns3_adapter *hns)
+{
+#define MAX_RESET_FAIL_CNT 5
+
+ struct hns3_hw *hw = &hns->hw;
+
+ if (hw->adapter_state == HNS3_NIC_CLOSING)
+ goto reset_fail;
+
+ if (is_reset_pending(hns)) {
+ hw->reset.attempts = 0;
+ hw->reset.stats.fail_cnt++;
+ /* NOTE(review): the "attempts" label below prints fail_cnt,
+ * not attempts — confirm this is intended. */
+ hns3_warn(hw, "%s reset fail because new Reset is pending "
+ "attempts:%" PRIx64,
+ reset_string[hw->reset.level],
+ hw->reset.stats.fail_cnt);
+ hw->reset.level = HNS3_NONE_RESET;
+ return true;
+ }
+
+ hw->reset.attempts++;
+ if (hw->reset.attempts < MAX_RESET_FAIL_CNT) {
+ /* Re-queue the same level for another attempt. */
+ hns3_atomic_set_bit(hw->reset.level, &hw->reset.pending);
+ hns3_warn(hw, "%s retry to reset attempts: %d",
+ reset_string[hw->reset.level],
+ hw->reset.attempts);
+ return true;
+ }
+
+ /* The command queue was disabled during the failed reset; bring
+ * it back so later cleanup can still talk to the firmware.
+ * NOTE(review): hns3_cmd_init() return value is ignored here. */
+ if (rte_atomic16_read(&hw->reset.disable_cmd))
+ hns3_cmd_init(hw);
+reset_fail:
+ hw->reset.attempts = 0;
+ hw->reset.stats.fail_cnt++;
+ hns3_warn(hw, "%s reset fail fail_cnt:%" PRIx64 " success_cnt:%" PRIx64
+ " global_cnt:%" PRIx64 " imp_cnt:%" PRIx64
+ " request_cnt:%" PRIx64 " exec_cnt:%" PRIx64
+ " merge_cnt:%" PRIx64,
+ reset_string[hw->reset.level], hw->reset.stats.fail_cnt,
+ hw->reset.stats.success_cnt, hw->reset.stats.global_cnt,
+ hw->reset.stats.imp_cnt, hw->reset.stats.request_cnt,
+ hw->reset.stats.exec_cnt, hw->reset.stats.merge_cnt);
+
+ /* IMP no longer waiting the ready flag */
+ hns3_notify_reset_ready(hw, true);
+ return false;
+}
+
+/*
+ * Steps 1-2 of the reset flow: stop the service/datapath
+ * (RESET_STAGE_DOWN) and run the driver's prepare callback
+ * (RESET_STAGE_PREWAIT). Each stage is entered at most once per reset;
+ * on success the stage advances to RESET_STAGE_REQ_HW_RESET.
+ * Returns 0 on success or the failing callback's error code.
+ */
+static int
+hns3_reset_pre(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct timeval tv;
+ int ret;
+
+ if (hw->reset.stage == RESET_STAGE_NONE) {
+ /* Mark resetting before stopping the datapath. */
+ rte_atomic16_set(&hns->hw.reset.resetting, 1);
+ hw->reset.stage = RESET_STAGE_DOWN;
+ ret = hw->reset.ops->stop_service(hns);
+ gettimeofday(&tv, NULL);
+ if (ret) {
+ hns3_warn(hw, "Reset step1 down fail=%d time=%ld.%.6ld",
+ ret, tv.tv_sec, tv.tv_usec);
+ return ret;
+ }
+ hns3_warn(hw, "Reset step1 down success time=%ld.%.6ld",
+ tv.tv_sec, tv.tv_usec);
+ hw->reset.stage = RESET_STAGE_PREWAIT;
+ }
+ if (hw->reset.stage == RESET_STAGE_PREWAIT) {
+ ret = hw->reset.ops->prepare_reset(hns);
+ gettimeofday(&tv, NULL);
+ if (ret) {
+ hns3_warn(hw,
+ "Reset step2 prepare wait fail=%d time=%ld.%.6ld",
+ ret, tv.tv_sec, tv.tv_usec);
+ return ret;
+ }
+ hns3_warn(hw, "Reset step2 prepare wait success time=%ld.%.6ld",
+ tv.tv_sec, tv.tv_usec);
+ hw->reset.stage = RESET_STAGE_REQ_HW_RESET;
+ /* Reset the wait context for the upcoming request stage. */
+ hw->reset.wait_data->result = HNS3_WAIT_UNKNOWN;
+ }
+ return 0;
+}
+
+/*
+ * Steps 5-6 of the reset flow: re-initialize the device
+ * (RESET_STAGE_DEV_INIT), restore user configuration
+ * (RESET_STAGE_RESTORE) and finish up (RESET_STAGE_DONE).
+ *
+ * Returns 0 when the reset completed, -EAGAIN when the service must be
+ * re-entered later, -EPERM when aborted because the port is closing,
+ * and -EIO on unrecoverable failure (after -ETIME retries run out).
+ */
+static int
+hns3_reset_post(struct hns3_adapter *hns)
+{
+#define TIMEOUT_RETRIES_CNT 5
+ struct hns3_hw *hw = &hns->hw;
+ struct timeval tv_delta;
+ struct timeval tv;
+ int ret = 0;
+
+ if (hw->adapter_state == HNS3_NIC_CLOSING) {
+ hns3_warn(hw, "Don't do reset_post during closing, just uninit cmd");
+ hns3_cmd_uninit(hw);
+ return -EPERM;
+ }
+
+ if (hw->reset.stage == RESET_STAGE_DEV_INIT) {
+ rte_spinlock_lock(&hw->lock);
+ /* Free mbufs whose release was postponed during the reset. */
+ if (hw->reset.mbuf_deferred_free) {
+ hns3_dev_release_mbufs(hns);
+ hw->reset.mbuf_deferred_free = false;
+ }
+ ret = hw->reset.ops->reinit_dev(hns);
+ rte_spinlock_unlock(&hw->lock);
+ gettimeofday(&tv, NULL);
+ if (ret) {
+ hns3_warn(hw, "Reset step5 devinit fail=%d retries=%d",
+ ret, hw->reset.retries);
+ goto err;
+ }
+ hns3_warn(hw, "Reset step5 devinit success time=%ld.%.6ld",
+ tv.tv_sec, tv.tv_usec);
+ hw->reset.retries = 0;
+ hw->reset.stage = RESET_STAGE_RESTORE;
+ /* Yield and re-enter the service for the restore stage. */
+ rte_eal_alarm_set(SWITCH_CONTEXT_US,
+ hw->reset.ops->reset_service, hns);
+ return -EAGAIN;
+ }
+ if (hw->reset.stage == RESET_STAGE_RESTORE) {
+ rte_spinlock_lock(&hw->lock);
+ ret = hw->reset.ops->restore_conf(hns);
+ rte_spinlock_unlock(&hw->lock);
+ gettimeofday(&tv, NULL);
+ if (ret) {
+ hns3_warn(hw,
+ "Reset step6 restore fail=%d retries=%d",
+ ret, hw->reset.retries);
+ goto err;
+ }
+ hns3_warn(hw, "Reset step6 restore success time=%ld.%.6ld",
+ tv.tv_sec, tv.tv_usec);
+ hw->reset.retries = 0;
+ hw->reset.stage = RESET_STAGE_DONE;
+ }
+ if (hw->reset.stage == RESET_STAGE_DONE) {
+ /* IMP will wait ready flag before reset */
+ hns3_notify_reset_ready(hw, false);
+ hns3_clear_reset_level(hw, &hw->reset.pending);
+ rte_atomic16_clear(&hns->hw.reset.resetting);
+ hw->reset.attempts = 0;
+ hw->reset.stats.success_cnt++;
+ hw->reset.stage = RESET_STAGE_NONE;
+ hw->reset.ops->start_service(hns);
+ gettimeofday(&tv, NULL);
+ timersub(&tv, &hw->reset.start_time, &tv_delta);
+ hns3_warn(hw, "%s reset done fail_cnt:%" PRIx64
+ " success_cnt:%" PRIx64 " global_cnt:%" PRIx64
+ " imp_cnt:%" PRIx64 " request_cnt:%" PRIx64
+ " exec_cnt:%" PRIx64 " merge_cnt:%" PRIx64,
+ reset_string[hw->reset.level],
+ hw->reset.stats.fail_cnt, hw->reset.stats.success_cnt,
+ hw->reset.stats.global_cnt, hw->reset.stats.imp_cnt,
+ hw->reset.stats.request_cnt, hw->reset.stats.exec_cnt,
+ hw->reset.stats.merge_cnt);
+ hns3_warn(hw,
+ "%s reset done delta %ld ms time=%ld.%.6ld",
+ reset_string[hw->reset.level],
+ tv_delta.tv_sec * MSEC_PER_SEC +
+ tv_delta.tv_usec / USEC_PER_MSEC,
+ tv.tv_sec, tv.tv_usec);
+ hw->reset.level = HNS3_NONE_RESET;
+ }
+ return 0;
+
+err:
+ /* Only timeouts are retried, and only up to TIMEOUT_RETRIES_CNT. */
+ if (ret == -ETIME) {
+ hw->reset.retries++;
+ if (hw->reset.retries < TIMEOUT_RETRIES_CNT) {
+ rte_eal_alarm_set(HNS3_RESET_SYNC_US,
+ hw->reset.ops->reset_service, hns);
+ return -EAGAIN;
+ }
+ }
+ hw->reset.retries = 0;
+ return -EIO;
+}
+
+/*
+ * There are three scenarios as follows:
+ * When the reset is not in progress, the reset process starts.
+ * During the reset process, if the reset level has not changed,
+ * the reset process continues; otherwise, the reset process is aborted.
+ * hw->reset.level new_level action
+ * HNS3_NONE_RESET HNS3_XXXX_RESET start reset
+ * HNS3_XXXX_RESET HNS3_XXXX_RESET continue reset
+ * HNS3_LOW_RESET HNS3_HIGH_RESET abort
+ *
+ * Drives the staged state machine: pre (steps 1-2), request hardware
+ * reset (step 3), wait (step 4), post (steps 5-6). Returns 0 when the
+ * reset finished, -EAGAIN when the service will be re-entered later,
+ * -EIO/-EBUSY on abort or failure (after error handling/retry logic).
+ */
+int
+hns3_reset_process(struct hns3_adapter *hns, enum hns3_reset_level new_level)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct timeval tv_delta;
+ struct timeval tv;
+ int ret;
+
+ if (hw->reset.level == HNS3_NONE_RESET) {
+ /* Starting a fresh reset: record level and start time. */
+ hw->reset.level = new_level;
+ hw->reset.stats.exec_cnt++;
+ gettimeofday(&hw->reset.start_time, NULL);
+ hns3_warn(hw, "Start %s reset time=%ld.%.6ld",
+ reset_string[hw->reset.level],
+ hw->reset.start_time.tv_sec,
+ hw->reset.start_time.tv_usec);
+ }
+
+ if (is_reset_pending(hns)) {
+ /* A higher-level reset arrived: abort the current one. */
+ gettimeofday(&tv, NULL);
+ hns3_warn(hw,
+ "%s reset is aborted by high level time=%ld.%.6ld",
+ reset_string[hw->reset.level], tv.tv_sec, tv.tv_usec);
+ if (hw->reset.wait_data->result == HNS3_WAIT_REQUEST)
+ rte_eal_alarm_cancel(hns3_wait_callback,
+ hw->reset.wait_data);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ret = hns3_reset_pre(hns);
+ if (ret)
+ goto err;
+
+ if (hw->reset.stage == RESET_STAGE_REQ_HW_RESET) {
+ ret = hns3_reset_req_hw_reset(hns);
+ if (ret == -EAGAIN)
+ return ret;
+ gettimeofday(&tv, NULL);
+ hns3_warn(hw,
+ "Reset step3 request IMP reset success time=%ld.%.6ld",
+ tv.tv_sec, tv.tv_usec);
+ hw->reset.stage = RESET_STAGE_WAIT;
+ /* Reset the wait context for the hardware-ready wait. */
+ hw->reset.wait_data->result = HNS3_WAIT_UNKNOWN;
+ }
+ if (hw->reset.stage == RESET_STAGE_WAIT) {
+ ret = hw->reset.ops->wait_hardware_ready(hns);
+ if (ret)
+ goto retry;
+ gettimeofday(&tv, NULL);
+ hns3_warn(hw, "Reset step4 reset wait success time=%ld.%.6ld",
+ tv.tv_sec, tv.tv_usec);
+ hw->reset.stage = RESET_STAGE_DEV_INIT;
+ }
+
+ ret = hns3_reset_post(hns);
+ if (ret)
+ goto retry;
+
+ return 0;
+retry:
+ if (ret == -EAGAIN)
+ return ret;
+err:
+ /* Failure path: clear the level, then either retry or give up. */
+ hns3_clear_reset_level(hw, &hw->reset.pending);
+ if (hns3_reset_err_handle(hns)) {
+ hw->reset.stage = RESET_STAGE_PREWAIT;
+ hns3_schedule_reset(hns);
+ } else {
+ rte_spinlock_lock(&hw->lock);
+ if (hw->reset.mbuf_deferred_free) {
+ hns3_dev_release_mbufs(hns);
+ hw->reset.mbuf_deferred_free = false;
+ }
+ rte_spinlock_unlock(&hw->lock);
+ rte_atomic16_clear(&hns->hw.reset.resetting);
+ hw->reset.stage = RESET_STAGE_NONE;
+ gettimeofday(&tv, NULL);
+ timersub(&tv, &hw->reset.start_time, &tv_delta);
+ hns3_warn(hw, "%s reset fail delta %ld ms time=%ld.%.6ld",
+ reset_string[hw->reset.level],
+ tv_delta.tv_sec * MSEC_PER_SEC +
+ tv_delta.tv_usec / USEC_PER_MSEC,
+ tv.tv_sec, tv.tv_usec);
+ hw->reset.level = HNS3_NONE_RESET;
+ }
+
+ return -EIO;
+}
+
+/*
+ * The reset process can only be terminated after handshake with IMP(step3),
+ * so that IMP can complete the reset process normally.
+ *
+ * Called on device close: waits a bounded time for an in-flight reset
+ * to finish, raises the ready flag so IMP stops waiting on us, and
+ * cancels any outstanding alarms.
+ */
+void
+hns3_reset_abort(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct timeval tv;
+ int i;
+
+ /* Poll until the reset level drops to NONE or the budget expires. */
+ for (i = 0; i < HNS3_QUIT_RESET_CNT; i++) {
+ if (hw->reset.level == HNS3_NONE_RESET)
+ break;
+ rte_delay_ms(HNS3_QUIT_RESET_DELAY_MS);
+ }
+
+ /* IMP no longer waiting the ready flag */
+ hns3_notify_reset_ready(hw, true);
+
+ rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
+ rte_eal_alarm_cancel(hns3_wait_callback, hw->reset.wait_data);
+
+ if (hw->reset.level != HNS3_NONE_RESET) {
+ /* The reset never finished; log it but proceed with close. */
+ gettimeofday(&tv, NULL);
+ hns3_err(hw, "Failed to terminate reset: %s time=%ld.%.6ld",
+ reset_string[hw->reset.level], tv.tv_sec, tv.tv_usec);
+ }
+}
#define HNS3_SSU_COMMON_ERR_INT_MASK GENMASK(9, 0)
#define HNS3_SSU_PORT_INT_MSIX_MASK 0x7BFF
+#define HNS3_RESET_PROCESS_MS 200
+
struct hns3_hw_blk {
const char *name;
int (*enable_err_intr)(struct hns3_adapter *hns, bool en);
void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
void hns3_intr_unregister(const struct rte_intr_handle *hdl,
rte_intr_callback_fn cb_fn, void *cb_arg);
+void hns3_notify_reset_ready(struct hns3_hw *hw, bool enable);
+int hns3_reset_init(struct hns3_hw *hw);
+void hns3_wait_callback(void *param);
+void hns3_schedule_reset(struct hns3_adapter *hns);
+void hns3_schedule_delayed_reset(struct hns3_adapter *hns);
+int hns3_reset_req_hw_reset(struct hns3_adapter *hns);
+int hns3_reset_process(struct hns3_adapter *hns,
+ enum hns3_reset_level reset_level);
+void hns3_reset_abort(struct hns3_adapter *hns);
#endif /* _HNS3_INTR_H_ */
uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_MAX_RETRY_MS 500
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
struct hns3_mbx_resp_status *mbx_resp;
bool in_irq = false;
uint64_t now;
end = now + HNS3_MAX_RETRY_MS;
while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
(now < end)) {
+ if (rte_atomic16_read(&hw->reset.disable_cmd)) {
+ hns3_err(hw, "Don't wait for mbx respone because of "
+ "disable_cmd");
+ return -EBUSY;
+ }
+
+ if (is_reset_pending(hns)) {
+ hw->mbx_resp.req_msg_data = 0;
+ hns3_err(hw, "Don't wait for mbx respone because of "
+ "reset pending");
+ return -EIO;
+ }
+
/*
* The mbox response is running on the interrupt thread.
* Sending mbox in the interrupt thread cannot wait for the
hns3_warn(hw, "PF inform reset level %d", reset_level);
hw->reset.stats.request_cnt++;
+ hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
break;
default:
hns3_err(hw, "Fetched unsupported(%d) message from arq",
return nb_tx;
}
+/*
+ * Dummy Rx/Tx burst routine installed while the datapath must stay
+ * quiescent (port not started, or a reset in progress); it always
+ * reports zero packets processed so callers never touch hardware.
+ */
+static uint16_t
+hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
+{
+ return 0;
+}
+
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
{
- eth_dev->rx_pkt_burst = hns3_recv_pkts;
- eth_dev->tx_pkt_burst = hns3_xmit_pkts;
- eth_dev->tx_pkt_prepare = hns3_prep_pkts;
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+
+ /*
+ * Install the real burst functions only when the port is started
+ * and no reset is in flight; otherwise point every burst callback
+ * at the dummy routine so datapath threads cannot touch hardware
+ * while a reset is running.
+ */
+ if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
+ rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
+ eth_dev->rx_pkt_burst = hns3_recv_pkts;
+ eth_dev->tx_pkt_burst = hns3_xmit_pkts;
+ eth_dev->tx_pkt_prepare = hns3_prep_pkts;
+ } else {
+ eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
+ eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
+ eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
+ }
}