#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
+#include <rte_atomic.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
+#include <rte_interrupts.h>
#include <rte_io.h>
#include <rte_log.h>
#include <rte_pci.h>
#include "hns3_ethdev.h"
#include "hns3_logs.h"
+#include "hns3_rxtx.h"
+#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_dcb.h"
+#include "hns3_mp.h"
#define HNS3_DEFAULT_PORT_CONF_BURST_SIZE 32
#define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1
#define HNS3_SERVICE_INTERVAL 1000000 /* us */
+#define HNS3_PORT_BASE_VLAN_DISABLE 0
+#define HNS3_PORT_BASE_VLAN_ENABLE 1
+#define HNS3_INVALID_PVID 0xFFFF
+
+#define HNS3_FILTER_TYPE_VF 0
+#define HNS3_FILTER_TYPE_PORT 1
+#define HNS3_FILTER_FE_EGRESS_V1_B BIT(0)
+#define HNS3_FILTER_FE_NIC_INGRESS_B BIT(0)
+#define HNS3_FILTER_FE_NIC_EGRESS_B BIT(1)
+#define HNS3_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HNS3_FILTER_FE_ROCE_EGRESS_B BIT(3)
+#define HNS3_FILTER_FE_EGRESS (HNS3_FILTER_FE_NIC_EGRESS_B \
+ | HNS3_FILTER_FE_ROCE_EGRESS_B)
+#define HNS3_FILTER_FE_INGRESS (HNS3_FILTER_FE_NIC_INGRESS_B \
+ | HNS3_FILTER_FE_ROCE_INGRESS_B)
+
+/* Reset related Registers */
+#define HNS3_GLOBAL_RESET_BIT 0
+#define HNS3_CORE_RESET_BIT 1
+#define HNS3_IMP_RESET_BIT 2
+#define HNS3_FUN_RST_ING_B 0
+
+#define HNS3_VECTOR0_IMP_RESET_INT_B 1
+
+#define HNS3_RESET_WAIT_MS 100
+#define HNS3_RESET_WAIT_CNT 200
int hns3_logtype_init;
int hns3_logtype_driver;
+enum hns3_evt_cause {
+ HNS3_VECTOR0_EVENT_RST,
+ HNS3_VECTOR0_EVENT_MBX,
+ HNS3_VECTOR0_EVENT_ERR,
+ HNS3_VECTOR0_EVENT_OTHER,
+};
+
+static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
+ uint64_t *levels);
+static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
+ int on);
+static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev);
+
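+/*
+ * hns3_pf_disable_irq0()/hns3_pf_enable_irq0() mask and unmask the misc
+ * interrupt vector 0, which is shared by reset, mailbox and hardware
+ * error events, by writing 0/1 to HNS3_MISC_VECTOR_REG_BASE.
+ */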
+static void
+hns3_pf_disable_irq0(struct hns3_hw *hw)
+{
+ hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
+}
+
+static void
+hns3_pf_enable_irq0(struct hns3_hw *hw)
+{
+ hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
+}
+
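+/*
+ * Decode the cause of a vector 0 interrupt. Causes are checked in
+ * priority order: IMP reset, global reset, MSI-X hardware error,
+ * mailbox (CMDQ RX), then "other". The bits to acknowledge are
+ * returned through 'clearval' when it is non-NULL.
+ */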
+static enum hns3_evt_cause
+hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
+{
+ struct hns3_hw *hw = &hns->hw;
+ uint32_t vector0_int_stats;
+ uint32_t cmdq_src_val;
+ uint32_t val;
+ enum hns3_evt_cause ret;
+
+ /* fetch the events from their corresponding regs */
+ vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
+ cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
+
+ /*
+ * Assumption: if reset and mailbox events are reported together, only
+ * the reset event is processed and the mailbox event is deferred.
+ * Since the RX CMDQ event is not cleared this time, the hardware will
+ * raise another interrupt just for the mailbox.
+ */
+ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
+ rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
+ val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
+ if (clearval) {
+ hw->reset.stats.imp_cnt++;
+ hns3_warn(hw, "IMP reset detected, clear reset status");
+ } else {
+ hns3_schedule_delayed_reset(hns);
+ hns3_warn(hw, "IMP reset detected, don't clear reset status");
+ }
+
+ ret = HNS3_VECTOR0_EVENT_RST;
+ goto out;
+ }
+
+ /* Global reset */
+ if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
+ rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
+ val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
+ if (clearval) {
+ hw->reset.stats.global_cnt++;
+ hns3_warn(hw, "Global reset detected, clear reset status");
+ } else {
+ hns3_schedule_delayed_reset(hns);
+ hns3_warn(hw, "Global reset detected, don't clear reset status");
+ }
+
+ ret = HNS3_VECTOR0_EVENT_RST;
+ goto out;
+ }
+
+ /* check for vector0 msix event source */
+ if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK) {
+ val = vector0_int_stats;
+ ret = HNS3_VECTOR0_EVENT_ERR;
+ goto out;
+ }
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
+ cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
+ val = cmdq_src_val;
+ ret = HNS3_VECTOR0_EVENT_MBX;
+ goto out;
+ }
+
+ if (clearval && (vector0_int_stats || cmdq_src_val))
+ hns3_warn(hw, "surprise irq ector0_int_stats:0x%x cmdq_src_val:0x%x",
+ vector0_int_stats, cmdq_src_val);
+ val = vector0_int_stats;
+ ret = HNS3_VECTOR0_EVENT_OTHER;
+out:
+ if (clearval)
+ *clearval = val;
+ return ret;
+}
+
+static void
+hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
+{
+ if (event_type == HNS3_VECTOR0_EVENT_RST)
+ hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
+ else if (event_type == HNS3_VECTOR0_EVENT_MBX)
+ hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
+}
+
+static void
+hns3_clear_all_event_cause(struct hns3_hw *hw)
+{
+ uint32_t vector0_int_stats;
+
+ vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
+
+ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
+ hns3_warn(hw, "Probe during IMP reset interrupt");
+
+ if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
+ hns3_warn(hw, "Probe during Global reset interrupt");
+
+ hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
+ BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
+ BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
+ BIT(HNS3_VECTOR0_CORERESET_INT_B));
+ hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
+}
+
+static void
+hns3_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ enum hns3_evt_cause event_cause;
+ uint32_t clearval = 0;
+
+ /* Disable interrupt */
+ hns3_pf_disable_irq0(hw);
+
+ event_cause = hns3_check_event_cause(hns, &clearval);
+
+ /* vector 0 interrupt is shared with reset and mailbox source events. */
+ if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
+ hns3_handle_msix_error(hns, &hw->reset.request);
+ hns3_schedule_reset(hns);
+ } else if (event_cause == HNS3_VECTOR0_EVENT_RST)
+ hns3_schedule_reset(hns);
+ else if (event_cause == HNS3_VECTOR0_EVENT_MBX)
+ hns3_dev_handle_mbx_msg(hw);
+ else
+ hns3_err(hw, "Received unknown event");
+
+ hns3_clear_event_cause(hw, event_cause, clearval);
+ /* Enable interrupt if it is not caused by reset */
+ hns3_pf_enable_irq0(hw);
+}
+
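+/*
+ * Program one entry of the port vlan filter table. The 4096 vlan ids
+ * are grouped into blocks of 160: 'vlan_offset' selects the block and
+ * a per-byte bitmap selects the id within it; 'vlan_cfg' is 0 to add
+ * the entry and 1 to remove it.
+ */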
+static int
+hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
+{
+#define HNS3_VLAN_OFFSET_160 160
+ struct hns3_vlan_filter_pf_cfg_cmd *req;
+ struct hns3_hw *hw = &hns->hw;
+ uint8_t vlan_offset_byte_val;
+ struct hns3_cmd_desc desc;
+ uint8_t vlan_offset_byte;
+ uint8_t vlan_offset_160;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);
+
+ vlan_offset_160 = vlan_id / HNS3_VLAN_OFFSET_160;
+ vlan_offset_byte = (vlan_id % HNS3_VLAN_OFFSET_160) / 8;
+ vlan_offset_byte_val = 1 << (vlan_id % 8);
+
+ req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
+ req->vlan_offset = vlan_offset_160;
+ req->vlan_cfg = on ? 0 : 1;
+ req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
+ vlan_id, ret);
+
+ return ret;
+}
+
+static void
+hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
+{
+ struct hns3_user_vlan_table *vlan_entry;
+ struct hns3_pf *pf = &hns->pf;
+
+ LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
+ if (vlan_entry->vlan_id == vlan_id) {
+ if (vlan_entry->hd_tbl_status)
+ hns3_set_port_vlan_filter(hns, vlan_id, 0);
+ LIST_REMOVE(vlan_entry, next);
+ rte_free(vlan_entry);
+ break;
+ }
+ }
+}
+
+static void
+hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
+ bool written_to_tbl)
+{
+ struct hns3_user_vlan_table *vlan_entry;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+
+ LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
+ if (vlan_entry->vlan_id == vlan_id)
+ return;
+ }
+
+ vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
+ if (vlan_entry == NULL) {
+ hns3_err(hw, "Failed to malloc hns3 vlan table");
+ return;
+ }
+
+ vlan_entry->hd_tbl_status = written_to_tbl;
+ vlan_entry->vlan_id = vlan_id;
+
+ LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
+}
+
+static int
+hns3_restore_vlan_table(struct hns3_adapter *hns)
+{
+ struct hns3_user_vlan_table *vlan_entry;
+ struct hns3_pf *pf = &hns->pf;
+ uint16_t vlan_id;
+ int ret = 0;
+
+ if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) {
+ ret = hns3_vlan_pvid_configure(hns, pf->port_base_vlan_cfg.pvid,
+ 1);
+ return ret;
+ }
+
+ LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
+ if (vlan_entry->hd_tbl_status) {
+ vlan_id = vlan_entry->vlan_id;
+ ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
+{
+ struct hns3_pf *pf = &hns->pf;
+ bool written_to_tbl = false;
+ int ret = 0;
+
+ /*
+ * When vlan filter is enabled, hardware regards vlan id 0 as the entry
+ * for normal packets, so deleting vlan id 0 is not allowed.
+ */
+ if (on == 0 && vlan_id == 0)
+ return 0;
+
+ /*
+ * When port-based vlan is enabled, it is used as the vlan filter
+ * condition. In this case the vlan filter table is not updated when
+ * the user adds or removes a vlan; only the vlan list is updated.
+ * The vlan ids in the list are written to the vlan filter table
+ * once port-based vlan is disabled.
+ */
+ if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
+ ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
+ written_to_tbl = true;
+ }
+
+ if (ret == 0 && vlan_id) {
+ if (on)
+ hns3_add_dev_vlan_table(hns, vlan_id, written_to_tbl);
+ else
+ hns3_rm_dev_vlan_table(hns, vlan_id);
+ }
+ return ret;
+}
+
+static int
+hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_vlan_filter_configure(hns, vlan_id, on);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+}
+
+static int
+hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct hns3_rx_vlan_type_cfg_cmd *rx_req;
+ struct hns3_tx_vlan_type_cfg_cmd *tx_req;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ if (vlan_type != ETH_VLAN_TYPE_INNER &&
+ vlan_type != ETH_VLAN_TYPE_OUTER) {
+ hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
+ return -EINVAL;
+ }
+
+ if (tpid != RTE_ETHER_TYPE_VLAN) {
+ hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
+ return -EINVAL;
+ }
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
+ rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
+
+ if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+ rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
+ rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
+ } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+ rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
+ rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
+ rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
+ rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
+ }
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
+ ret);
+ return ret;
+ }
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);
+
+ tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
+ tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
+ tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
+ ret);
+ return ret;
+}
+
+static int
+hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+}
+
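+/*
+ * Deliver the Rx vlan-tag handling configuration in 'vcfg' (tag1/tag2
+ * stripping and show-tag bits) to the firmware for vport 0 via the
+ * VLAN_PORT_RX_CFG command.
+ */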
+static int
+hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
+ struct hns3_rx_vtag_cfg *vcfg)
+{
+ struct hns3_vport_vtag_rx_cfg_cmd *req;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_cmd_desc desc;
+ uint16_t vport_id;
+ uint8_t bitmap;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);
+
+ req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+ /*
+ * In the current version, VF is not supported when the PF is driven by
+ * the DPDK driver. The vf_id of the PF is 0, so only the parameters of
+ * vport_id 0 need to be configured.
+ */
+ vport_id = 0;
+ req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
+ bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
+ req->vf_bitmap[req->vf_offset] = bitmap;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
+ return ret;
+}
+
+static void
+hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
+ struct hns3_rx_vtag_cfg *vcfg)
+{
+ struct hns3_pf *pf = &hns->pf;
+
+ memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
+}
+
+static void
+hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
+ struct hns3_tx_vtag_cfg *vcfg)
+{
+ struct hns3_pf *pf = &hns->pf;
+
+ memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
+}
+
+static int
+hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
+{
+ struct hns3_rx_vtag_cfg rxvlan_cfg;
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
+ rxvlan_cfg.strip_tag1_en = false;
+ rxvlan_cfg.strip_tag2_en = enable;
+ } else {
+ rxvlan_cfg.strip_tag1_en = enable;
+ rxvlan_cfg.strip_tag2_en = true;
+ }
+
+ rxvlan_cfg.vlan1_vlan_prionly = false;
+ rxvlan_cfg.vlan2_vlan_prionly = false;
+ rxvlan_cfg.rx_vlan_offload_en = enable;
+
+ ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
+ if (ret) {
+ hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret);
+ return ret;
+ }
+
+ hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);
+
+ return ret;
+}
+
+static int
+hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
+ uint8_t fe_type, bool filter_en, uint8_t vf_id)
+{
+ struct hns3_vlan_filter_ctrl_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);
+
+ req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
+ req->vlan_type = vlan_type;
+ req->vlan_fe = filter_en ? fe_type : 0;
+ req->vf_id = vf_id;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "set vlan filter fail, ret =%d", ret);
+
+ return ret;
+}
+
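+/*
+ * Note that the egress (VF-type) filter is always programmed as
+ * disabled here; only the ingress (port-type) filter follows 'enable'.
+ */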
+static int
+hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
+ HNS3_FILTER_FE_EGRESS, false, 0);
+ if (ret) {
+ hns3_err(hw, "hns3 enable filter fail, ret =%d", ret);
+ return ret;
+ }
+
+ ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
+ HNS3_FILTER_FE_INGRESS, enable, 0);
+ if (ret)
+ hns3_err(hw, "hns3 enable filter fail, ret =%d", ret);
+
+ return ret;
+}
+
+static int
+hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_eth_rxmode *rxmode;
+ unsigned int tmp_mask;
+ bool enable;
+ int ret = 0;
+
+ rte_spinlock_lock(&hw->lock);
+ rxmode = &dev->data->dev_conf.rxmode;
+ tmp_mask = (unsigned int)mask;
+ if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
+ true : false;
+
+ ret = hns3_en_hw_strip_rxvtag(hns, enable);
+ if (ret) {
+ rte_spinlock_unlock(&hw->lock);
+ hns3_err(hw, "failed to enable rx strip, ret =%d", ret);
+ return ret;
+ }
+ }
+
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
+ struct hns3_tx_vtag_cfg *vcfg)
+{
+ struct hns3_vport_vtag_tx_cfg_cmd *req;
+ struct hns3_cmd_desc desc;
+ struct hns3_hw *hw = &hns->hw;
+ uint16_t vport_id;
+ uint8_t bitmap;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);
+
+ req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->def_vlan_tag1 = vcfg->default_tag1;
+ req->def_vlan_tag2 = vcfg->default_tag2;
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
+ vcfg->accept_tag1 ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
+ vcfg->accept_untag1 ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
+ vcfg->accept_tag2 ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
+ vcfg->accept_untag2 ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);
+
+ /*
+ * In the current version, VF is not supported when the PF is driven by
+ * the DPDK driver. The vf_id of the PF is 0, so only the parameters of
+ * vport_id 0 need to be configured.
+ */
+ vport_id = 0;
+ req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
+ bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
+ req->vf_bitmap[req->vf_offset] = bitmap;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);
+
+ return ret;
+}
+
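+/*
+ * Configure Tx vlan-tag insertion according to the port-based vlan
+ * state: when it is enabled, packets that already carry tag1 are
+ * rejected and the PVID is inserted as tag1; when disabled, tag1
+ * packets are accepted and nothing is inserted.
+ */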
+static int
+hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
+ uint16_t pvid)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_tx_vtag_cfg txvlan_cfg;
+ int ret;
+
+ if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
+ txvlan_cfg.accept_tag1 = true;
+ txvlan_cfg.insert_tag1_en = false;
+ txvlan_cfg.default_tag1 = 0;
+ } else {
+ txvlan_cfg.accept_tag1 = false;
+ txvlan_cfg.insert_tag1_en = true;
+ txvlan_cfg.default_tag1 = pvid;
+ }
+
+ txvlan_cfg.accept_untag1 = true;
+ txvlan_cfg.accept_tag2 = true;
+ txvlan_cfg.accept_untag2 = true;
+ txvlan_cfg.insert_tag2_en = false;
+ txvlan_cfg.default_tag2 = 0;
+
+ ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
+ if (ret) {
+ hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
+ ret);
+ return ret;
+ }
+
+ hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
+ return ret;
+}
+
+static void
+hns3_store_port_base_vlan_info(struct hns3_adapter *hns, uint16_t pvid, int on)
+{
+ struct hns3_pf *pf = &hns->pf;
+
+ pf->port_base_vlan_cfg.state = on ?
+ HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
+
+ pf->port_base_vlan_cfg.pvid = pvid;
+}
+
+static void
+hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
+{
+ struct hns3_user_vlan_table *vlan_entry;
+ struct hns3_pf *pf = &hns->pf;
+
+ LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
+ if (vlan_entry->hd_tbl_status)
+ hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
+
+ vlan_entry->hd_tbl_status = false;
+ }
+
+ if (is_del_list) {
+ vlan_entry = LIST_FIRST(&pf->vlan_list);
+ while (vlan_entry) {
+ LIST_REMOVE(vlan_entry, next);
+ rte_free(vlan_entry);
+ vlan_entry = LIST_FIRST(&pf->vlan_list);
+ }
+ }
+}
+
+static void
+hns3_add_all_vlan_table(struct hns3_adapter *hns)
+{
+ struct hns3_user_vlan_table *vlan_entry;
+ struct hns3_pf *pf = &hns->pf;
+
+ LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
+ if (!vlan_entry->hd_tbl_status)
+ hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
+
+ vlan_entry->hd_tbl_status = true;
+ }
+}
+
+static void
+hns3_remove_all_vlan_table(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+ int ret;
+
+ hns3_rm_all_vlan_table(hns, true);
+ if (pf->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
+ ret = hns3_set_port_vlan_filter(hns,
+ pf->port_base_vlan_cfg.pvid, 0);
+ if (ret) {
+ hns3_err(hw, "Failed to remove all vlan table, ret =%d",
+ ret);
+ return;
+ }
+ }
+}
+
+static int
+hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
+ uint16_t port_base_vlan_state,
+ uint16_t new_pvid, uint16_t old_pvid)
+{
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_hw *hw = &hns->hw;
+ int ret = 0;
+
+ if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
+ if (old_pvid != HNS3_INVALID_PVID && old_pvid != 0) {
+ ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
+ if (ret) {
+ hns3_err(hw,
+ "Failed to clear old pvid filter, ret =%d",
+ ret);
+ return ret;
+ }
+ }
+
+ hns3_rm_all_vlan_table(hns, false);
+ return hns3_set_port_vlan_filter(hns, new_pvid, 1);
+ }
+
+ if (new_pvid != 0) {
+ ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
+ if (ret) {
+ hns3_err(hw, "Failed to set port vlan filter, ret =%d",
+ ret);
+ return ret;
+ }
+ }
+
+ if (new_pvid == pf->port_base_vlan_cfg.pvid)
+ hns3_add_all_vlan_table(hns);
+
+ return ret;
+}
+
+static int
+hns3_en_rx_strip_all(struct hns3_adapter *hns, int on)
+{
+ struct hns3_rx_vtag_cfg rx_vlan_cfg;
+ struct hns3_hw *hw = &hns->hw;
+ bool rx_strip_en;
+ int ret;
+
+ rx_strip_en = on ? true : false;
+ rx_vlan_cfg.strip_tag1_en = rx_strip_en;
+ rx_vlan_cfg.strip_tag2_en = rx_strip_en;
+ rx_vlan_cfg.vlan1_vlan_prionly = false;
+ rx_vlan_cfg.vlan2_vlan_prionly = false;
+ rx_vlan_cfg.rx_vlan_offload_en = rx_strip_en;
+
+ ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
+ if (ret) {
+ hns3_err(hw, "enable strip rx failed, ret =%d", ret);
+ return ret;
+ }
+
+ hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
+ return ret;
+}
+
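+/*
+ * Apply a PVID change: reprogram Tx tag insertion and Rx tag stripping,
+ * update the vlan filter entries, then record the new port-based vlan
+ * state.
+ */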
+static int
+hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
+{
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_hw *hw = &hns->hw;
+ uint16_t port_base_vlan_state;
+ uint16_t old_pvid;
+ int ret;
+
+ if (on == 0 && pvid != pf->port_base_vlan_cfg.pvid) {
+ if (pf->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
+ hns3_warn(hw, "Invalid operation! As current pvid is "
+ "%u, disabling pvid %u is invalid",
+ pf->port_base_vlan_cfg.pvid, pvid);
+ return 0;
+ }
+
+ port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
+ HNS3_PORT_BASE_VLAN_DISABLE;
+ ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
+ if (ret) {
+ hns3_err(hw, "Failed to config tx vlan, ret =%d", ret);
+ return ret;
+ }
+
+ ret = hns3_en_rx_strip_all(hns, on);
+ if (ret) {
+ hns3_err(hw, "Failed to config rx vlan strip, ret =%d", ret);
+ return ret;
+ }
+
+ if (pvid == HNS3_INVALID_PVID)
+ goto out;
+ old_pvid = pf->port_base_vlan_cfg.pvid;
+ ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid,
+ old_pvid);
+ if (ret) {
+ hns3_err(hw, "Failed to update vlan filter entries, ret =%d",
+ ret);
+ return ret;
+ }
+
+out:
+ hns3_store_port_base_vlan_info(hns, pvid, on);
+ return ret;
+}
+
+static int
+hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_vlan_pvid_configure(hns, pvid, on);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+}
+
+static void
+init_port_base_vlan_info(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_pf *pf = &hns->pf;
+
+ pf->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
+ pf->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
+}
+
+static int
+hns3_default_vlan_config(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3_set_port_vlan_filter(hns, 0, 1);
+ if (ret)
+ hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
+ return ret;
+}
+
+static int
+hns3_init_vlan_config(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ /*
+ * This function can be called during both initialization and reset.
+ * In the reset case, the hardware has already been reset successfully,
+ * and its configuration must be restored so that the hardware
+ * configuration remains unchanged before and after the reset.
+ */
+ if (rte_atomic16_read(&hw->reset.resetting) == 0)
+ init_port_base_vlan_info(hw);
+
+ ret = hns3_enable_vlan_filter(hns, true);
+ if (ret) {
+ hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
+ return ret;
+ }
+
+ ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
+ RTE_ETHER_TYPE_VLAN);
+ if (ret) {
+ hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
+ return ret;
+ }
+
+ /*
+ * During the reinit-dev stage of the reset process, the following
+ * vlan-related configurations may differ from those at initialization;
+ * they are restored to hardware in hns3_restore_vlan_table and
+ * hns3_restore_vlan_conf later.
+ */
+ if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
+ if (ret) {
+ hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
+ return ret;
+ }
+
+ ret = hns3_en_hw_strip_rxvtag(hns, false);
+ if (ret) {
+ hns3_err(hw, "rx strip configure fail in pf, ret =%d",
+ ret);
+ return ret;
+ }
+ }
+
+ return hns3_default_vlan_config(hns);
+}
+
+static int
+hns3_restore_vlan_conf(struct hns3_adapter *hns)
+{
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
+ if (ret) {
+ hns3_err(hw, "hns3 restore vlan rx conf fail, ret =%d", ret);
+ return ret;
+ }
+
+ ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
+ if (ret)
+ hns3_err(hw, "hns3 restore vlan tx conf fail, ret =%d", ret);
+
+ return ret;
+}
+
+static int
+hns3_dev_configure_vlan(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_txmode *txmode;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ txmode = &data->dev_conf.txmode;
+ if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
+ hns3_warn(hw,
+ "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
+ "configuration is not supported! Ignore these two "
+ "parameters: hw_vlan_reject_tagged(%d), "
+ "hw_vlan_reject_untagged(%d)",
+ txmode->hw_vlan_reject_tagged,
+ txmode->hw_vlan_reject_untagged);
+
+ /* Apply vlan offload setting */
+ ret = hns3_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+ if (ret) {
+ hns3_err(hw, "dev config vlan Strip failed, ret =%d", ret);
+ return ret;
+ }
+
+ /* Apply pvid setting */
+ ret = hns3_vlan_pvid_set(dev, txmode->pvid,
+ txmode->hw_vlan_insert_pvid);
+ if (ret)
+ hns3_err(hw, "dev config vlan pvid(%d) failed, ret =%d",
+ txmode->pvid, ret);
+
+ return ret;
+}
+
static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
unsigned int tso_mss_max)
goto err_add_uc_addr;
}
+ ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
+ if (ret) {
+ hns3_err(hw, "Failed to configure mac pause address: %d", ret);
+ goto err_pause_addr_cfg;
+ }
+
rte_ether_addr_copy(mac_addr,
(struct rte_ether_addr *)hw->mac.mac_addr);
hw->mac.default_addr_setted = true;
return 0;
+err_pause_addr_cfg:
+ ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
+ if (ret_val) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_warn(hw,
+ "Failed to roll back the set mac addr(%s): %d",
+ mac_str, ret_val);
+ }
+
err_add_uc_addr:
if (rm_succes) {
ret_val = hns3_add_uc_addr_common(hw, oaddr);
return ret;
}
+static int
+hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
+{
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_ether_addr *addr;
+ int err = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) {
+ addr = &hw->data->mac_addrs[i];
+ if (!rte_is_valid_assigned_ether_addr(addr))
+ continue;
+ if (del)
+ ret = hns3_remove_uc_addr_common(hw, addr);
+ else
+ ret = hns3_add_uc_addr_common(hw, addr);
+ if (ret) {
+ err = ret;
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ addr);
+ hns3_dbg(hw,
+ "Failed to %s mac addr(%s). ret:%d i:%d",
+ del ? "remove" : "restore", mac_str, ret, i);
+ }
+ }
+ return err;
+}
+
static void
hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
{
return ret;
}
- num = reserved_addr_num + i;
- rte_ether_addr_copy(addr, &hw->mc_addrs[num]);
- hw->mc_addrs_num++;
+ num = reserved_addr_num + i;
+ rte_ether_addr_copy(addr, &hw->mc_addrs[num]);
+ hw->mc_addrs_num++;
+ }
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static int
+hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
+{
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_ether_addr *addr;
+ int err = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i < hw->mc_addrs_num; i++) {
+ addr = &hw->mc_addrs[i];
+ if (!rte_is_multicast_ether_addr(addr))
+ continue;
+ if (del)
+ ret = hns3_remove_mc_addr(hw, addr);
+ else
+ ret = hns3_add_mc_addr(hw, addr);
+ if (ret) {
+ err = ret;
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ addr);
+ hns3_dbg(hw, "%s mc mac addr: %s failed",
+ del ? "Remove" : "Restore", mac_str);
+ }
+ }
+ return err;
+}
+
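+/*
+ * Validate the multi-queue configuration: the VMDQ modes are rejected,
+ * and for DCB the TC count must be 4 or 8, the Rx and Tx TC settings
+ * must match, and the highest mapped TC must not exceed nb_tcs.
+ */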
+static int
+hns3_check_mq_mode(struct rte_eth_dev *dev)
+{
+ enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_dcb_rx_conf *dcb_rx_conf;
+ struct rte_eth_dcb_tx_conf *dcb_tx_conf;
+ uint8_t num_tc;
+ int max_tc = 0;
+ int i;
+
+ dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+
+ if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+ hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB_RSS is not supported. "
+ "rx_mq_mode = %d", rx_mq_mode);
+ return -EINVAL;
+ }
+
+ if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB ||
+ tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB and ETH_MQ_TX_VMDQ_DCB "
+ "is not supported. rx_mq_mode = %d, tx_mq_mode = %d",
+ rx_mq_mode, tx_mq_mode);
+ return -EINVAL;
+ }
+
+ if (rx_mq_mode == ETH_MQ_RX_DCB_RSS) {
+ if (dcb_rx_conf->nb_tcs > pf->tc_max) {
+ hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
+ dcb_rx_conf->nb_tcs, pf->tc_max);
+ return -EINVAL;
+ }
+
+ if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
+ dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
+ hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+ "nb_tcs(%d) != %d or %d in rx direction.",
+ dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
+ return -EINVAL;
+ }
+
+ if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
+ hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
+ dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
+ if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
+ hns3_err(hw, "dcb_tc[%d] = %d in rx direction, "
+ "is not equal to one in tx direction.",
+ i, dcb_rx_conf->dcb_tc[i]);
+ return -EINVAL;
+ }
+ if (dcb_rx_conf->dcb_tc[i] > max_tc)
+ max_tc = dcb_rx_conf->dcb_tc[i];
+ }
+
+ num_tc = max_tc + 1;
+ if (num_tc > dcb_rx_conf->nb_tcs) {
+ hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
+ num_tc, dcb_rx_conf->nb_tcs);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+hns3_check_dcb_cfg(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!hns3_dev_dcb_supported(hw)) {
+ hns3_err(hw, "this port does not support dcb configurations.");
+ return -EOPNOTSUPP;
+ }
+
+ if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
+ hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
+ return -EOPNOTSUPP;
+ }
+
+ /* Check multiple queue mode */
+ return hns3_check_mq_mode(dev);
+}
+
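+/*
+ * Map ('mmap' == true) or unmap an Rx ring to/from an MSI-X vector by
+ * sending the ADD/DEL_RING_TO_VECTOR command for the given queue.
+ */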
+static int
+hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
+ bool mmap, uint16_t queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_cmd_desc desc;
+ struct hns3_ctrl_vector_chain_cmd *req =
+ (struct hns3_ctrl_vector_chain_cmd *)desc.data;
+ enum hns3_cmd_status status;
+ enum hns3_opcode_type op;
+ uint16_t tqp_type_and_id = 0;
+
+ op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
+ hns3_cmd_setup_basic_desc(&desc, op, false);
+ req->int_vector_id = vector_id;
+
+ hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
+ HNS3_RING_TYPE_RX);
+ hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
+ hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
+ HNS3_RING_GL_RX);
+ req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
+
+ req->int_cause_num = 1;
+ status = hns3_cmd_send(hw, &desc, 1);
+ if (status) {
+ hns3_err(hw, "Map TQP %d fail, vector_id is %d, status is %d.",
+ queue_id, vector_id, status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+hns3_dev_configure(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_rss_conf *rss_cfg = &hw->rss_info;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+ struct rte_eth_rss_conf rss_conf;
+ uint16_t mtu;
+ int ret;
+
+ /*
+ * Hardware does not support individually enabling, disabling or
+ * resetting the Tx or Rx queue in the hns3 network engine; the driver
+ * must enable/disable/reset Tx and Rx queues at the same time. When
+ * the numbers of Tx queues allocated by upper applications are not
+ * equal to the numbers of Rx queues, the driver sets up fake Tx or Rx
+ * queues to adjust the numbers; otherwise, the network engine cannot
+ * work as usual. These fake queues are imperceptible to, and cannot
+ * be used by, upper applications.
+ */
+ ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+ if (ret) {
+ hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
+ return ret;
+ }
+
+ hw->adapter_state = HNS3_NIC_CONFIGURING;
+ if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ hns3_err(hw, "setting link speed/duplex not supported");
+ ret = -EINVAL;
+ goto cfg_err;
+ }
+
+ if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ ret = hns3_check_dcb_cfg(dev);
+ if (ret)
+ goto cfg_err;
+ }
+
+ /* When RSS is not configured, redirect packets to queue 0 */
+ if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ rss_conf = conf->rx_adv_conf.rss_conf;
+ if (rss_conf.rss_key == NULL) {
+ rss_conf.rss_key = rss_cfg->key;
+ rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
+ }
+
+ ret = hns3_dev_rss_hash_update(dev, &rss_conf);
+ if (ret)
+ goto cfg_err;
+ }
+
+ /*
+ * If jumbo frames are enabled, MTU needs to be refreshed
+ * according to the maximum RX packet length.
+ */
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ /*
+ * The validity of max_rx_pkt_len is guaranteed by the DPDK framework.
+ * Its maximum value is HNS3_MAX_FRAME_LEN, so it can safely be
+ * assigned to a variable of type "uint16_t".
+ */
+ mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
+ ret = hns3_dev_mtu_set(dev, mtu);
+ if (ret)
+ goto cfg_err;
+ dev->data->mtu = mtu;
}
- rte_spinlock_unlock(&hw->lock);
+
+ ret = hns3_dev_configure_vlan(dev);
+ if (ret)
+ goto cfg_err;
+
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
return 0;
-}
-static int
-hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
-{
- char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
- struct hns3_hw *hw = &hns->hw;
- struct rte_ether_addr *addr;
- int err = 0;
- int ret;
- int i;
+cfg_err:
+ (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
+ hw->adapter_state = HNS3_NIC_INITIALIZED;
- for (i = 0; i < hw->mc_addrs_num; i++) {
- addr = &hw->mc_addrs[i];
- if (!rte_is_multicast_ether_addr(addr))
- continue;
- if (del)
- ret = hns3_remove_mc_addr(hw, addr);
- else
- ret = hns3_add_mc_addr(hw, addr);
- if (ret) {
- err = ret;
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
- addr);
- hns3_dbg(hw, "%s mc mac addr: %s failed",
- del ? "Remove" : "Restore", mac_str);
- }
- }
- return err;
+ return ret;
}
static int
req = (struct hns3_config_max_frm_size_cmd *)desc.data;
req->max_frm_size = rte_cpu_to_le_16(new_mps);
- req->min_frm_size = HNS3_MIN_FRAME_LEN;
+ req->min_frm_size = RTE_ETHER_MIN_LEN;
return hns3_cmd_send(hw, &desc, 1);
}
DEV_TX_OFFLOAD_MULTI_SEGS |
info->tx_queue_offload_capa);
+ info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = HNS3_MAX_RING_DESC,
+ .nb_min = HNS3_MIN_RING_DESC,
+ .nb_align = HNS3_ALIGN_RING_DESC,
+ };
+
+ info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = HNS3_MAX_RING_DESC,
+ .nb_min = HNS3_MIN_RING_DESC,
+ .nb_align = HNS3_ALIGN_RING_DESC,
+ };
+
info->vmdq_queue_num = 0;
info->reta_size = HNS3_RSS_IND_TBL_SIZE;
info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
+ info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
+ info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
return 0;
}
struct hns3_mac *mac = &hw->mac;
struct rte_eth_link new_link;
+ if (!hns3_is_reset_pending(hns)) {
+ hns3_update_speed_duplex(eth_dev);
+ hns3_update_link_status(hw);
+ }
+
memset(&new_link, 0, sizeof(new_link));
switch (mac->link_speed) {
case ETH_SPEED_NUM_10M:
hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num);
pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
hw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
+ pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number);
if (req->tx_buf_size)
pf->tx_buf_size =
uint16_t tqps_num = hw->total_tqps_num;
uint16_t func_id;
uint16_t tqp_id;
+ bool is_pf;
int num;
int ret;
int i;
tqp_id = 0;
num = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
for (func_id = 0; func_id < num; func_id++) {
+ is_pf = func_id == 0 ? true : false;
for (i = 0;
i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) {
ret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i,
- true);
+ is_pf);
if (ret)
return ret;
}
return 0;
}
+static int
+hns3_clear_all_vfs_promisc_mode(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_promisc_param param;
+ uint16_t func_id;
+ int ret;
+
+ /* func_id 0 denotes the PF; VF func_ids start from 1 */
+ for (func_id = 1; func_id < pf->func_num; func_id++) {
+ hns3_promisc_param_init(¶m, false, false, false, func_id);
+ ret = hns3_cmd_set_promisc_mode(hw, ¶m);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_set_promisc_mode(hw, true, en_mc_pmc);
+ rte_spinlock_unlock(&hw->lock);
+ if (ret)
+ hns3_err(hw, "Failed to enable promiscuous mode: %d", ret);
+
+ return ret;
+}
+
+static int
+hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
+ int ret;
+
+ /* If the port is in all_multicast mode, it must remain in all_multicast mode. */
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_set_promisc_mode(hw, false, en_mc_pmc);
+ rte_spinlock_unlock(&hw->lock);
+ if (ret)
+ hns3_err(hw, "Failed to disable promiscuous mode: %d", ret);
+
+ return ret;
+}
+
+static int
+hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_set_promisc_mode(hw, en_uc_pmc, true);
+ rte_spinlock_unlock(&hw->lock);
+ if (ret)
+ hns3_err(hw, "Failed to enable allmulticast mode: %d", ret);
+
+ return ret;
+}
+
+static int
+hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
+ int ret;
+
+ /* If the port is in promiscuous mode, it must remain in all_multicast mode. */
+ if (dev->data->promiscuous == 1)
+ return 0;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_set_promisc_mode(hw, en_uc_pmc, false);
+ rte_spinlock_unlock(&hw->lock);
+ if (ret)
+ hns3_err(hw, "Failed to disable allmulticast mode: %d", ret);
+
+ return ret;
+}
+
+static int
+hns3_dev_promisc_restore(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ bool en_mc_pmc;
+ bool en_uc_pmc;
+
+ en_uc_pmc = (hw->data->promiscuous == 1) ? true : false;
+ en_mc_pmc = (hw->data->all_multicast == 1) ? true : false;
+
+ return hns3_set_promisc_mode(hw, en_uc_pmc, en_mc_pmc);
+}
+
static int
hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed)
{
return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX);
}
+static int
+hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
+{
+ struct hns3_config_mac_mode_cmd *req;
+ struct hns3_cmd_desc desc;
+ uint32_t loop_en = 0;
+ uint8_t val = 0;
+ int ret;
+
+ req = (struct hns3_config_mac_mode_cmd *)desc.data;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false);
+ if (enable)
+ val = 1;
+ hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0);
+ hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0);
+ hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0);
+ hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0);
+ hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val);
+ req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret);
+
+ return ret;
+}
+
static int
hns3_get_mac_link_status(struct hns3_hw *hw)
{
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
hns3_err(hw, "get link status cmd failed %d", ret);
- return ret;
+ return ETH_LINK_DOWN;
}
req = (struct hns3_link_status_cmd *)desc.data;
return !!link_status;
}
-static void
+void
hns3_update_link_status(struct hns3_hw *hw)
{
int state;
state = hns3_get_mac_link_status(hw);
- if (state != hw->mac.link_status)
+ if (state != hw->mac.link_status) {
hw->mac.link_status = state;
+ hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
+ }
}
static void
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- hns3_update_speed_duplex(eth_dev);
- hns3_update_link_status(hw);
+ if (!hns3_is_reset_pending(hns)) {
+ hns3_update_speed_duplex(eth_dev);
+ hns3_update_link_status(hw);
+ } else
+ hns3_warn(hw, "Cancel the query when reset is pending");
rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
}
goto err_mac_init;
}
+ ret = hns3_clear_all_vfs_promisc_mode(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to clear all vfs promisc mode: %d",
+ ret);
+ goto err_mac_init;
+ }
+
+ ret = hns3_init_vlan_config(hns);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret);
+ goto err_mac_init;
+ }
+
ret = hns3_dcb_init(hw);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret);
goto err_cmd_init_queue;
}
+ hns3_clear_all_event_cause(hw);
+
/* Firmware command initialize */
ret = hns3_cmd_init(hw);
if (ret) {
goto err_cmd_init;
}
+ ret = rte_intr_callback_register(&pci_dev->intr_handle,
+ hns3_interrupt_handler,
+ eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
+ goto err_intr_callback_register;
+ }
+
+ /* Enable interrupt */
+ rte_intr_enable(&pci_dev->intr_handle);
+ hns3_pf_enable_irq0(hw);
+
/* Get configuration */
ret = hns3_get_configuration(hw);
if (ret) {
goto err_get_config;
}
- ret = hns3_init_hardware(hns);
- if (ret) {
- PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
- goto err_get_config;
- }
+ ret = hns3_init_hardware(hns);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
+ goto err_get_config;
+ }
+
+ /* Initialize flow director filter list & hash */
+ ret = hns3_fdir_filter_init(hns);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
+ goto err_hw_init;
+ }
+
+ hns3_set_default_rss_args(hw);
+
+ ret = hns3_enable_hw_error_intr(hns, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d",
+ ret);
+ goto err_fdir;
+ }
+
+ return 0;
+
+err_fdir:
+ hns3_fdir_filter_uninit(hns);
+err_hw_init:
+ hns3_uninit_umv_space(hw);
+
+err_get_config:
+ hns3_pf_disable_irq0(hw);
+ rte_intr_disable(&pci_dev->intr_handle);
+ hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
+ eth_dev);
+
+err_intr_callback_register:
+ hns3_cmd_uninit(hw);
+
+err_cmd_init:
+ hns3_cmd_destroy_queue(hw);
+
+err_cmd_init_queue:
+ hw->io_base = NULL;
+
+ return ret;
+}
+
+static void
+hns3_uninit_pf(struct rte_eth_dev *eth_dev)
+{
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct rte_device *dev = eth_dev->device;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
+ struct hns3_hw *hw = &hns->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hns3_enable_hw_error_intr(hns, false);
+ hns3_rss_uninit(hns);
+ hns3_fdir_filter_uninit(hns);
+ hns3_uninit_umv_space(hw);
+ hns3_pf_disable_irq0(hw);
+ rte_intr_disable(&pci_dev->intr_handle);
+ hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
+ eth_dev);
+ hns3_cmd_uninit(hw);
+ hns3_cmd_destroy_queue(hw);
+ hw->io_base = NULL;
+}
+
+static int
+hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3_dcb_cfg_update(hns);
+ if (ret)
+ return ret;
+
+ /* Enable queues */
+ ret = hns3_start_queues(hns, reset_queue);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to start queues: %d", ret);
+ return ret;
+ }
+
+ /* Enable MAC */
+ ret = hns3_cfg_mac_mode(hw, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to enable MAC: %d", ret);
+ goto err_config_mac_mode;
+ }
+ return 0;
+
+err_config_mac_mode:
+ hns3_stop_queues(hns, true);
+ return ret;
+}
+
+static int
+hns3_map_rx_interrupt(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t intr_vector;
+ uint8_t base = 0;
+ uint8_t vec = 0;
+ uint16_t q_id;
+ int ret;
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return 0;
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* check and configure queue intr-vector mapping */
+ if (rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) {
+ intr_vector = hw->used_rx_queues;
+ /* creates event fd for each intr vector when MSIX is used */
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -EINVAL;
+ }
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ hw->used_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ hns3_err(hw, "Failed to allocate %d rx_queues"
+ " intr_vec", hw->used_rx_queues);
+ ret = -ENOMEM;
+ goto alloc_intr_vec_error;
+ }
+ }
+
+ if (rte_intr_allow_others(intr_handle)) {
+ vec = RTE_INTR_VEC_RXTX_OFFSET;
+ base = RTE_INTR_VEC_RXTX_OFFSET;
+ }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+ ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
+ if (ret)
+ goto bind_vector_error;
+ intr_handle->intr_vec[q_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+ }
+ rte_intr_enable(intr_handle);
+ return 0;
+
+bind_vector_error:
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+ return ret;
+alloc_intr_vec_error:
+ rte_intr_efd_disable(intr_handle);
+ return ret;
+}
+
+static int
+hns3_dev_start(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ if (rte_atomic16_read(&hw->reset.resetting))
+ return -EBUSY;
+
+ rte_spinlock_lock(&hw->lock);
+ hw->adapter_state = HNS3_NIC_STARTING;
- /* Initialize flow director filter list & hash */
- ret = hns3_fdir_filter_init(hns);
+ ret = hns3_do_start(hns, true);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
- goto err_hw_init;
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
}
- hns3_set_default_rss_args(hw);
+ hw->adapter_state = HNS3_NIC_STARTED;
+ rte_spinlock_unlock(&hw->lock);
+ ret = hns3_map_rx_interrupt(dev);
+ if (ret)
+ return ret;
+ hns3_set_rxtx_function(dev);
+ hns3_mp_req_start_rxtx(dev);
+ rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
+
+ hns3_info(hw, "hns3 dev start successful!");
return 0;
+}
-err_hw_init:
- hns3_uninit_umv_space(hw);
+static int
+hns3_do_stop(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ bool reset_queue;
+ int ret;
-err_get_config:
- hns3_cmd_uninit(hw);
+ ret = hns3_cfg_mac_mode(hw, false);
+ if (ret)
+ return ret;
+ hw->mac.link_status = ETH_LINK_DOWN;
-err_cmd_init:
- hns3_cmd_destroy_queue(hw);
+ if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+ hns3_configure_all_mac_addr(hns, true);
+ reset_queue = true;
+ } else
+ reset_queue = false;
+ hw->mac.default_addr_setted = false;
+ return hns3_stop_queues(hns, reset_queue);
+}
-err_cmd_init_queue:
- hw->io_base = NULL;
+static void
+hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ uint8_t base = 0;
+ uint8_t vec = 0;
+ uint16_t q_id;
- return ret;
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return;
+
+ /* unmap the ring with vector */
+ if (rte_intr_allow_others(intr_handle)) {
+ vec = RTE_INTR_VEC_RXTX_OFFSET;
+ base = RTE_INTR_VEC_RXTX_OFFSET;
+ }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+ (void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+ }
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
}
static void
-hns3_uninit_pf(struct rte_eth_dev *eth_dev)
+hns3_dev_stop(struct rte_eth_dev *dev)
{
- struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
PMD_INIT_FUNC_TRACE();
- hns3_rss_uninit(hns);
- hns3_fdir_filter_uninit(hns);
- hns3_uninit_umv_space(hw);
- hns3_cmd_uninit(hw);
- hns3_cmd_destroy_queue(hw);
- hw->io_base = NULL;
+ hw->adapter_state = HNS3_NIC_STOPPING;
+ hns3_set_rxtx_function(dev);
+ rte_wmb();
+ /* Disable datapath on secondary process. */
+ hns3_mp_req_stop_rxtx(dev);
+ /* Prevent crashes when queues are still in use. */
+ rte_delay_ms(hw->tqps_num);
+
+ rte_spinlock_lock(&hw->lock);
+ if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ hns3_do_stop(hns);
+ hns3_dev_release_mbufs(hns);
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ }
+ rte_eal_alarm_cancel(hns3_service_handler, dev);
+ rte_spinlock_unlock(&hw->lock);
+ hns3_unmap_rx_interrupt(dev);
}
static void
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ rte_free(eth_dev->process_private);
+ eth_dev->process_private = NULL;
+ return;
+ }
+
+ if (hw->adapter_state == HNS3_NIC_STARTED)
+ hns3_dev_stop(eth_dev);
+
hw->adapter_state = HNS3_NIC_CLOSING;
- rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
+ hns3_reset_abort(hns);
+ hw->adapter_state = HNS3_NIC_CLOSED;
hns3_configure_all_mc_mac_addr(hns, true);
+ hns3_remove_all_vlan_table(hns);
+ hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
hns3_uninit_pf(eth_dev);
+ hns3_free_all_queues(eth_dev);
+ rte_free(hw->reset.wait_data);
rte_free(eth_dev->process_private);
eth_dev->process_private = NULL;
- hw->adapter_state = HNS3_NIC_CLOSED;
+ hns3_mp_uninit_primary();
+ hns3_warn(hw, "Close port %d finished", hw->data->port_id);
}
static int
for (i = 0; i < dcb_info->nb_tcs; i++)
dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];
- for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
- dcb_info->tc_queue.tc_rxq[0][i].base =
- hw->tc_queue[i].tqp_offset;
+ for (i = 0; i < hw->num_tc; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;
dcb_info->tc_queue.tc_txq[0][i].base =
- hw->tc_queue[i].tqp_offset;
- dcb_info->tc_queue.tc_rxq[0][i].nb_queue =
- hw->tc_queue[i].tqp_count;
+ hw->tc_queue[i].tqp_offset;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;
dcb_info->tc_queue.tc_txq[0][i].nb_queue =
- hw->tc_queue[i].tqp_count;
+ hw->tc_queue[i].tqp_count;
+ }
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static int
+hns3_reinit_dev(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3_cmd_init(hw);
+ if (ret) {
+ hns3_err(hw, "Failed to init cmd: %d", ret);
+ return ret;
+ }
+
+ ret = hns3_reset_all_queues(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to reset all queues: %d", ret);
+ goto err_init;
+ }
+
+ ret = hns3_init_hardware(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to init hardware: %d", ret);
+ goto err_init;
+ }
+
+ ret = hns3_enable_hw_error_intr(hns, true);
+ if (ret) {
+ hns3_err(hw, "fail to enable hw error interrupts: %d",
+ ret);
+ goto err_mac_init;
+ }
+ hns3_info(hw, "Reset done, driver initialization finished.");
+
+ return 0;
+
+err_mac_init:
+ hns3_uninit_umv_space(hw);
+err_init:
+ hns3_cmd_uninit(hw);
+
+ return ret;
+}
+
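+/*
+ * Poll the reset-status register corresponding to the current reset
+ * level; the reset is done once the level's status bit reads back 0.
+ */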
+static bool
+is_pf_reset_done(struct hns3_hw *hw)
+{
+ uint32_t val, reg, reg_bit;
+
+ switch (hw->reset.level) {
+ case HNS3_IMP_RESET:
+ reg = HNS3_GLOBAL_RESET_REG;
+ reg_bit = HNS3_IMP_RESET_BIT;
+ break;
+ case HNS3_GLOBAL_RESET:
+ reg = HNS3_GLOBAL_RESET_REG;
+ reg_bit = HNS3_GLOBAL_RESET_BIT;
+ break;
+ case HNS3_FUNC_RESET:
+ reg = HNS3_FUN_RST_ING;
+ reg_bit = HNS3_FUN_RST_ING_B;
+ break;
+ case HNS3_FLR_RESET:
+ default:
+ hns3_err(hw, "Wait for unsupported reset level: %d",
+ hw->reset.level);
+ return true;
+ }
+ val = hns3_read_dev(hw, reg);
+ if (hns3_get_bit(val, reg_bit))
+ return false;
+ else
+ return true;
+}
+
+bool
+hns3_is_reset_pending(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ enum hns3_reset_level reset;
+
+ hns3_check_event_cause(hns, NULL);
+ reset = hns3_get_reset_level(hns, &hw->reset.pending);
+ if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
+ hns3_warn(hw, "High level reset %d is pending", reset);
+ return true;
+ }
+ reset = hns3_get_reset_level(hns, &hw->reset.request);
+ if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
+ hns3_warn(hw, "High level reset %d is request", reset);
+ return true;
+ }
+ return false;
+}
+
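+/*
+ * Non-blocking wait for hardware ready: on the first call, arm an
+ * alarm that polls is_pf_reset_done() every HNS3_RESET_WAIT_MS up to
+ * HNS3_RESET_WAIT_CNT times, and return -EAGAIN until wait_data
+ * reports success or timeout.
+ */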
+static int
+hns3_wait_hardware_ready(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_wait_data *wait_data = hw->reset.wait_data;
+ struct timeval tv;
+
+ if (wait_data->result == HNS3_WAIT_SUCCESS)
+ return 0;
+ else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
+ gettimeofday(&tv, NULL);
+ hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
+ tv.tv_sec, tv.tv_usec);
+ return -ETIME;
+ } else if (wait_data->result == HNS3_WAIT_REQUEST)
+ return -EAGAIN;
+
+ wait_data->hns = hns;
+ wait_data->check_completion = is_pf_reset_done;
+ wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
+ HNS3_RESET_WAIT_MS + get_timeofday_ms();
+ wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
+ wait_data->count = HNS3_RESET_WAIT_CNT;
+ wait_data->result = HNS3_WAIT_REQUEST;
+ rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
+ return -EAGAIN;
+}
+
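+/*
+ * Ask firmware to trigger a function-level reset; hns3_prepare_reset()
+ * passes func_id 0 here, i.e. the PF itself.
+ */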
+static int
+hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
+{
+ struct hns3_cmd_desc desc;
+ struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
+ hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
+ req->fun_reset_vfid = func_id;
+
+ return hns3_cmd_send(hw, &desc, 1);
+}
+
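+/*
+ * Request an IMP (management processor) reset. The opcode 0xFFFE and
+ * the 0xeedd payload appear to be magic values understood by the
+ * firmware; this patch gives them no named constants.
+ */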
+static int
+hns3_imp_reset_cmd(struct hns3_hw *hw)
+{
+ struct hns3_cmd_desc desc;
+
+ hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
+ desc.data[0] = 0xeedd;
+
+ return hns3_cmd_send(hw, &desc, 1);
+}
+
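+/*
+ * Issue the reset that was requested via MSI-X, unless hardware is
+ * already in the middle of a global or function reset. IMP and global
+ * resets are triggered directly here; a function reset is only marked
+ * pending and handed back to the reset service.
+ */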
+static void
+hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct timeval tv;
+ uint32_t val;
+
+ gettimeofday(&tv, NULL);
+ if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
+ hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
+ hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
+ tv.tv_sec, tv.tv_usec);
+ return;
+ }
+
+ switch (reset_level) {
+ case HNS3_IMP_RESET:
+ hns3_imp_reset_cmd(hw);
+ hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
+ tv.tv_sec, tv.tv_usec);
+ break;
+ case HNS3_GLOBAL_RESET:
+ val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
+ hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
+ hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
+ hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
+ tv.tv_sec, tv.tv_usec);
+ break;
+ case HNS3_FUNC_RESET:
+ hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
+ tv.tv_sec, tv.tv_usec);
+ /* schedule again to check later */
+ hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
+ hns3_schedule_reset(hns);
+ break;
+ default:
+ hns3_warn(hw, "Unsupported reset level: %d", reset_level);
+ return;
+ }
+ hns3_atomic_clear_bit(reset_level, &hw->reset.request);
+}
+
+static enum hns3_reset_level
+hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+{
+ struct hns3_hw *hw = &hns->hw;
+ enum hns3_reset_level reset_level = HNS3_NONE_RESET;
+
+ /* Return the highest priority reset level amongst all */
+ if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
+ reset_level = HNS3_IMP_RESET;
+ else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
+ reset_level = HNS3_GLOBAL_RESET;
+ else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
+ reset_level = HNS3_FUNC_RESET;
+ else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
+ reset_level = HNS3_FLR_RESET;
+
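+ /* A request below the level already being handled is ignored. */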
+ if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
+ return HNS3_NONE_RESET;
+
+ return reset_level;
+}
+
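+/*
+ * Final preparation before the requested reset is triggered: for a
+ * function reset, send the trigger command to firmware and stop using
+ * the command queue until it is re-initialized; for an IMP reset,
+ * enable (unmask) the IMP reset interrupt bit.
+ */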
+static int
+hns3_prepare_reset(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ uint32_t reg_val;
+ int ret;
+
+ switch (hw->reset.level) {
+ case HNS3_FUNC_RESET:
+ ret = hns3_func_reset_cmd(hw, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * After performing a PF reset, it is not necessary to do any
+ * mailbox handling or send any command to firmware, because any
+ * mailbox handling or command to firmware is only valid after
+ * hns3_cmd_init is called.
+ */
+ rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ hw->reset.stats.request_cnt++;
+ break;
+ case HNS3_IMP_RESET:
+ reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
+ hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
+ BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
+ break;
+ default:
+ break;
}
+ return 0;
+}
+
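+/* Report to firmware that the driver has finished handling the reset. */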
+static int
+hns3_set_rst_done(struct hns3_hw *hw)
+{
+ struct hns3_pf_rst_done_cmd *req;
+ struct hns3_cmd_desc desc;
+
+ req = (struct hns3_pf_rst_done_cmd *)desc.data;
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
+ req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
+ return hns3_cmd_send(hw, &desc, 1);
+}
+
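+/*
+ * Quiesce the port before the hardware reset proper: cancel the
+ * periodic service alarm, mark the link down, re-select the Rx/Tx
+ * burst functions on every process and, if the port was running,
+ * stop it while remembering to free the in-flight mbufs later
+ * (reset.mbuf_deferred_free).
+ */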
+static int
+hns3_stop_service(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_eth_dev *eth_dev;
+
+ eth_dev = &rte_eth_devices[hw->data->port_id];
+ if (hw->adapter_state == HNS3_NIC_STARTED)
+ rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
+ hw->mac.link_status = ETH_LINK_DOWN;
+
+ hns3_set_rxtx_function(eth_dev);
+ rte_wmb();
+ /* Disable datapath on secondary process. */
+ hns3_mp_req_stop_rxtx(eth_dev);
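+ /*
+ * Roughly 1 ms per queue: presumably gives in-flight Rx/Tx bursts
+ * on other cores time to drain before the queues are stopped.
+ */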
+ rte_delay_ms(hw->tqps_num);
+
+ rte_spinlock_lock(&hw->lock);
+ if (hw->adapter_state == HNS3_NIC_STARTED ||
+ hw->adapter_state == HNS3_NIC_STOPPING) {
+ hns3_do_stop(hns);
+ hw->reset.mbuf_deferred_free = true;
+ } else {
+ hw->reset.mbuf_deferred_free = false;
+ }
+
+ /*
+ * It is cumbersome for hardware to pick-and-choose entries for
+ * deletion from the table space. Hence, for a function reset,
+ * software intervention is required to delete the entries.
+ */
+ if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+ hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
return 0;
}
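+
+/*
+ * Bring the datapath back once the hardware has been re-initialized:
+ * acknowledge the reset to firmware for IMP/global resets, restore the
+ * real Rx/Tx burst functions on every process and run the service
+ * handler again if the port was started.
+ */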
+static int
+hns3_start_service(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_eth_dev *eth_dev;
+
+ if (hw->reset.level == HNS3_IMP_RESET ||
+ hw->reset.level == HNS3_GLOBAL_RESET)
+ hns3_set_rst_done(hw);
+ eth_dev = &rte_eth_devices[hw->data->port_id];
+ hns3_set_rxtx_function(eth_dev);
+ hns3_mp_req_start_rxtx(eth_dev);
+ if (hw->adapter_state == HNS3_NIC_STARTED)
+ hns3_service_handler(eth_dev);
+
+ return 0;
+}
+
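+/*
+ * Restore the user configuration that the reset wiped out: MAC and
+ * multicast address tables, promiscuous state, VLAN tables and flow
+ * director rules, then restart the port if it was running. On failure
+ * the address tables are removed again so a retry starts clean.
+ */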
+static int
+hns3_restore_conf(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3_configure_all_mac_addr(hns, false);
+ if (ret)
+ return ret;
+
+ ret = hns3_configure_all_mc_mac_addr(hns, false);
+ if (ret)
+ goto err_mc_mac;
+
+ ret = hns3_dev_promisc_restore(hns);
+ if (ret)
+ goto err_promisc;
+
+ ret = hns3_restore_vlan_table(hns);
+ if (ret)
+ goto err_promisc;
+
+ ret = hns3_restore_vlan_conf(hns);
+ if (ret)
+ goto err_promisc;
+
+ ret = hns3_restore_all_fdir_filter(hns);
+ if (ret)
+ goto err_promisc;
+
+ if (hw->adapter_state == HNS3_NIC_STARTED) {
+ ret = hns3_do_start(hns, false);
+ if (ret)
+ goto err_promisc;
+ hns3_info(hw, "hns3 dev restart successful!");
+ } else if (hw->adapter_state == HNS3_NIC_STOPPING) {
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ }
+ return 0;
+
+err_promisc:
+ hns3_configure_all_mc_mac_addr(hns, true);
+err_mc_mac:
+ hns3_configure_all_mac_addr(hns, true);
+ return ret;
+}
+
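+/*
+ * Reset service task, scheduled via hns3_schedule_reset() /
+ * hns3_schedule_delayed_reset():
+ * 1. If the scheduling state says the interrupt was deferred (and may
+ * have been lost), re-run the interrupt handler here.
+ * 2. Step the state machine for any pending reset; -EAGAIN means the
+ * process has rescheduled itself and will continue later.
+ * 3. Trigger any newly requested reset via hns3_msix_process().
+ */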
+static void
+hns3_reset_service(void *param)
+{
+ struct hns3_adapter *hns = (struct hns3_adapter *)param;
+ struct hns3_hw *hw = &hns->hw;
+ enum hns3_reset_level reset_level;
+ struct timeval tv_delta;
+ struct timeval tv_start;
+ struct timeval tv;
+ uint64_t msec;
+ int ret;
+
+ /*
+ * If the interrupt was not triggered within the delay time, it
+ * may have been lost; handle it here anyway so the driver can
+ * still recover from the error.
+ */
+ if (rte_atomic16_read(&hw->reset.schedule) == SCHEDULE_DEFERRED) {
+ rte_atomic16_set(&hw->reset.schedule, SCHEDULE_REQUESTED);
+ hns3_err(hw, "Handling interrupts in delayed tasks");
+ hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
+ reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
+ if (reset_level == HNS3_NONE_RESET) {
+ hns3_err(hw, "No reset level is set, try IMP reset");
+ hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
+ }
+ }
+ rte_atomic16_set(&hw->reset.schedule, SCHEDULE_NONE);
+
+ /*
+ * Check if there is any ongoing reset in the hardware. This status
+ * can be checked from reset_pending. If so, we need to wait for the
+ * hardware to complete the reset:
+ * a. If we can determine in reasonable time that the hardware has
+ * fully reset, proceed with the driver/client reset.
+ * b. Otherwise, come back later to check the status, so reschedule
+ * now.
+ */
+ reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
+ if (reset_level != HNS3_NONE_RESET) {
+ gettimeofday(&tv_start, NULL);
+ ret = hns3_reset_process(hns, reset_level);
+ gettimeofday(&tv, NULL);
+ timersub(&tv, &tv_start, &tv_delta);
+ msec = tv_delta.tv_sec * MSEC_PER_SEC +
+ tv_delta.tv_usec / USEC_PER_MSEC;
+ if (msec > HNS3_RESET_PROCESS_MS)
+ hns3_err(hw, "%d handle long time delta %" PRIx64
+ " ms time=%ld.%.6ld",
+ hw->reset.level, msec,
+ tv.tv_sec, tv.tv_usec);
+ if (ret == -EAGAIN)
+ return;
+ }
+
+ /* Check if we got any *new* reset requests to be honored */
+ reset_level = hns3_get_reset_level(hns, &hw->reset.request);
+ if (reset_level != HNS3_NONE_RESET)
+ hns3_msix_process(hns, reset_level);
+}
+
static const struct eth_dev_ops hns3_eth_dev_ops = {
+ .dev_start = hns3_dev_start,
+ .dev_stop = hns3_dev_stop,
.dev_close = hns3_dev_close,
+ .promiscuous_enable = hns3_dev_promiscuous_enable,
+ .promiscuous_disable = hns3_dev_promiscuous_disable,
+ .allmulticast_enable = hns3_dev_allmulticast_enable,
+ .allmulticast_disable = hns3_dev_allmulticast_disable,
.mtu_set = hns3_dev_mtu_set,
+ .stats_get = hns3_stats_get,
+ .stats_reset = hns3_stats_reset,
+ .xstats_get = hns3_dev_xstats_get,
+ .xstats_get_names = hns3_dev_xstats_get_names,
+ .xstats_reset = hns3_dev_xstats_reset,
+ .xstats_get_by_id = hns3_dev_xstats_get_by_id,
+ .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
.dev_infos_get = hns3_dev_infos_get,
.fw_version_get = hns3_fw_version_get,
+ .rx_queue_setup = hns3_rx_queue_setup,
+ .tx_queue_setup = hns3_tx_queue_setup,
+ .rx_queue_release = hns3_dev_rx_queue_release,
+ .tx_queue_release = hns3_dev_tx_queue_release,
+ .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
+ .dev_configure = hns3_dev_configure,
.flow_ctrl_get = hns3_flow_ctrl_get,
.flow_ctrl_set = hns3_flow_ctrl_set,
.priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
.reta_update = hns3_dev_rss_reta_update,
.reta_query = hns3_dev_rss_reta_query,
.filter_ctrl = hns3_dev_filter_ctrl,
+ .vlan_filter_set = hns3_vlan_filter_set,
+ .vlan_tpid_set = hns3_vlan_tpid_set,
+ .vlan_offload_set = hns3_vlan_offload_set,
+ .vlan_pvid_set = hns3_vlan_pvid_set,
+ .get_reg = hns3_get_regs,
.get_dcb_info = hns3_get_dcb_info,
+ .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
+};
+
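+/*
+ * Callbacks consumed by the common reset state machine (see
+ * hns3_intr.h); the framework decides when each stage runs, these
+ * hooks only implement the PF-specific work for every stage.
+ */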
+static const struct hns3_reset_ops hns3_reset_ops = {
+ .reset_service = hns3_reset_service,
+ .stop_service = hns3_stop_service,
+ .prepare_reset = hns3_prepare_reset,
+ .wait_hardware_ready = hns3_wait_hardware_ready,
+ .reinit_dev = hns3_reinit_dev,
+ .restore_conf = hns3_restore_conf,
+ .start_service = hns3_start_service,
};
static int
/* initialize flow filter lists */
hns3_filterlist_init(eth_dev);
+ hns3_set_rxtx_function(eth_dev);
eth_dev->dev_ops = &hns3_eth_dev_ops;
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ hns3_mp_init_secondary();
+ hw->secondary_cnt++;
return 0;
+ }
+ hns3_mp_init_primary();
hw->adapter_state = HNS3_NIC_UNINITIALIZED;
if (device_id == HNS3_DEV_ID_25GE_RDMA ||
*/
hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;
+ ret = hns3_reset_init(hw);
+ if (ret)
+ goto err_init_reset;
+ hw->reset.ops = &hns3_reset_ops;
+
ret = hns3_init_pf(eth_dev);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
*/
eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
- rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
+ if (rte_atomic16_read(&hw->reset.schedule) == SCHEDULE_PENDING) {
+ hns3_err(hw, "Reschedule reset service after dev_init");
+ hns3_schedule_reset(hns);
+ } else {
+ /* The IMP will wait for the ready flag before starting the reset */
+ hns3_notify_reset_ready(hw, false);
+ }
+
hns3_info(hw, "hns3 dev initialization successful!");
return 0;
hns3_uninit_pf(eth_dev);
err_init_pf:
+ rte_free(hw->reset.wait_data);
+err_init_reset:
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;