#include "hns3_ethdev.h"
#include "hns3_logs.h"
+#include "hns3_rxtx.h"
#include "hns3_regs.h"
+#include "hns3_dcb.h"
#define HNS3_DEFAULT_PORT_CONF_BURST_SIZE 32
#define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1
+#define HNS3_SERVICE_INTERVAL 1000000 /* us */
+#define HNS3_PORT_BASE_VLAN_DISABLE 0
+#define HNS3_PORT_BASE_VLAN_ENABLE 1
+#define HNS3_INVLID_PVID 0xFFFF
+
+#define HNS3_FILTER_TYPE_VF 0
+#define HNS3_FILTER_TYPE_PORT 1
+#define HNS3_FILTER_FE_EGRESS_V1_B BIT(0)
+#define HNS3_FILTER_FE_NIC_INGRESS_B BIT(0)
+#define HNS3_FILTER_FE_NIC_EGRESS_B BIT(1)
+#define HNS3_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HNS3_FILTER_FE_ROCE_EGRESS_B BIT(3)
+#define HNS3_FILTER_FE_EGRESS (HNS3_FILTER_FE_NIC_EGRESS_B \
+ | HNS3_FILTER_FE_ROCE_EGRESS_B)
+#define HNS3_FILTER_FE_INGRESS (HNS3_FILTER_FE_NIC_INGRESS_B \
+ | HNS3_FILTER_FE_ROCE_INGRESS_B)
+
int hns3_logtype_init;
int hns3_logtype_driver;
+static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
+ int on);
+
+static int
+hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
+{
+#define HNS3_VLAN_OFFSET_160 160
+ struct hns3_vlan_filter_pf_cfg_cmd *req;
+ struct hns3_hw *hw = &hns->hw;
+ uint8_t vlan_offset_byte_val;
+ struct hns3_cmd_desc desc;
+ uint8_t vlan_offset_byte;
+ uint8_t vlan_offset_160;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);
+
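+ /*
+ * The vlan filter table is organized in blocks of 160 vlan ids, one
+ * command per block: locate the block, then the byte and bit within
+ * it. For example, vlan_id 2000 maps to block 12, byte 10, bit 0.
+ */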
+ vlan_offset_160 = vlan_id / HNS3_VLAN_OFFSET_160;
+ vlan_offset_byte = (vlan_id % HNS3_VLAN_OFFSET_160) / 8;
+ vlan_offset_byte_val = 1 << (vlan_id % 8);
+
+ req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
+ req->vlan_offset = vlan_offset_160;
+ req->vlan_cfg = on ? 0 : 1;
+ req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
+ vlan_id, ret);
+
+ return ret;
+}
+
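+/*
+ * Remove a vlan id from the driver's soft vlan list; if the entry has
+ * been written to hardware, clear the hardware filter entry as well.
+ */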
+static void
+hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
+{
+ struct hns3_user_vlan_table *vlan_entry;
+ struct hns3_pf *pf = &hns->pf;
+
+ LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
+ if (vlan_entry->vlan_id == vlan_id) {
+ if (vlan_entry->hd_tbl_status)
+ hns3_set_port_vlan_filter(hns, vlan_id, 0);
+ LIST_REMOVE(vlan_entry, next);
+ rte_free(vlan_entry);
+ break;
+ }
+ }
+}
+
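+/*
+ * Track a vlan id in the driver's soft vlan list, recording whether the
+ * id has already been written to the hardware filter table.
+ */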
+static void
+hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
+ bool writen_to_tbl)
+{
+ struct hns3_user_vlan_table *vlan_entry;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+
+ vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
+ if (vlan_entry == NULL) {
+ hns3_err(hw, "Failed to malloc hns3 vlan table");
+ return;
+ }
+
+ vlan_entry->hd_tbl_status = writen_to_tbl;
+ vlan_entry->vlan_id = vlan_id;
+
+ LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
+}
+
+static int
+hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
+{
+ struct hns3_pf *pf = &hns->pf;
+ bool writen_to_tbl = false;
+ int ret = 0;
+
+ /*
+ * When the vlan filter is enabled, hardware regards vlan id 0 as the
+ * entry for normal packets, so deleting vlan id 0 is not allowed.
+ */
+ if (on == 0 && vlan_id == 0)
+ return 0;
+
+ /*
+ * When port based vlan is enabled, the port based vlan is used as the
+ * vlan filter condition. In this case, the vlan filter table is not
+ * updated when the user adds a new vlan or removes an existing one;
+ * only the vlan list is updated. The vlan ids in the list are written
+ * to the vlan filter table once port based vlan is disabled.
+ */
+ if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
+ ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
+ writen_to_tbl = true;
+ }
+
+ if (ret == 0 && vlan_id) {
+ if (on)
+ hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
+ else
+ hns3_rm_dev_vlan_table(hns, vlan_id);
+ }
+ return ret;
+}
+
+static int
+hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_vlan_filter_configure(hns, vlan_id, on);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+}
+
+static int
+hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct hns3_rx_vlan_type_cfg_cmd *rx_req;
+ struct hns3_tx_vlan_type_cfg_cmd *tx_req;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ if (vlan_type != ETH_VLAN_TYPE_INNER &&
+ vlan_type != ETH_VLAN_TYPE_OUTER) {
+ hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
+ return -EINVAL;
+ }
+
+ if (tpid != RTE_ETHER_TYPE_VLAN) {
+ hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
+ return -EINVAL;
+ }
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
+ rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
+
+ if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+ rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
+ rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
+ } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+ rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
+ rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
+ rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
+ rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
+ }
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
+ ret);
+ return ret;
+ }
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);
+
+ tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
+ tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
+ tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
+ ret);
+ return ret;
+}
+
+static int
+hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+}
+
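+/* Program the per-vport rx vlan tag stripping configuration. */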
+static int
+hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
+ struct hns3_rx_vtag_cfg *vcfg)
+{
+ struct hns3_vport_vtag_rx_cfg_cmd *req;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_cmd_desc desc;
+ uint16_t vport_id;
+ uint8_t bitmap;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);
+
+ req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+ /*
+ * In the current version, VF is not supported when the PF is driven by
+ * the DPDK driver; the PF-related vf_id is 0, so only the parameters
+ * for vport_id 0 need to be configured.
+ */
+ vport_id = 0;
+ req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
+ bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
+ req->vf_bitmap[req->vf_offset] = bitmap;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
+ return ret;
+}
+
+static void
+hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
+ struct hns3_rx_vtag_cfg *vcfg)
+{
+ struct hns3_pf *pf = &hns->pf;
+ memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
+}
+
+static void
+hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
+ struct hns3_tx_vtag_cfg *vcfg)
+{
+ struct hns3_pf *pf = &hns->pf;
+ memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
+}
+
+static int
+hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
+{
+ struct hns3_rx_vtag_cfg rxvlan_cfg;
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
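+ /*
+ * Without a port based vlan, the user setting controls tag2 stripping;
+ * with one, it controls tag1 and tag2 is always stripped.
+ */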
+ if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
+ rxvlan_cfg.strip_tag1_en = false;
+ rxvlan_cfg.strip_tag2_en = enable;
+ } else {
+ rxvlan_cfg.strip_tag1_en = enable;
+ rxvlan_cfg.strip_tag2_en = true;
+ }
+
+ rxvlan_cfg.vlan1_vlan_prionly = false;
+ rxvlan_cfg.vlan2_vlan_prionly = false;
+ rxvlan_cfg.rx_vlan_offload_en = enable;
+
+ ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
+ if (ret) {
+ hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret);
+ return ret;
+ }
+
+ hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);
+
+ return ret;
+}
+
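+/*
+ * Enable or disable the hardware vlan filter for the given filter type
+ * (port or vf) and filter-enable bits.
+ */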
+static int
+hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
+ uint8_t fe_type, bool filter_en, uint8_t vf_id)
+{
+ struct hns3_vlan_filter_ctrl_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);
+
+ req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
+ req->vlan_type = vlan_type;
+ req->vlan_fe = filter_en ? fe_type : 0;
+ req->vf_id = vf_id;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "set vlan filter fail, ret =%d", ret);
+
+ return ret;
+}
+
+static int
+hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
+ HNS3_FILTER_FE_EGRESS, false, 0);
+ if (ret) {
+ hns3_err(hw, "hns3 enable filter fail, ret =%d", ret);
+ return ret;
+ }
+
+ ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
+ HNS3_FILTER_FE_INGRESS, enable, 0);
+ if (ret)
+ hns3_err(hw, "hns3 enable filter fail, ret =%d", ret);
+
+ return ret;
+}
+
+static int
+hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_eth_rxmode *rxmode;
+ unsigned int tmp_mask;
+ bool enable;
+ int ret = 0;
+
+ rte_spinlock_lock(&hw->lock);
+ rxmode = &dev->data->dev_conf.rxmode;
+ tmp_mask = (unsigned int)mask;
+ if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
+ true : false;
+
+ ret = hns3_en_hw_strip_rxvtag(hns, enable);
+ if (ret) {
+ rte_spinlock_unlock(&hw->lock);
+ hns3_err(hw, "failed to enable rx strip, ret =%d", ret);
+ return ret;
+ }
+ }
+
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
+ struct hns3_tx_vtag_cfg *vcfg)
+{
+ struct hns3_vport_vtag_tx_cfg_cmd *req;
+ struct hns3_cmd_desc desc;
+ struct hns3_hw *hw = &hns->hw;
+ uint16_t vport_id;
+ uint8_t bitmap;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);
+
+ req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->def_vlan_tag1 = vcfg->default_tag1;
+ req->def_vlan_tag2 = vcfg->default_tag2;
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
+ vcfg->accept_tag1 ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
+ vcfg->accept_untag1 ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
+ vcfg->accept_tag2 ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
+ vcfg->accept_untag2 ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);
+
+ /*
+ * In the current version, VF is not supported when the PF is driven by
+ * the DPDK driver; the PF-related vf_id is 0, so only the parameters
+ * for vport_id 0 need to be configured.
+ */
+ vport_id = 0;
+ req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
+ bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
+ req->vf_bitmap[req->vf_offset] = bitmap;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);
+
+ return ret;
+}
+
+static int
+hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
+ uint16_t pvid)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_tx_vtag_cfg txvlan_cfg;
+ int ret;
+
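+ /*
+ * With a port based vlan, hardware inserts the pvid as the default
+ * tag1 on transmit and tagged packets are no longer accepted on tag1.
+ */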
+ if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
+ txvlan_cfg.accept_tag1 = true;
+ txvlan_cfg.insert_tag1_en = false;
+ txvlan_cfg.default_tag1 = 0;
+ } else {
+ txvlan_cfg.accept_tag1 = false;
+ txvlan_cfg.insert_tag1_en = true;
+ txvlan_cfg.default_tag1 = pvid;
+ }
+
+ txvlan_cfg.accept_untag1 = true;
+ txvlan_cfg.accept_tag2 = true;
+ txvlan_cfg.accept_untag2 = true;
+ txvlan_cfg.insert_tag2_en = false;
+ txvlan_cfg.default_tag2 = 0;
+
+ ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
+ if (ret) {
+ hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
+ ret);
+ return ret;
+ }
+
+ hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
+ return ret;
+}
+
+static void
+hns3_store_port_base_vlan_info(struct hns3_adapter *hns, uint16_t pvid, int on)
+{
+ struct hns3_pf *pf = &hns->pf;
+
+ pf->port_base_vlan_cfg.state = on ?
+ HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
+
+ pf->port_base_vlan_cfg.pvid = pvid;
+}
+
+static void
+hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
+{
+ struct hns3_user_vlan_table *vlan_entry;
+ struct hns3_pf *pf = &hns->pf;
+
+ LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
+ if (vlan_entry->hd_tbl_status)
+ hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
+
+ vlan_entry->hd_tbl_status = false;
+ }
+
+ if (is_del_list) {
+ vlan_entry = LIST_FIRST(&pf->vlan_list);
+ while (vlan_entry) {
+ LIST_REMOVE(vlan_entry, next);
+ rte_free(vlan_entry);
+ vlan_entry = LIST_FIRST(&pf->vlan_list);
+ }
+ }
+}
+
+static void
+hns3_add_all_vlan_table(struct hns3_adapter *hns)
+{
+ struct hns3_user_vlan_table *vlan_entry;
+ struct hns3_pf *pf = &hns->pf;
+
+ LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
+ if (!vlan_entry->hd_tbl_status)
+ hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
+
+ vlan_entry->hd_tbl_status = true;
+ }
+}
+
+static void
+hns3_remove_all_vlan_table(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+ int ret;
+
+ hns3_rm_all_vlan_table(hns, true);
+ if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) {
+ ret = hns3_set_port_vlan_filter(hns,
+ pf->port_base_vlan_cfg.pvid, 0);
+ if (ret) {
+ hns3_err(hw, "Failed to remove all vlan table, ret =%d",
+ ret);
+ return;
+ }
+ }
+}
+
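+/*
+ * Switch the hardware filter entries on a port based vlan transition:
+ * with a pvid only the pvid entry stays in hardware; the user vlan list
+ * is written back once the pvid is removed.
+ */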
+static int
+hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
+ uint16_t port_base_vlan_state,
+ uint16_t new_pvid, uint16_t old_pvid)
+{
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_hw *hw = &hns->hw;
+ int ret = 0;
+
+ if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
+ if (old_pvid != HNS3_INVLID_PVID && old_pvid != 0) {
+ ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
+ if (ret) {
+ hns3_err(hw,
+ "Failed to clear clear old pvid filter, ret =%d",
+ ret);
+ return ret;
+ }
+ }
+
+ hns3_rm_all_vlan_table(hns, false);
+ return hns3_set_port_vlan_filter(hns, new_pvid, 1);
+ }
+
+ if (new_pvid != 0) {
+ ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
+ if (ret) {
+ hns3_err(hw, "Failed to set port vlan filter, ret =%d",
+ ret);
+ return ret;
+ }
+ }
+
+ if (new_pvid == pf->port_base_vlan_cfg.pvid)
+ hns3_add_all_vlan_table(hns);
+
+ return ret;
+}
+
+static int
+hns3_en_rx_strip_all(struct hns3_adapter *hns, int on)
+{
+ struct hns3_rx_vtag_cfg rx_vlan_cfg;
+ struct hns3_hw *hw = &hns->hw;
+ bool rx_strip_en;
+ int ret;
+
+ rx_strip_en = on ? true : false;
+ rx_vlan_cfg.strip_tag1_en = rx_strip_en;
+ rx_vlan_cfg.strip_tag2_en = rx_strip_en;
+ rx_vlan_cfg.vlan1_vlan_prionly = false;
+ rx_vlan_cfg.vlan2_vlan_prionly = false;
+ rx_vlan_cfg.rx_vlan_offload_en = rx_strip_en;
+
+ ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
+ if (ret) {
+ hns3_err(hw, "enable strip rx failed, ret =%d", ret);
+ return ret;
+ }
+
+ hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
+ return ret;
+}
+
+static int
+hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
+{
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_hw *hw = &hns->hw;
+ uint16_t port_base_vlan_state;
+ uint16_t old_pvid;
+ int ret;
+
+ if (on == 0 && pvid != pf->port_base_vlan_cfg.pvid) {
+ if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID)
+ hns3_warn(hw, "Invalid operation! As current pvid set "
+ "is %u, disable pvid %u is invalid",
+ pf->port_base_vlan_cfg.pvid, pvid);
+ return 0;
+ }
+
+ port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
+ HNS3_PORT_BASE_VLAN_DISABLE;
+ ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
+ if (ret) {
+ hns3_err(hw, "Failed to config tx vlan, ret =%d", ret);
+ return ret;
+ }
+
+ ret = hns3_en_rx_strip_all(hns, on);
+ if (ret) {
+ hns3_err(hw, "Failed to config rx vlan strip, ret =%d", ret);
+ return ret;
+ }
+
+ if (pvid == HNS3_INVLID_PVID)
+ goto out;
+ old_pvid = pf->port_base_vlan_cfg.pvid;
+ ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid,
+ old_pvid);
+ if (ret) {
+ hns3_err(hw, "Failed to update vlan filter entries, ret =%d",
+ ret);
+ return ret;
+ }
+
+out:
+ hns3_store_port_base_vlan_info(hns, pvid, on);
+ return ret;
+}
+
+static int
+hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_vlan_pvid_configure(hns, pvid, on);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+}
+
+static void
+init_port_base_vlan_info(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_pf *pf = &hns->pf;
+
+ pf->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
+ pf->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID;
+}
+
+static int
+hns3_default_vlan_config(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3_set_port_vlan_filter(hns, 0, 1);
+ if (ret)
+ hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
+ return ret;
+}
+
+static int
+hns3_init_vlan_config(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ /*
+ * This function can be called during both initialization and reset. In
+ * the reset process, it means the hardware has been reset successfully
+ * and the configuration needs to be restored so that it remains
+ * unchanged before and after the reset.
+ */
+ if (rte_atomic16_read(&hw->reset.resetting) == 0)
+ init_port_base_vlan_info(hw);
+
+ ret = hns3_enable_vlan_filter(hns, true);
+ if (ret) {
+ hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
+ return ret;
+ }
+
+ ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
+ RTE_ETHER_TYPE_VLAN);
+ if (ret) {
+ hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
+ return ret;
+ }
+
+ /*
+ * When in the reinit dev stage of the reset process, the following
+ * vlan-related configurations may differ from those at initialization;
+ * they will be restored to hardware later in hns3_restore_vlan_table
+ * and hns3_restore_vlan_conf.
+ */
+ if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ ret = hns3_vlan_pvid_configure(hns, HNS3_INVLID_PVID, 0);
+ if (ret) {
+ hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
+ return ret;
+ }
+
+ ret = hns3_en_hw_strip_rxvtag(hns, false);
+ if (ret) {
+ hns3_err(hw, "rx strip configure fail in pf, ret =%d",
+ ret);
+ return ret;
+ }
+ }
+
+ return hns3_default_vlan_config(hns);
+}
+
+static int
+hns3_dev_configure_vlan(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_txmode *txmode;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ txmode = &data->dev_conf.txmode;
+ if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
+ hns3_warn(hw,
+ "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
+ "configuration is not supported! Ignore these two "
+ "parameters: hw_vlan_reject_tagged(%d), "
+ "hw_vlan_reject_untagged(%d)",
+ txmode->hw_vlan_reject_tagged,
+ txmode->hw_vlan_reject_untagged);
+
+ /* Apply vlan offload setting */
+ ret = hns3_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+ if (ret) {
+ hns3_err(hw, "dev config vlan Strip failed, ret =%d", ret);
+ return ret;
+ }
+
+ /* Apply pvid setting */
+ ret = hns3_vlan_pvid_set(dev, txmode->pvid,
+ txmode->hw_vlan_insert_pvid);
+ if (ret)
+ hns3_err(hw, "dev config vlan pvid(%d) failed, ret =%d",
+ txmode->pvid, ret);
+
+ return ret;
+}
+
static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
unsigned int tso_mss_max)
goto err_add_uc_addr;
}
+ ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
+ if (ret) {
+ hns3_err(hw, "Failed to configure mac pause address: %d", ret);
+ goto err_pause_addr_cfg;
+ }
+
rte_ether_addr_copy(mac_addr,
(struct rte_ether_addr *)hw->mac.mac_addr);
hw->mac.default_addr_setted = true;
return 0;
+err_pause_addr_cfg:
+ ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
+ if (ret_val) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_warn(hw,
+ "Failed to roll back to del setted mac addr(%s): %d",
+ mac_str, ret_val);
+ }
+
err_add_uc_addr:
if (rm_succes) {
ret_val = hns3_add_uc_addr_common(hw, oaddr);
return ret;
}
+static int
+hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
+{
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_ether_addr *addr;
+ int err = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) {
+ addr = &hw->data->mac_addrs[i];
+ if (!rte_is_valid_assigned_ether_addr(addr))
+ continue;
+ if (del)
+ ret = hns3_remove_uc_addr_common(hw, addr);
+ else
+ ret = hns3_add_uc_addr_common(hw, addr);
+ if (ret) {
+ err = ret;
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ addr);
+ hns3_dbg(hw,
+ "Failed to %s mac addr(%s). ret:%d i:%d",
+ del ? "remove" : "restore", mac_str, ret, i);
+ }
+ }
+ return err;
+}
+
static void
hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
{
rte_ether_addr_copy(addr, &hw->mc_addrs[num]);
hw->mc_addrs_num++;
}
- rte_spinlock_unlock(&hw->lock);
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static int
+hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
+{
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_ether_addr *addr;
+ int err = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i < hw->mc_addrs_num; i++) {
+ addr = &hw->mc_addrs[i];
+ if (!rte_is_multicast_ether_addr(addr))
+ continue;
+ if (del)
+ ret = hns3_remove_mc_addr(hw, addr);
+ else
+ ret = hns3_add_mc_addr(hw, addr);
+ if (ret) {
+ err = ret;
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ addr);
+ hns3_dbg(hw, "%s mc mac addr: %s failed",
+ del ? "Remove" : "Restore", mac_str);
+ }
+ }
+ return err;
+}
+
+static int
+hns3_check_mq_mode(struct rte_eth_dev *dev)
+{
+ enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_dcb_rx_conf *dcb_rx_conf;
+ struct rte_eth_dcb_tx_conf *dcb_tx_conf;
+ uint8_t num_tc;
+ int max_tc = 0;
+ int i;
+
+ dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+
+ if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+ hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB_RSS is not supported. "
+ "rx_mq_mode = %d", rx_mq_mode);
+ return -EINVAL;
+ }
+
+ if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB ||
+ tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB and ETH_MQ_TX_VMDQ_DCB "
+ "is not supported. rx_mq_mode = %d, tx_mq_mode = %d",
+ rx_mq_mode, tx_mq_mode);
+ return -EINVAL;
+ }
+
+ if (rx_mq_mode == ETH_MQ_RX_DCB_RSS) {
+ if (dcb_rx_conf->nb_tcs > pf->tc_max) {
+ hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
+ dcb_rx_conf->nb_tcs, pf->tc_max);
+ return -EINVAL;
+ }
+
+ if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
+ dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
+ hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+ "nb_tcs(%d) != %d or %d in rx direction.",
+ dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
+ return -EINVAL;
+ }
+
+ if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
+ hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
+ dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
+ if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
+ hns3_err(hw, "dcb_tc[%d] = %d in rx direction, "
+ "is not equal to one in tx direction.",
+ i, dcb_rx_conf->dcb_tc[i]);
+ return -EINVAL;
+ }
+ if (dcb_rx_conf->dcb_tc[i] > max_tc)
+ max_tc = dcb_rx_conf->dcb_tc[i];
+ }
+
+ num_tc = max_tc + 1;
+ if (num_tc > dcb_rx_conf->nb_tcs) {
+ hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
+ num_tc, dcb_rx_conf->nb_tcs);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+hns3_check_dcb_cfg(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!hns3_dev_dcb_supported(hw)) {
+ hns3_err(hw, "this port does not support dcb configurations.");
+ return -EOPNOTSUPP;
+ }
+
+ if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
+ hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
+ return -EOPNOTSUPP;
+ }
+
+ /* Check multiple queue mode */
+ return hns3_check_mq_mode(dev);
+}
+
+static int
+hns3_dev_configure(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rss_conf *rss_cfg = &hw->rss_info;
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+ struct rte_eth_rss_conf rss_conf;
+ uint16_t mtu;
+ int ret;
+
+ /*
+ * The hip08 hardware does not support configurations in which the
+ * number of rx queues differs from the number of tx queues.
+ */
+ if (nb_rx_q != nb_tx_q) {
+ hns3_err(hw,
+ "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
+ "Hardware does not support this configuration!",
+ nb_rx_q, nb_tx_q);
+ return -EINVAL;
+ }
- return 0;
-}
+ if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ hns3_err(hw, "setting link speed/duplex not supported");
+ return -EINVAL;
+ }
-static int
-hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
-{
- char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
- struct hns3_hw *hw = &hns->hw;
- struct rte_ether_addr *addr;
- int err = 0;
- int ret;
- int i;
+ hw->adapter_state = HNS3_NIC_CONFIGURING;
+ if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ ret = hns3_check_dcb_cfg(dev);
+ if (ret)
+ goto cfg_err;
+ }
- for (i = 0; i < hw->mc_addrs_num; i++) {
- addr = &hw->mc_addrs[i];
- if (!rte_is_multicast_ether_addr(addr))
- continue;
- if (del)
- ret = hns3_remove_mc_addr(hw, addr);
- else
- ret = hns3_add_mc_addr(hw, addr);
- if (ret) {
- err = ret;
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
- addr);
- hns3_dbg(hw, "%s mc mac addr: %s failed",
- del ? "Remove" : "Restore", mac_str);
+ /* When RSS is not configured, packets are directed to queue 0 */
+ if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ rss_conf = conf->rx_adv_conf.rss_conf;
+ if (rss_conf.rss_key == NULL) {
+ rss_conf.rss_key = rss_cfg->key;
+ rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
}
+
+ ret = hns3_dev_rss_hash_update(dev, &rss_conf);
+ if (ret)
+ goto cfg_err;
}
- return err;
+
+ /*
+ * If jumbo frames are enabled, MTU needs to be refreshed
+ * according to the maximum RX packet length.
+ */
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ /*
+ * The validity of max_rx_pkt_len is guaranteed by the DPDK
+ * framework. Its maximum value is HNS3_MAX_FRAME_LEN, so it can
+ * safely be assigned to a "uint16_t" variable.
+ */
+ mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
+ ret = hns3_dev_mtu_set(dev, mtu);
+ if (ret)
+ goto cfg_err;
+ dev->data->mtu = mtu;
+ }
+
+ ret = hns3_dev_configure_vlan(dev);
+ if (ret)
+ goto cfg_err;
+
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+
+ return 0;
+
+cfg_err:
+ hw->adapter_state = HNS3_NIC_INITIALIZED;
+ return ret;
}
static int
return 0;
}
+static int
+hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
+ struct hns3_hw *hw = &hns->hw;
+ bool is_jumbo_frame;
+ int ret;
+
+ if (dev->data->dev_started) {
+ hns3_err(hw, "Failed to set mtu, port %u must be stopped "
+ "before configuration", dev->data->port_id);
+ return -EBUSY;
+ }
+
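+ /*
+ * frame_size is the mtu plus the HNS3_ETH_OVERHEAD link layer
+ * overhead; frames above RTE_ETHER_MAX_LEN are jumbo frames.
+ */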
+ rte_spinlock_lock(&hw->lock);
+ is_jumbo_frame = frame_size > RTE_ETHER_MAX_LEN ? true : false;
+ frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
+
+ /*
+ * The maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can
+ * safely be assigned to a "uint16_t" variable.
+ */
+ ret = hns3_config_mtu(hw, (uint16_t)frame_size);
+ if (ret) {
+ rte_spinlock_unlock(&hw->lock);
+ hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
+ dev->data->port_id, mtu, ret);
+ return ret;
+ }
+ hns->pf.mps = (uint16_t)frame_size;
+ if (is_jumbo_frame)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static int
+hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
+{
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+
+ info->max_rx_queues = hw->tqps_num;
+ info->max_tx_queues = hw->tqps_num;
+ info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
+ info->min_rx_bufsize = hw->rx_buf_len;
+ info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
+ info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
+ info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_SCTP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_JUMBO_FRAME);
+ info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ info->tx_queue_offload_capa);
+
+ info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = HNS3_MAX_RING_DESC,
+ .nb_min = HNS3_MIN_RING_DESC,
+ .nb_align = HNS3_ALIGN_RING_DESC,
+ };
+
+ info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = HNS3_MAX_RING_DESC,
+ .nb_min = HNS3_MIN_RING_DESC,
+ .nb_align = HNS3_ALIGN_RING_DESC,
+ };
+
+ info->vmdq_queue_num = 0;
+
+ info->reta_size = HNS3_RSS_IND_TBL_SIZE;
+ info->hash_key_size = HNS3_RSS_KEY_SIZE;
+ info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
+
+ info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
+ info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
+ info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
+ info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
+ info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
+ info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
+
+ return 0;
+}
+
+static int
+hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+ size_t fw_size)
+{
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
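+ /*
+ * snprintf returns the length the string would require; report the
+ * needed size (including the '\0') when fw_size is too small.
+ */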
+ ret = snprintf(fw_version, fw_size, "0x%08x", hw->fw_version);
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (uint32_t)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static int
+hns3_dev_link_update(struct rte_eth_dev *eth_dev,
+ __rte_unused int wait_to_complete)
+{
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_mac *mac = &hw->mac;
+ struct rte_eth_link new_link;
+
+ memset(&new_link, 0, sizeof(new_link));
+ switch (mac->link_speed) {
+ case ETH_SPEED_NUM_10M:
+ case ETH_SPEED_NUM_100M:
+ case ETH_SPEED_NUM_1G:
+ case ETH_SPEED_NUM_10G:
+ case ETH_SPEED_NUM_25G:
+ case ETH_SPEED_NUM_40G:
+ case ETH_SPEED_NUM_50G:
+ case ETH_SPEED_NUM_100G:
+ new_link.link_speed = mac->link_speed;
+ break;
+ default:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ }
+
+ new_link.link_duplex = mac->link_duplex;
+ new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+ new_link.link_autoneg =
+ !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+
+ return rte_eth_linkstatus_set(eth_dev, &new_link);
+}
+
static int
hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
{
int ret;
int i;
- memset(mgr_table, 0, sizeof(mgr_table));
- hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
- for (i = 0; i < table_item_num; i++) {
- ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
- if (ret) {
- PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
- ret);
- return ret;
- }
- }
+ memset(mgr_table, 0, sizeof(mgr_table));
+ hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
+ for (i = 0; i < table_item_num; i++) {
+ ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
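+/* Build the promiscuous mode (uc/mc/bc) parameter for the given vport. */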
+static void
+hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
+ bool en_mc, bool en_bc, int vport_id)
+{
+ if (!param)
+ return;
+
+ memset(param, 0, sizeof(struct hns3_promisc_param));
+ if (en_uc)
+ param->enable = HNS3_PROMISC_EN_UC;
+ if (en_mc)
+ param->enable |= HNS3_PROMISC_EN_MC;
+ if (en_bc)
+ param->enable |= HNS3_PROMISC_EN_BC;
+ param->vf_id = vport_id;
+}
+
+static int
+hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
+{
+ struct hns3_promisc_cfg_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
+
+ req = (struct hns3_promisc_cfg_cmd *)desc.data;
+ req->vf_id = param->vf_id;
+ req->flag = (param->enable << HNS3_PROMISC_EN_B) |
+ HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Set promisc mode fail, status is %d", ret);
+
+ return ret;
+}
+
+static int
+hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
+{
+ struct hns3_promisc_param param;
+ bool en_bc_pmc = true;
+ uint8_t vf_id;
+ int ret;
+
+ /*
+ * In the current version, VF is not supported when the PF is driven by
+ * the DPDK driver; the PF-related vf_id is 0, so only the parameters
+ * for vf_id 0 need to be configured.
+ */
+ vf_id = 0;
+
+ hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
+ ret = hns3_cmd_set_promisc_mode(hw, &param);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
+ int ret = 0;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_set_promisc_mode(hw, true, en_mc_pmc);
+ rte_spinlock_unlock(&hw->lock);
+ if (ret)
+ hns3_err(hw, "Failed to enable promiscuous mode: %d", ret);
+
+ return ret;
+}
+
+static int
+hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
+ int ret = 0;
+
+ /* If now in all_multicast mode, must remain in all_multicast mode. */
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_set_promisc_mode(hw, false, en_mc_pmc);
+ rte_spinlock_unlock(&hw->lock);
+ if (ret)
+ hns3_err(hw, "Failed to disable promiscuous mode: %d", ret);
+
+ return ret;
+}
+
+static int
+hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
+ int ret = 0;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_set_promisc_mode(hw, en_uc_pmc, true);
+ rte_spinlock_unlock(&hw->lock);
+ if (ret)
+ hns3_err(hw, "Failed to enable allmulticast mode: %d", ret);
+
+ return ret;
+}
+
+static int
+hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
+ int ret = 0;
+
+ /* If now in promiscuous mode, must remain in all_multicast mode. */
+ if (dev->data->promiscuous == 1)
+ return 0;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_set_promisc_mode(hw, en_uc_pmc, false);
+ rte_spinlock_unlock(&hw->lock);
+ if (ret)
+ hns3_err(hw, "Failed to disable allmulticast mode: %d", ret);
+
+ return ret;
+}
+
+static int
+hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed)
+{
+ struct hns3_sfp_speed_cmd *resp;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SFP_GET_SPEED, true);
+ resp = (struct hns3_sfp_speed_cmd *)desc.data;
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ hns3_err(hw, "IMP do not support get SFP speed %d", ret);
+ return ret;
+ } else if (ret) {
+ hns3_err(hw, "get sfp speed failed %d", ret);
+ return ret;
+ }
+
+ *speed = resp->sfp_speed;
+
+ return 0;
+}
+
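+/* Half duplex is only valid at 10M/100M; force full duplex otherwise. */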
+static uint8_t
+hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
+{
+ if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
+ duplex = ETH_LINK_FULL_DUPLEX;
+
+ return duplex;
+}
+
+static int
+hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
+{
+ struct hns3_mac *mac = &hw->mac;
+ int ret;
+
+ duplex = hns3_check_speed_dup(duplex, speed);
+ if (mac->link_speed == speed && mac->link_duplex == duplex)
+ return 0;
+
+ ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
+ if (ret)
+ return ret;
+
+ mac->link_speed = speed;
+ mac->link_duplex = duplex;
return 0;
}
-static void
-hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
- bool en_mc, bool en_bc, int vport_id)
+static int
+hns3_update_speed_duplex(struct rte_eth_dev *eth_dev)
{
- if (!param)
- return;
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+ uint32_t speed;
+ int ret;
- memset(param, 0, sizeof(struct hns3_promisc_param));
- if (en_uc)
- param->enable = HNS3_PROMISC_EN_UC;
- if (en_mc)
- param->enable |= HNS3_PROMISC_EN_MC;
- if (en_bc)
- param->enable |= HNS3_PROMISC_EN_BC;
- param->vf_id = vport_id;
+ /* If the IMP does not support querying SFP/qSFP speed, return directly */
+ if (!pf->support_sfp_query)
+ return 0;
+
+ ret = hns3_get_sfp_speed(hw, &speed);
+ if (ret == -EOPNOTSUPP) {
+ pf->support_sfp_query = false;
+ return ret;
+ } else if (ret)
+ return ret;
+
+ if (speed == ETH_SPEED_NUM_NONE)
+ return 0; /* do nothing if no SFP */
+
+ /* Config full duplex for SFP */
+ return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX);
}
static int
-hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
+hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
{
- struct hns3_promisc_cfg_cmd *req;
+ struct hns3_config_mac_mode_cmd *req;
struct hns3_cmd_desc desc;
+ uint32_t loop_en = 0;
+ uint8_t val = 0;
int ret;
- hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
-
- req = (struct hns3_promisc_cfg_cmd *)desc.data;
- req->vf_id = param->vf_id;
- req->flag = (param->enable << HNS3_PROMISC_EN_B) |
- HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
+ req = (struct hns3_config_mac_mode_cmd *)desc.data;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false);
+ if (enable)
+ val = 1;
+ hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0);
+ hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0);
+ hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0);
+ hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0);
+ hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val);
+ hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val);
+ req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret)
- PMD_INIT_LOG(ERR, "Set promisc mode fail, status is %d", ret);
+ PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret);
return ret;
}
static int
-hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
+hns3_get_mac_link_status(struct hns3_hw *hw)
{
- struct hns3_promisc_param param;
- bool en_bc_pmc = true;
- uint8_t vf_id;
+ struct hns3_link_status_cmd *req;
+ struct hns3_cmd_desc desc;
+ int link_status;
int ret;
- /*
- * In the current version, VF is not supported when the PF is driven by
- * the DPDK driver; the PF-related vf_id is 0, so only the parameters
- * for vf_id 0 need to be configured.
- */
- vf_id = 0;
-
- hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
- ret = hns3_cmd_set_promisc_mode(hw, &param);
- if (ret)
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true);
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "get link status cmd failed %d", ret);
return ret;
+ }
- return 0;
+ req = (struct hns3_link_status_cmd *)desc.data;
+ link_status = req->status & HNS3_LINK_STATUS_UP_M;
+
+ return !!link_status;
+}
+
+static void
+hns3_update_link_status(struct hns3_hw *hw)
+{
+ int state;
+
+ state = hns3_get_mac_link_status(hw);
+ if (state != hw->mac.link_status)
+ hw->mac.link_status = state;
+}
+
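+/* Periodic service task: refresh SFP speed and link status, then re-arm. */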
+static void
+hns3_service_handler(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+
+ hns3_update_speed_duplex(eth_dev);
+ hns3_update_link_status(hw);
+
+ rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
}
static int
goto err_mac_init;
}
+ ret = hns3_init_vlan_config(hns);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret);
+ goto err_mac_init;
+ }
+
+ ret = hns3_dcb_init(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret);
+ goto err_mac_init;
+ }
+
+ ret = hns3_init_fd_config(hns);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret);
+ goto err_mac_init;
+ }
+
ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
goto err_get_config;
}
+ /* Initialize flow director filter list & hash */
+ ret = hns3_fdir_filter_init(hns);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
+ goto err_hw_init;
+ }
+
+ hns3_set_default_rss_args(hw);
+
return 0;
+err_hw_init:
+ hns3_uninit_umv_space(hw);
+
err_get_config:
hns3_cmd_uninit(hw);
PMD_INIT_FUNC_TRACE();
+ hns3_rss_uninit(hns);
+ hns3_fdir_filter_uninit(hns);
hns3_uninit_umv_space(hw);
hns3_cmd_uninit(hw);
hns3_cmd_destroy_queue(hw);
hw->io_base = NULL;
}
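+/* Apply the DCB configuration, start the queues and enable the MAC. */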
+static int
+hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3_dcb_cfg_update(hns);
+ if (ret)
+ return ret;
+
+ /* Enable queues */
+ ret = hns3_start_queues(hns, reset_queue);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to start queues: %d", ret);
+ return ret;
+ }
+
+ /* Enable MAC */
+ ret = hns3_cfg_mac_mode(hw, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to enable MAC: %d", ret);
+ goto err_config_mac_mode;
+ }
+ return 0;
+
+err_config_mac_mode:
+ hns3_stop_queues(hns, true);
+ return ret;
+}
+
+static int
+hns3_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_spinlock_lock(&hw->lock);
+ hw->adapter_state = HNS3_NIC_STARTING;
+
+ ret = hns3_do_start(hns, true);
+ if (ret) {
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
+ hw->adapter_state = HNS3_NIC_STARTED;
+ rte_spinlock_unlock(&hw->lock);
+ hns3_set_rxtx_function(eth_dev);
+
+ hns3_info(hw, "hns3 dev start successful!");
+ return 0;
+}
+
+static int
+hns3_do_stop(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ bool reset_queue;
+ int ret;
+
+ ret = hns3_cfg_mac_mode(hw, false);
+ if (ret)
+ return ret;
+ hw->mac.link_status = ETH_LINK_DOWN;
+
+ hns3_configure_all_mac_addr(hns, true);
+ reset_queue = true;
+ hw->mac.default_addr_setted = false;
+ return hns3_stop_queues(hns, reset_queue);
+}
+
+static void
+hns3_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->adapter_state = HNS3_NIC_STOPPING;
+ hns3_set_rxtx_function(eth_dev);
+
+ rte_spinlock_lock(&hw->lock);
+
+ hns3_do_stop(hns);
+ hns3_dev_release_mbufs(hns);
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ rte_spinlock_unlock(&hw->lock);
+}
+
static void
hns3_dev_close(struct rte_eth_dev *eth_dev)
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ if (hw->adapter_state == HNS3_NIC_STARTED)
+ hns3_dev_stop(eth_dev);
+
hw->adapter_state = HNS3_NIC_CLOSING;
+ rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
hns3_configure_all_mc_mac_addr(hns, true);
+ hns3_remove_all_vlan_table(hns);
+ hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
hns3_uninit_pf(eth_dev);
+ hns3_free_all_queues(eth_dev);
+ rte_free(eth_dev->process_private);
+ eth_dev->process_private = NULL;
hw->adapter_state = HNS3_NIC_CLOSED;
+ hns3_warn(hw, "Close port %d finished", hw->data->port_id);
+}
+
+static int
+hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ fc_conf->pause_time = pf->pause_time;
+
+ /* return fc current mode */
+ switch (hw->current_mode) {
+ case HNS3_FC_FULL:
+ fc_conf->mode = RTE_FC_FULL;
+ break;
+ case HNS3_FC_TX_PAUSE:
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ break;
+ case HNS3_FC_RX_PAUSE:
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ break;
+ case HNS3_FC_NONE:
+ default:
+ fc_conf->mode = RTE_FC_NONE;
+ break;
+ }
+
+ return 0;
+}
+
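+/* Map the generic DPDK flow control mode onto the driver's fc mode. */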
+static void
+hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
+{
+ switch (mode) {
+ case RTE_FC_NONE:
+ hw->requested_mode = HNS3_FC_NONE;
+ break;
+ case RTE_FC_RX_PAUSE:
+ hw->requested_mode = HNS3_FC_RX_PAUSE;
+ break;
+ case RTE_FC_TX_PAUSE:
+ hw->requested_mode = HNS3_FC_TX_PAUSE;
+ break;
+ case RTE_FC_FULL:
+ hw->requested_mode = HNS3_FC_FULL;
+ break;
+ default:
+ hw->requested_mode = HNS3_FC_NONE;
+ hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
+ "configured to RTE_FC_NONE", mode);
+ break;
+ }
+}
+
+static int
+hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ if (fc_conf->high_water || fc_conf->low_water ||
+ fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
+ hns3_err(hw, "Unsupported flow control settings specified, "
+ "high_water(%u), low_water(%u), send_xon(%u) and "
+ "mac_ctrl_frame_fwd(%u) must be set to '0'",
+ fc_conf->high_water, fc_conf->low_water,
+ fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);
+ return -EINVAL;
+ }
+ if (fc_conf->autoneg) {
+ hns3_err(hw, "Unsupported fc auto-negotiation setting.");
+ return -EINVAL;
+ }
+ if (!fc_conf->pause_time) {
+ hns3_err(hw, "Invalid pause time %d setting.",
+ fc_conf->pause_time);
+ return -EINVAL;
+ }
+
+ if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
+ hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {
+ hns3_err(hw, "PFC is enabled. Cannot set MAC pause. "
+ "current_fc_status = %d", hw->current_fc_status);
+ return -EOPNOTSUPP;
+ }
+
+ hns3_get_fc_mode(hw, fc_conf->mode);
+ if (hw->requested_mode == hw->current_mode &&
+ pf->pause_time == fc_conf->pause_time)
+ return 0;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_fc_enable(dev, fc_conf);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_pfc_conf *pfc_conf)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint8_t priority;
+ int ret;
+
+ if (!hns3_dev_dcb_supported(hw)) {
+ hns3_err(hw, "This port does not support dcb configurations.");
+ return -EOPNOTSUPP;
+ }
+
+ if (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||
+ pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {
+ hns3_err(hw, "Unsupported flow control settings specified, "
+ "high_water(%u), low_water(%u), send_xon(%u) and "
+ "mac_ctrl_frame_fwd(%u) must be set to '0'",
+ pfc_conf->fc.high_water, pfc_conf->fc.low_water,
+ pfc_conf->fc.send_xon,
+ pfc_conf->fc.mac_ctrl_frame_fwd);
+ return -EINVAL;
+ }
+ if (pfc_conf->fc.autoneg) {
+ hns3_err(hw, "Unsupported fc auto-negotiation setting.");
+ return -EINVAL;
+ }
+ if (pfc_conf->fc.pause_time == 0) {
+ hns3_err(hw, "Invalid pause time %d setting.",
+ pfc_conf->fc.pause_time);
+ return -EINVAL;
+ }
+
+ if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
+ hw->current_fc_status == HNS3_FC_STATUS_PFC)) {
+ hns3_err(hw, "MAC pause is enabled. Cannot set PFC."
+ "current_fc_status = %d", hw->current_fc_status);
+ return -EOPNOTSUPP;
+ }
+
+ priority = pfc_conf->priority;
+ hns3_get_fc_mode(hw, pfc_conf->fc.mode);
+ if (hw->dcb_info.pfc_en & BIT(priority) &&
+ hw->requested_mode == hw->current_mode &&
+ pfc_conf->fc.pause_time == pf->pause_time)
+ return 0;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_dcb_pfc_enable(dev, pfc_conf);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ int i;
+
+ rte_spinlock_lock(&hw->lock);
+ if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = pf->local_max_tc;
+ else
+ dcb_info->nb_tcs = 1;
+
+ for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
+ dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i];
+ for (i = 0; i < dcb_info->nb_tcs; i++)
+ dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];
+
+ for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base =
+ hw->tc_queue[i].tqp_offset;
+ dcb_info->tc_queue.tc_txq[0][i].base =
+ hw->tc_queue[i].tqp_offset;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue =
+ hw->tc_queue[i].tqp_count;
+ dcb_info->tc_queue.tc_txq[0][i].nb_queue =
+ hw->tc_queue[i].tqp_count;
+ }
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
}
static const struct eth_dev_ops hns3_eth_dev_ops = {
+ .dev_start = hns3_dev_start,
+ .dev_stop = hns3_dev_stop,
.dev_close = hns3_dev_close,
+ .promiscuous_enable = hns3_dev_promiscuous_enable,
+ .promiscuous_disable = hns3_dev_promiscuous_disable,
+ .allmulticast_enable = hns3_dev_allmulticast_enable,
+ .allmulticast_disable = hns3_dev_allmulticast_disable,
+ .mtu_set = hns3_dev_mtu_set,
+ .dev_infos_get = hns3_dev_infos_get,
+ .fw_version_get = hns3_fw_version_get,
+ .rx_queue_setup = hns3_rx_queue_setup,
+ .tx_queue_setup = hns3_tx_queue_setup,
+ .rx_queue_release = hns3_dev_rx_queue_release,
+ .tx_queue_release = hns3_dev_tx_queue_release,
+ .dev_configure = hns3_dev_configure,
+ .flow_ctrl_get = hns3_flow_ctrl_get,
+ .flow_ctrl_set = hns3_flow_ctrl_set,
+ .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
.mac_addr_add = hns3_add_mac_addr,
.mac_addr_remove = hns3_remove_mac_addr,
.mac_addr_set = hns3_set_default_mac_addr,
.set_mc_addr_list = hns3_set_mc_mac_addr_list,
+ .link_update = hns3_dev_link_update,
+ .rss_hash_update = hns3_dev_rss_hash_update,
+ .rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
+ .reta_update = hns3_dev_rss_reta_update,
+ .reta_query = hns3_dev_rss_reta_query,
+ .filter_ctrl = hns3_dev_filter_ctrl,
+ .vlan_filter_set = hns3_vlan_filter_set,
+ .vlan_tpid_set = hns3_vlan_tpid_set,
+ .vlan_offload_set = hns3_vlan_offload_set,
+ .vlan_pvid_set = hns3_vlan_pvid_set,
+ .get_dcb_info = hns3_get_dcb_info,
+ .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
};
static int
hns3_dev_init(struct rte_eth_dev *eth_dev)
{
+ struct rte_device *dev = eth_dev->device;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ uint16_t device_id = pci_dev->id.device_id;
int ret;
PMD_INIT_FUNC_TRACE();
+ eth_dev->process_private = (struct hns3_process_private *)
+ rte_zmalloc_socket("hns3_filter_list",
+ sizeof(struct hns3_process_private),
+ RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
+ if (eth_dev->process_private == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
+ return -ENOMEM;
+ }
+ /* initialize flow filter lists */
+ hns3_filterlist_init(eth_dev);
+ hns3_set_rxtx_function(eth_dev);
eth_dev->dev_ops = &hns3_eth_dev_ops;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
hw->adapter_state = HNS3_NIC_UNINITIALIZED;
+
+ if (device_id == HNS3_DEV_ID_25GE_RDMA ||
+ device_id == HNS3_DEV_ID_50GE_RDMA ||
+ device_id == HNS3_DEV_ID_100G_RDMA_MACSEC)
+ hns3_set_bit(hw->flag, HNS3_DEV_SUPPORT_DCB_B, 1);
+
hns->is_vf = false;
hw->data = eth_dev->data;
*/
eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+ rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
hns3_info(hw, "hns3 dev initialization successful!");
return 0;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
+ rte_free(eth_dev->process_private);
+ eth_dev->process_private = NULL;
return ret;
}