+/*
+ * Remove one entry from the hardware MAC-VLAN table.
+ * @req: the table entry to delete (key fields already filled in).
+ * Returns 0 on success, a negative errno-style value on failure.
+ */
+static int
+hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
+ struct hns3_mac_vlan_tbl_entry_cmd *req)
+{
+ struct hns3_cmd_desc desc;
+ uint8_t resp_code;
+ uint16_t retval;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);
+
+ /* The whole table entry is carried in the command data area. */
+ memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
+ return ret;
+ }
+ /* Firmware response code lives in byte 1 of the first data word. */
+ resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
+ retval = rte_le_to_cpu_16(desc.retval);
+
+ /* Translate the firmware status pair into an errno-style code. */
+ return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
+ HNS3_MAC_VLAN_REMOVE);
+}
+
+/*
+ * Add a unicast MAC address to the hardware MAC-VLAN table for the
+ * PF vport. Adding an address that is already present is treated as
+ * success (returns 0); a full unicast table returns -ENOSPC.
+ */
+static int
+hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_mac_vlan_tbl_entry_cmd req;
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_cmd_desc desc;
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ uint16_t egress_port = 0;
+ uint8_t vf_id;
+ int ret;
+
+ /* check if mac addr is valid */
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
+ mac_str);
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+
+ /*
+ * In current version VF is not supported when PF is driven by DPDK
+ * driver, just need to configure parameters for PF vport.
+ */
+ vf_id = HNS3_PF_FUNC_ID;
+ hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
+ HNS3_MAC_EPORT_VFID_S, vf_id);
+
+ req.egress_port = rte_cpu_to_le_16(egress_port);
+
+ hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
+
+ /*
+ * Lookup the mac address in the mac_vlan table, and add
+ * it if the entry is inexistent. Repeated unicast entry
+ * is not allowed in the mac vlan table.
+ */
+ ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc, false);
+ if (ret == -ENOENT) {
+ if (!hns3_is_umv_space_full(hw)) {
+ ret = hns3_add_mac_vlan_tbl(hw, &req, NULL);
+ if (!ret)
+ /* One more UC entry consumed (false = allocate). */
+ hns3_update_umv_space(hw, false);
+ return ret;
+ }
+
+ hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);
+
+ return -ENOSPC;
+ }
+
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
+
+ /* check if we just hit the duplicate */
+ if (ret == 0) {
+ hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
+ return 0;
+ }
+
+ hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
+ mac_str);
+
+ return ret;
+}
+
+/*
+ * Add one multicast MAC address, rejecting addresses that are already
+ * tracked in the driver's multicast list (hw->mc_addrs).
+ */
+static int
+hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
+{
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ int ret;
+ int i;
+
+ /* Refuse duplicates of addresses set via set_mc_mac_addr_list. */
+ for (i = 0; i < hw->mc_addrs_num; i++) {
+ if (rte_is_same_ether_addr(&hw->mc_addrs[i], mac_addr)) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ &hw->mc_addrs[i]);
+ hns3_err(hw, "failed to add mc mac addr, same addrs"
+ "(%s) is added by the set_mc_mac_addr_list "
+ "API", mac_str);
+ return -EINVAL;
+ }
+ }
+
+ ret = hns3_add_mc_addr(hw, mac_addr);
+ if (ret != 0) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
+ mac_str, ret);
+ }
+ return ret;
+}
+
+/*
+ * Remove one multicast MAC address from hardware, logging on failure.
+ */
+static int
+hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
+{
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ ret = hns3_remove_mc_addr(hw, mac_addr);
+ if (ret == 0)
+ return 0;
+
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
+ hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d",
+ mac_str, ret);
+ return ret;
+}
+
+/*
+ * .mac_addr_add ethdev callback: add a UC or MC MAC address.
+ * Index 0 is treated as the port's default MAC address.
+ */
+static int
+hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+ uint32_t idx, __rte_unused uint32_t pool)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+
+ /*
+ * In hns3 network engine adding UC and MC mac address with different
+ * commands with firmware. We need to determine whether the input
+ * address is a UC or a MC address to call different commands.
+ * By the way, it is recommended calling the API function named
+ * rte_eth_dev_set_mc_addr_list to set the MC mac address, because
+ * using the rte_eth_dev_mac_addr_add API function to set MC mac address
+ * may affect the specifications of UC mac addresses.
+ */
+ if (rte_is_multicast_ether_addr(mac_addr))
+ ret = hns3_add_mc_addr_common(hw, mac_addr);
+ else
+ ret = hns3_add_uc_addr_common(hw, mac_addr);
+
+ if (ret) {
+ rte_spinlock_unlock(&hw->lock);
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
+ ret);
+ return ret;
+ }
+
+ /* Slot 0 of mac_addrs holds the port's default address. */
+ if (idx == 0)
+ hw->mac.default_addr_setted = true;
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+/*
+ * Delete a unicast MAC address from the hardware MAC-VLAN table.
+ * A missing entry (-ENOENT) is treated as success.
+ */
+static int
+hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
+{
+ struct hns3_mac_vlan_tbl_entry_cmd req;
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ /* check if mac addr is valid */
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
+ mac_str);
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
+ hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
+ ret = hns3_remove_mac_vlan_tbl(hw, &req);
+ if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */
+ return 0;
+ else if (ret == 0)
+ /* Entry removed: give one UC slot back (true = free). */
+ hns3_update_umv_space(hw, true);
+
+ return ret;
+}
+
+/*
+ * .mac_addr_remove ethdev callback: delete the MAC address stored at
+ * the given index, dispatching to the UC or MC removal path.
+ */
+static void
+hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /* index will be checked by upper level rte interface */
+ struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = rte_is_multicast_ether_addr(mac_addr) ?
+ hns3_remove_mc_addr_common(hw, mac_addr) :
+ hns3_remove_uc_addr_common(hw, mac_addr);
+ rte_spinlock_unlock(&hw->lock);
+
+ if (ret == 0)
+ return;
+
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
+ hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
+ ret);
+}
+
+/*
+ * .mac_addr_set ethdev callback: replace the port's default MAC
+ * address. On failure the function rolls back:
+ * - a pause-address failure removes the just-added UC entry;
+ * - any failure restores the old UC entry if it had been removed.
+ */
+static int
+hns3_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_ether_addr *oaddr;
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ bool default_addr_setted;
+ bool rm_succes = false;
+ int ret, ret_val;
+
+ /*
+ * It has been guaranteed that input parameter named mac_addr is valid
+ * address in the rte layer of DPDK framework.
+ */
+ oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
+ default_addr_setted = hw->mac.default_addr_setted;
+ /* Nothing to do if the requested address is already the default. */
+ if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr))
+ return 0;
+
+ rte_spinlock_lock(&hw->lock);
+ if (default_addr_setted) {
+ /* Best-effort removal of the old address; continue on error. */
+ ret = hns3_remove_uc_addr_common(hw, oaddr);
+ if (ret) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ oaddr);
+ hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
+ mac_str, ret);
+ rm_succes = false;
+ } else
+ rm_succes = true;
+ }
+
+ ret = hns3_add_uc_addr_common(hw, mac_addr);
+ if (ret) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
+ goto err_add_uc_addr;
+ }
+
+ /* The pause (flow-control) frames must carry the new MAC too. */
+ ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
+ if (ret) {
+ hns3_err(hw, "Failed to configure mac pause address: %d", ret);
+ goto err_pause_addr_cfg;
+ }
+
+ rte_ether_addr_copy(mac_addr,
+ (struct rte_ether_addr *)hw->mac.mac_addr);
+ hw->mac.default_addr_setted = true;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+
+err_pause_addr_cfg:
+ /* Roll back the UC entry that was just added. */
+ ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
+ if (ret_val) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_warn(hw,
+ "Failed to roll back to del setted mac addr(%s): %d",
+ mac_str, ret_val);
+ }
+
+err_add_uc_addr:
+ /* Restore the previous default address if we removed it above. */
+ if (rm_succes) {
+ ret_val = hns3_add_uc_addr_common(hw, oaddr);
+ if (ret_val) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ oaddr);
+ hns3_warn(hw,
+ "Failed to restore old uc mac addr(%s): %d",
+ mac_str, ret_val);
+ hw->mac.default_addr_setted = false;
+ }
+ }
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+/*
+ * Remove (del == true) or restore (del == false) every configured MAC
+ * address in hw->data->mac_addrs. Continues past individual failures
+ * and returns the last error seen (0 if all succeeded).
+ */
+static int
+hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
+{
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_ether_addr *addr;
+ int err = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) {
+ addr = &hw->data->mac_addrs[i];
+ /* Empty slots are all-zero addresses. */
+ if (rte_is_zero_ether_addr(addr))
+ continue;
+ if (rte_is_multicast_ether_addr(addr))
+ ret = del ? hns3_remove_mc_addr(hw, addr) :
+ hns3_add_mc_addr(hw, addr);
+ else
+ ret = del ? hns3_remove_uc_addr_common(hw, addr) :
+ hns3_add_uc_addr_common(hw, addr);
+
+ if (ret) {
+ err = ret;
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ addr);
+ hns3_err(hw, "failed to %s mac addr(%s) index:%d "
+ "ret = %d.", del ? "remove" : "restore",
+ mac_str, i, ret);
+ }
+ }
+ return err;
+}
+
+/*
+ * Set or clear (clr == true) one function's bit in the VF bitmap of a
+ * multi-descriptor MAC-VLAN table command. The bitmap spans desc[1]
+ * (first 192 functions) and desc[2] (the remainder).
+ */
+static void
+hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
+{
+#define HNS3_VF_NUM_IN_FIRST_DESC 192
+ struct hns3_cmd_desc *d;
+ uint32_t mask;
+ uint8_t word_num;
+ uint8_t bit_num;
+
+ /* Pick the descriptor, then the 32-bit word and bit within it. */
+ if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
+ d = &desc[1];
+ word_num = vfid / 32;
+ } else {
+ d = &desc[2];
+ word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
+ }
+ bit_num = vfid % 32;
+ mask = rte_cpu_to_le_32(1UL << bit_num);
+
+ if (clr)
+ d->data[word_num] &= ~mask;
+ else
+ d->data[word_num] |= mask;
+}
+
+/*
+ * Add a multicast MAC address into the hardware MC MAC-VLAN table and
+ * mark the PF function in the entry's VF bitmap.
+ * Returns 0 on success, -EINVAL for a non-multicast address, -ENOSPC
+ * when the MC table is full.
+ */
+static int
+hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
+{
+ struct hns3_mac_vlan_tbl_entry_cmd req;
+ struct hns3_cmd_desc desc[3];
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ uint8_t vf_id;
+ int ret;
+
+ /* Check if mac addr is valid */
+ if (!rte_is_multicast_ether_addr(mac_addr)) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
+ mac_str);
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
+ hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
+ ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
+ if (ret) {
+ /*
+ * This mac addr do not exist, add new entry for it.
+ * Clear each descriptor's own data area; the previous code
+ * used sizeof(desc[0].data) for all three (a copy-paste that
+ * was only correct because the sizes happen to match).
+ */
+ memset(desc[0].data, 0, sizeof(desc[0].data));
+ memset(desc[1].data, 0, sizeof(desc[1].data));
+ memset(desc[2].data, 0, sizeof(desc[2].data));
+ }
+
+ /*
+ * In current version VF is not supported when PF is driven by DPDK
+ * driver, just need to configure parameters for PF vport.
+ */
+ vf_id = HNS3_PF_FUNC_ID;
+ hns3_update_desc_vfid(desc, vf_id, false);
+ ret = hns3_add_mac_vlan_tbl(hw, &req, desc);
+ if (ret) {
+ if (ret == -ENOSPC)
+ hns3_err(hw, "mc mac vlan table is full");
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
+ }
+
+ return ret;
+}
+
+/*
+ * Remove a multicast MAC address: clear the PF bit in the entry's VF
+ * bitmap, then delete the table entry. A non-existent entry is
+ * treated as success.
+ */
+static int
+hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
+{
+ struct hns3_mac_vlan_tbl_entry_cmd req;
+ struct hns3_cmd_desc desc[3];
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ uint8_t vf_id;
+ int ret;
+
+ /* Check if mac addr is valid */
+ if (!rte_is_multicast_ether_addr(mac_addr)) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
+ mac_str);
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
+ hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
+ ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
+ if (ret == 0) {
+ /*
+ * This mac addr exist, remove this handle's VFID for it.
+ * In current version VF is not supported when PF is driven by
+ * DPDK driver, just need to configure parameters for PF vport.
+ */
+ vf_id = HNS3_PF_FUNC_ID;
+ hns3_update_desc_vfid(desc, vf_id, true);
+
+ /* All the vfid is zero, so need to delete this entry */
+ ret = hns3_remove_mac_vlan_tbl(hw, &req);
+ } else if (ret == -ENOENT) {
+ /* This mac addr doesn't exist. */
+ return 0;
+ }
+
+ if (ret) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
+ }
+
+ return ret;
+}
+
+/*
+ * Validate a multicast address list before it is applied:
+ * - the list must not exceed HNS3_MC_MACADDR_NUM entries;
+ * - every entry must be a multicast address;
+ * - no duplicates within the list;
+ * - no overlap with addresses configured via the UC mac_addrs table.
+ */
+static int
+hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ struct rte_ether_addr *addr;
+ uint32_t i;
+ uint32_t j;
+
+ if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
+ /* "%u" here: nb_mc_addr is uint32_t, not int. */
+ hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
+ "invalid. valid range: 0~%d",
+ nb_mc_addr, HNS3_MC_MACADDR_NUM);
+ return -EINVAL;
+ }
+
+ /* Check if input mac addresses are valid */
+ for (i = 0; i < nb_mc_addr; i++) {
+ addr = &mc_addr_set[i];
+ if (!rte_is_multicast_ether_addr(addr)) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ addr);
+ hns3_err(hw,
+ "failed to set mc mac addr, addr(%s) invalid.",
+ mac_str);
+ return -EINVAL;
+ }
+
+ /* Check if there are duplicate addresses */
+ for (j = i + 1; j < nb_mc_addr; j++) {
+ if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
+ rte_ether_format_addr(mac_str,
+ RTE_ETHER_ADDR_FMT_SIZE,
+ addr);
+ hns3_err(hw, "failed to set mc mac addr, "
+ "addrs invalid. two same addrs(%s).",
+ mac_str);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Check if there are duplicate addresses between mac_addrs
+ * and mc_addr_set
+ */
+ for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) {
+ if (rte_is_same_ether_addr(addr,
+ &hw->data->mac_addrs[j])) {
+ rte_ether_format_addr(mac_str,
+ RTE_ETHER_ADDR_FMT_SIZE,
+ addr);
+ hns3_err(hw, "failed to set mc mac addr, "
+ "addrs invalid. addrs(%s) has already "
+ "configured in mac_addr add API",
+ mac_str);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Diff the requested MC address list against the list currently held
+ * by the driver (hw->mc_addrs). Produces three output lists with
+ * their element counts: addresses to keep (reserved), to add, and to
+ * remove. hw->mc_addrs itself is reordered so kept entries come
+ * first, followed by the to-be-removed entries.
+ */
+static void
+hns3_set_mc_addr_calc_addr(struct hns3_hw *hw,
+ struct rte_ether_addr *mc_addr_set,
+ int mc_addr_num,
+ struct rte_ether_addr *reserved_addr_list,
+ int *reserved_addr_num,
+ struct rte_ether_addr *add_addr_list,
+ int *add_addr_num,
+ struct rte_ether_addr *rm_addr_list,
+ int *rm_addr_num)
+{
+ struct rte_ether_addr *addr;
+ int current_addr_num;
+ int reserved_num = 0;
+ int add_num = 0;
+ int rm_num = 0;
+ int num;
+ int i;
+ int j;
+ bool same_addr;
+
+ /* Calculate the mc mac address list that should be removed */
+ current_addr_num = hw->mc_addrs_num;
+ for (i = 0; i < current_addr_num; i++) {
+ addr = &hw->mc_addrs[i];
+ same_addr = false;
+ for (j = 0; j < mc_addr_num; j++) {
+ if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
+ same_addr = true;
+ break;
+ }
+ }
+
+ if (!same_addr) {
+ rte_ether_addr_copy(addr, &rm_addr_list[rm_num]);
+ rm_num++;
+ } else {
+ rte_ether_addr_copy(addr,
+ &reserved_addr_list[reserved_num]);
+ reserved_num++;
+ }
+ }
+
+ /* Calculate the mc mac address list that should be added */
+ for (i = 0; i < mc_addr_num; i++) {
+ addr = &mc_addr_set[i];
+ same_addr = false;
+ for (j = 0; j < current_addr_num; j++) {
+ if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) {
+ same_addr = true;
+ break;
+ }
+ }
+
+ if (!same_addr) {
+ rte_ether_addr_copy(addr, &add_addr_list[add_num]);
+ add_num++;
+ }
+ }
+
+ /* Reorder the mc mac address list maintained by driver */
+ for (i = 0; i < reserved_num; i++)
+ rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]);
+
+ /* Removed entries are parked after the kept ones. */
+ for (i = 0; i < rm_num; i++) {
+ num = reserved_num + i;
+ rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]);
+ }
+
+ *reserved_addr_num = reserved_num;
+ *add_addr_num = add_num;
+ *rm_addr_num = rm_num;
+}
+
+/*
+ * .set_mc_addr_list ethdev callback: replace the multicast address
+ * list. Computes the delta against the current list and only removes
+ * and adds the addresses that changed. Stops at the first hardware
+ * error and returns it.
+ */
+static int
+hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM];
+ struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM];
+ struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM];
+ struct rte_ether_addr *addr;
+ int reserved_addr_num;
+ int add_addr_num;
+ int rm_addr_num;
+ int mc_addr_num;
+ int num;
+ int ret;
+ int i;
+
+ /* Check if input parameters are valid */
+ ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
+ if (ret)
+ return ret;
+
+ rte_spinlock_lock(&hw->lock);
+
+ /*
+ * Calculate the mc mac address lists those should be removed and be
+ * added, Reorder the mc mac address list maintained by driver.
+ */
+ mc_addr_num = (int)nb_mc_addr;
+ hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num,
+ reserved_addr_list, &reserved_addr_num,
+ add_addr_list, &add_addr_num,
+ rm_addr_list, &rm_addr_num);
+
+ /* Remove mc mac addresses (iterating the list back to front). */
+ for (i = 0; i < rm_addr_num; i++) {
+ num = rm_addr_num - i - 1;
+ addr = &rm_addr_list[num];
+ ret = hns3_remove_mc_addr(hw, addr);
+ if (ret) {
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+ hw->mc_addrs_num--;
+ }
+
+ /* Add mc mac addresses */
+ for (i = 0; i < add_addr_num; i++) {
+ addr = &add_addr_list[i];
+ ret = hns3_add_mc_addr(hw, addr);
+ if (ret) {
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
+ /* Append after the entries kept by the calc step. */
+ num = reserved_addr_num + i;
+ rte_ether_addr_copy(addr, &hw->mc_addrs[num]);
+ hw->mc_addrs_num++;
+ }
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+/*
+ * Remove (del == true) or restore (del == false) every multicast
+ * address tracked in hw->mc_addrs. Keeps going after individual
+ * failures and returns the last error seen (0 if none failed).
+ */
+static int
+hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
+{
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_ether_addr *addr;
+ int err = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i < hw->mc_addrs_num; i++) {
+ addr = &hw->mc_addrs[i];
+ if (!rte_is_multicast_ether_addr(addr))
+ continue;
+
+ ret = del ? hns3_remove_mc_addr(hw, addr) :
+ hns3_add_mc_addr(hw, addr);
+ if (ret == 0)
+ continue;
+
+ err = ret;
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ addr);
+ hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d",
+ del ? "Remove" : "Restore", mac_str, ret);
+ }
+ return err;
+}
+
+/*
+ * Validate the requested Rx/Tx multi-queue modes: VMDq modes are not
+ * supported by this driver, and DCB settings must use 4 or 8 TCs with
+ * an identical Rx/Tx TC configuration.
+ */
+static int
+hns3_check_mq_mode(struct rte_eth_dev *dev)
+{
+ enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_dcb_rx_conf *dcb_rx_conf;
+ struct rte_eth_dcb_tx_conf *dcb_tx_conf;
+ uint8_t num_tc;
+ int max_tc = 0;
+ int i;
+
+ dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+
+ /* VMDq is not supported by this hardware/driver combination. */
+ if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+ hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB_RSS is not supported. "
+ "rx_mq_mode = %d", rx_mq_mode);
+ return -EINVAL;
+ }
+
+ if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB ||
+ tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB and ETH_MQ_TX_VMDQ_DCB "
+ "is not supported. rx_mq_mode = %d, tx_mq_mode = %d",
+ rx_mq_mode, tx_mq_mode);
+ return -EINVAL;
+ }
+
+ if (rx_mq_mode == ETH_MQ_RX_DCB_RSS) {
+ if (dcb_rx_conf->nb_tcs > pf->tc_max) {
+ hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
+ dcb_rx_conf->nb_tcs, pf->tc_max);
+ return -EINVAL;
+ }
+
+ /* Hardware only supports exactly 4 or 8 TCs in DCB mode. */
+ if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
+ dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
+ hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+ "nb_tcs(%d) != %d or %d in rx direction.",
+ dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
+ return -EINVAL;
+ }
+
+ if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
+ hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
+ dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
+ return -EINVAL;
+ }
+
+ /* Rx and Tx priority-to-TC maps must agree entry by entry. */
+ for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
+ if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
+ hns3_err(hw, "dcb_tc[%d] = %d in rx direction, "
+ "is not equal to one in tx direction.",
+ i, dcb_rx_conf->dcb_tc[i]);
+ return -EINVAL;
+ }
+ if (dcb_rx_conf->dcb_tc[i] > max_tc)
+ max_tc = dcb_rx_conf->dcb_tc[i];
+ }
+
+ /* The highest mapped TC must fit within nb_tcs. */
+ num_tc = max_tc + 1;
+ if (num_tc > dcb_rx_conf->nb_tcs) {
+ hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
+ num_tc, dcb_rx_conf->nb_tcs);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Check that DCB may be configured on this port: the hardware must
+ * support DCB and MAC pause must not be active, then the multi-queue
+ * mode settings are validated.
+ */
+static int
+hns3_check_dcb_cfg(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ const char *reason = NULL;
+
+ if (!hns3_dev_dcb_supported(hw))
+ reason = "this port does not support dcb configurations.";
+ else if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
+ reason = "MAC pause enabled, cannot config dcb info.";
+
+ if (reason != NULL) {
+ hns3_err(hw, "%s", reason);
+ return -EOPNOTSUPP;
+ }
+
+ /* Check multiple queue mode */
+ return hns3_check_mq_mode(dev);
+}
+
+/*
+ * Map (mmap == true) or unmap a single Tx or Rx ring to/from an MSI-X
+ * vector via a firmware command.
+ */
+static int
+hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
+ enum hns3_ring_type queue_type, uint16_t queue_id)
+{
+ struct hns3_cmd_desc desc;
+ struct hns3_ctrl_vector_chain_cmd *req =
+ (struct hns3_ctrl_vector_chain_cmd *)desc.data;
+ enum hns3_cmd_status status;
+ enum hns3_opcode_type op;
+ uint16_t tqp_type_and_id = 0;
+ const char *op_str;
+ uint16_t type;
+ uint16_t gl;
+
+ op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
+ hns3_cmd_setup_basic_desc(&desc, op, false);
+ req->int_vector_id = vector_id;
+
+ /* Select the GL (gap limiter) index matching the ring direction. */
+ if (queue_type == HNS3_RING_TYPE_RX)
+ gl = HNS3_RING_GL_RX;
+ else
+ gl = HNS3_RING_GL_TX;
+
+ type = queue_type;
+
+ /* Pack ring type, queue id and GL index into one 16-bit field. */
+ hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
+ type);
+ hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
+ hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
+ gl);
+ req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
+ req->int_cause_num = 1;
+ op_str = mmap ? "Map" : "Unmap";
+ status = hns3_cmd_send(hw, &desc, 1);
+ if (status) {
+ hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.",
+ op_str, queue_id, req->int_vector_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+/*
+ * Initialization-time interrupt housekeeping: program default
+ * coalescing parameters for every queue pair and unbind all Tx/Rx
+ * rings from their interrupt vectors.
+ */
+static int
+hns3_init_ring_with_vector(struct hns3_hw *hw)
+{
+ uint16_t vec;
+ int ret;
+ int i;
+
+ /*
+ * In hns3 network engine, vector 0 is always the misc interrupt of this
+ * function, vector 1~N can be used respectively for the queues of the
+ * function. Tx and Rx queues with the same number share the interrupt
+ * vector. In the initialization clearing the all hardware mapping
+ * relationship configurations between queues and interrupt vectors is
+ * needed, so some error caused by the residual configurations, such as
+ * the unexpected Tx interrupt, can be avoid.
+ */
+ vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+ if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
+ vec = vec - 1; /* the last interrupt is reserved */
+ hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
+ for (i = 0; i < hw->intr_tqps_num; i++) {
+ /*
+ * Set gap limiter/rate limiter/quanity limiter algorithm
+ * configuration for interrupt coalesce of queue's interrupt.
+ */
+ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+ HNS3_TQP_INTR_GL_DEFAULT);
+ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+ HNS3_TQP_INTR_GL_DEFAULT);
+ hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+ hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
+
+ /* false = unbind: clear any residual mapping for this queue. */
+ ret = hns3_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_TX, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
+ "vector: %d, ret=%d", i, vec, ret);
+ return ret;
+ }
+
+ ret = hns3_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_RX, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
+ "vector: %d, ret=%d", i, vec, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * .dev_configure ethdev callback: apply rte_eth_conf to the device.
+ * Sets up fake Tx/Rx queues first (hardware requires equal queue
+ * counts), then DCB, RSS, MTU, VLAN and GRO as requested. On any
+ * failure the fake queue setup is undone and the adapter state
+ * reverts to HNS3_NIC_INITIALIZED.
+ */
+static int
+hns3_dev_configure(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_rss_conf *rss_cfg = &hw->rss_info;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+ struct rte_eth_rss_conf rss_conf;
+ uint16_t mtu;
+ bool gro_en;
+ int ret;
+
+ /*
+ * Hardware does not support individually enable/disable/reset the Tx or
+ * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx
+ * and Rx queues at the same time. When the numbers of Tx queues
+ * allocated by upper applications are not equal to the numbers of Rx
+ * queues, driver needs to setup fake Tx or Rx queues to adjust numbers
+ * of Tx/Rx queues. otherwise, network engine can not work as usual. But
+ * these fake queues are imperceptible, and can not be used by upper
+ * applications.
+ */
+ ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+ if (ret) {
+ hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
+ return ret;
+ }
+
+ hw->adapter_state = HNS3_NIC_CONFIGURING;
+ if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ hns3_err(hw, "setting link speed/duplex not supported");
+ ret = -EINVAL;
+ goto cfg_err;
+ }
+
+ if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ ret = hns3_check_dcb_cfg(dev);
+ if (ret)
+ goto cfg_err;
+ }
+
+ /* When RSS is not configured, redirect the packet queue 0 */
+ if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ rss_conf = conf->rx_adv_conf.rss_conf;
+ /* Fall back to the driver's stored key if none was given. */
+ if (rss_conf.rss_key == NULL) {
+ rss_conf.rss_key = rss_cfg->key;
+ rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
+ }
+
+ ret = hns3_dev_rss_hash_update(dev, &rss_conf);
+ if (ret)
+ goto cfg_err;
+ }
+
+ /*
+ * If jumbo frames are enabled, MTU needs to be refreshed
+ * according to the maximum RX packet length.
+ */
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ /*
+ * Security of max_rx_pkt_len is guaranteed in dpdk frame.
+ * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it
+ * can safely assign to "uint16_t" type variable.
+ */
+ mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
+ ret = hns3_dev_mtu_set(dev, mtu);
+ if (ret)
+ goto cfg_err;
+ dev->data->mtu = mtu;
+ }
+
+ ret = hns3_dev_configure_vlan(dev);
+ if (ret)
+ goto cfg_err;
+
+ /* config hardware GRO */
+ gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+ ret = hns3_config_gro(hw, gro_en);
+ if (ret)
+ goto cfg_err;
+
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+
+ return 0;
+
+cfg_err:
+ /* Undo the fake queue setup and return to the initialized state. */
+ (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
+ hw->adapter_state = HNS3_NIC_INITIALIZED;
+
+ return ret;
+}
+
+/*
+ * Tell firmware the MAC's new maximum frame size (new_mps) via the
+ * CONFIG_MAX_FRM_SIZE command.
+ */
+static int
+hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
+{
+ struct hns3_cmd_desc desc;
+ struct hns3_config_max_frm_size_cmd *req =
+ (struct hns3_config_max_frm_size_cmd *)desc.data;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);
+ req->max_frm_size = rte_cpu_to_le_16(new_mps);
+ req->min_frm_size = RTE_ETHER_MIN_LEN;
+
+ return hns3_cmd_send(hw, &desc, 1);
+}
+
+/*
+ * Apply a new max packet size: program the MAC first, then re-balance
+ * the hardware packet buffers for the new size.
+ */
+static int
+hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
+{
+ int ret;
+
+ ret = hns3_set_mac_mtu(hw, mps);
+ if (ret != 0) {
+ hns3_err(hw, "Failed to set mtu, ret = %d", ret);
+ return ret;
+ }
+
+ ret = hns3_buffer_alloc(hw);
+ if (ret != 0)
+ hns3_err(hw, "Failed to allocate buffer, ret = %d", ret);
+
+ return ret;
+}
+
+/*
+ * .mtu_set ethdev callback: change the port MTU. Only allowed while
+ * the port is stopped; updates the jumbo-frame offload flag and
+ * max_rx_pkt_len to match.
+ */
+static int
+hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
+ struct hns3_hw *hw = &hns->hw;
+ bool jumbo_en;
+ int ret;
+
+ if (dev->data->dev_started) {
+ hns3_err(hw, "Failed to set mtu, port %u must be stopped "
+ "before configuration", dev->data->port_id);
+ return -EBUSY;
+ }
+
+ rte_spinlock_lock(&hw->lock);
+ /* Decide jumbo state before clamping to the minimum frame size. */
+ jumbo_en = frame_size > RTE_ETHER_MAX_LEN;
+ frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
+
+ /*
+ * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely
+ * assign to "uint16_t" type variable.
+ */
+ ret = hns3_config_mtu(hw, (uint16_t)frame_size);
+ if (ret != 0) {
+ rte_spinlock_unlock(&hw->lock);
+ hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
+ dev->data->port_id, mtu, ret);
+ return ret;
+ }
+
+ hns->pf.mps = (uint16_t)frame_size;
+ if (jumbo_en)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+/*
+ * .dev_infos_get ethdev callback: report queue limits, offload
+ * capabilities, descriptor limits and default configuration.
+ */
+static int
+hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
+{
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ uint16_t queue_num = hw->tqps_num;
+
+ /*
+ * In interrupt mode, 'max_rx_queues' is set based on the number of
+ * MSI-X interrupt resources of the hardware.
+ */
+ if (hw->data->dev_conf.intr_conf.rxq == 1)
+ queue_num = hw->intr_tqps_num;
+
+ info->max_rx_queues = queue_num;
+ info->max_tx_queues = hw->tqps_num;
+ info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
+ info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
+ info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
+ info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
+ info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
+ info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_SCTP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_RSS_HASH |
+ DEV_RX_OFFLOAD_TCP_LRO);
+ info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ info->tx_queue_offload_capa |
+ hns3_txvlan_cap_get(hw));
+
+ info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = HNS3_MAX_RING_DESC,
+ .nb_min = HNS3_MIN_RING_DESC,
+ .nb_align = HNS3_ALIGN_RING_DESC,
+ };
+
+ info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = HNS3_MAX_RING_DESC,
+ .nb_min = HNS3_MIN_RING_DESC,
+ .nb_align = HNS3_ALIGN_RING_DESC,
+ .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
+ .nb_mtu_seg_max = HNS3_MAX_NON_TSO_BD_PER_PKT,
+ };
+
+ info->default_rxconf = (struct rte_eth_rxconf) {
+ /*
+ * If there are no available Rx buffer descriptors, incoming
+ * packets are always dropped by hardware based on hns3 network
+ * engine.
+ */
+ .rx_drop_en = 1,
+ };
+
+ /* VMDq is not supported; see hns3_check_mq_mode(). */
+ info->vmdq_queue_num = 0;
+
+ info->reta_size = HNS3_RSS_IND_TBL_SIZE;
+ info->hash_key_size = HNS3_RSS_KEY_SIZE;
+ info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
+
+ info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
+ info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
+ info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
+ info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
+ info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
+ info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
+
+ return 0;
+}
+
+/*
+ * .fw_version_get ethdev callback: format the firmware version as
+ * "a.b.c.d". Returns 0 on success, or the required buffer size
+ * (including the terminating '\0') when fw_size is too small.
+ */
+static int
+hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+ size_t fw_size)
+{
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ uint32_t version = hw->fw_version;
+ int ret;
+
+ /*
+ * hns3_get_field() yields uint32_t values, so the conversion
+ * specifier must be "%u"; the previous "%lu" mismatched the
+ * argument type, which is undefined behavior for varargs.
+ */
+ ret = snprintf(fw_version, fw_size, "%u.%u.%u.%u",
+ hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
+ HNS3_FW_VERSION_BYTE3_S),
+ hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
+ HNS3_FW_VERSION_BYTE2_S),
+ hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
+ HNS3_FW_VERSION_BYTE1_S),
+ hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
+ HNS3_FW_VERSION_BYTE0_S));
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (uint32_t)ret)
+ return ret; /* caller's buffer too small: report needed size */
+ else
+ return 0;
+}
+
+/*
+ * Refresh and publish the link status for the ethdev layer.
+ *
+ * Unless a reset is pending, first re-reads speed/duplex and link state
+ * from hardware, then translates the cached MAC state into a
+ * struct rte_eth_link and hands it to rte_eth_linkstatus_set().
+ * Unrecognized speed values are reported as 100M.
+ */
+static int
+hns3_dev_link_update(struct rte_eth_dev *eth_dev,
+		     __rte_unused int wait_to_complete)
+{
+	static const uint32_t known_speeds[] = {
+		ETH_SPEED_NUM_10M, ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
+		ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G, ETH_SPEED_NUM_40G,
+		ETH_SPEED_NUM_50G, ETH_SPEED_NUM_100G, ETH_SPEED_NUM_200G,
+	};
+	struct hns3_adapter *hns = eth_dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_mac *mac = &hw->mac;
+	struct rte_eth_link report;
+	size_t idx;
+
+	/* Do not touch hardware while a reset is in flight. */
+	if (!hns3_is_reset_pending(hns)) {
+		hns3_update_speed_duplex(eth_dev);
+		hns3_update_link_status(hw);
+	}
+
+	memset(&report, 0, sizeof(report));
+
+	/* Fall back to 100M unless the cached speed is a known value. */
+	report.link_speed = ETH_SPEED_NUM_100M;
+	for (idx = 0; idx < sizeof(known_speeds) / sizeof(known_speeds[0]);
+	     idx++) {
+		if (known_speeds[idx] == mac->link_speed) {
+			report.link_speed = mac->link_speed;
+			break;
+		}
+	}
+
+	report.link_duplex = mac->link_duplex;
+	report.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	report.link_autoneg =
+	    !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+
+	return rte_eth_linkstatus_set(eth_dev, &report);
+}
+
+/*
+ * Interpret the firmware's function-status response.
+ *
+ * Returns -EINVAL while the PF state is not yet marked DONE (i.e. the
+ * PF reset has not completed); otherwise records whether this PF is the
+ * main PF and returns 0.
+ */
+static int
+hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
+{
+	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+	struct hns3_pf *pf = &hns->pf;
+
+	/* Nothing else in the response is valid until DONE is set. */
+	if (!(status->pf_state & HNS3_PF_STATE_DONE))
+		return -EINVAL;
+
+	/* "flag ? true : false" is redundant; !! normalizes to bool. */
+	pf->is_main_pf = !!(status->pf_state & HNS3_PF_STATE_MAIN);
+
+	return 0;
+}
+
+/*
+ * Poll firmware for the PF function status.
+ *
+ * Sends HNS3_OPC_QUERY_FUNC_STATUS up to HNS3_QUERY_MAX_CNT times,
+ * sleeping 1 ms between attempts, until the firmware reports a non-zero
+ * pf_state (PF reset finished). The final response is then parsed by
+ * hns3_parse_func_status(), which returns -EINVAL if the loop timed out
+ * before the DONE bit appeared.
+ *
+ * NOTE(review): the macro name "MSCOEND" looks like a typo for "MS";
+ * left unchanged here in case it is referenced elsewhere in the file.
+ */
+static int
+hns3_query_function_status(struct hns3_hw *hw)
+{
+#define HNS3_QUERY_MAX_CNT 10
+#define HNS3_QUERY_SLEEP_MSCOEND 1
+ struct hns3_func_status_cmd *req;
+ struct hns3_cmd_desc desc;
+ int timeout = 0;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
+ /* The response payload lives directly in the descriptor data area. */
+ req = (struct hns3_func_status_cmd *)desc.data;
+
+ do {
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "query function status failed %d",
+ ret);
+ return ret;
+ }
+
+ /* Check pf reset is done */
+ if (req->pf_state)
+ break;
+
+ rte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND);
+ } while (timeout++ < HNS3_QUERY_MAX_CNT);
+
+ /* May still see pf_state == 0 on timeout; parse reports -EINVAL. */
+ return hns3_parse_func_status(hw, req);
+}
+
+/*
+ * Query PF hardware resources (queue pairs, buffer sizes, MSI vectors)
+ * from firmware and cache them in the hw/pf structures.
+ *
+ * Buffer sizes come from firmware in units of (1 << HNS3_BUF_UNIT_S)
+ * bytes and are rounded up to HNS3_BUF_SIZE_UNIT; zero means "use the
+ * driver default". Returns 0 on success or the hns3_cmd_send() error.
+ */
+static int
+hns3_query_pf_resource(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_pf_res_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
+ return ret;
+ }
+
+ req = (struct hns3_pf_res_cmd *)desc.data;
+ hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num);
+ pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
+ /* Usable queue pairs are capped at the per-function maximum. */
+ hw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
+ pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number);
+
+ /* Zero from firmware means the field is unsupported: use defaults. */
+ if (req->tx_buf_size)
+ pf->tx_buf_size =
+ rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
+ else
+ pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;
+
+ pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);
+
+ if (req->dv_buf_size)
+ pf->dv_buf_size =
+ rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
+ else
+ pf->dv_buf_size = HNS3_DEFAULT_DV;
+
+ pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
+
+ /* MSI vector count is a bit-field inside the interrupt-number word. */
+ hw->num_msi =
+ hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number),
+ HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
+
+ return 0;
+}
+
+/*
+ * Unpack the board configuration returned by HNS3_OPC_GET_CFG_PARAM.
+ *
+ * desc[0] carries VMDq/TC/queue-descriptor counts, PHY address, media
+ * type, Rx buffer length, the MAC address and default speed; desc[1]
+ * carries the NUMA map, speed ability and unicast MAC table (UMV) space.
+ * All multi-byte fields are little-endian on the wire.
+ */
+static void
+hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
+{
+ struct hns3_cfg_param_cmd *req;
+ uint64_t mac_addr_tmp_high;
+ uint64_t mac_addr_tmp;
+ uint32_t i;
+
+ req = (struct hns3_cfg_param_cmd *)desc[0].data;
+
+ /* get the configuration */
+ cfg->vmdq_vport_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
+ HNS3_CFG_VMDQ_M, HNS3_CFG_VMDQ_S);
+ cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
+ HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
+ cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
+ HNS3_CFG_TQP_DESC_N_M,
+ HNS3_CFG_TQP_DESC_N_S);
+
+ cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
+ HNS3_CFG_PHY_ADDR_M,
+ HNS3_CFG_PHY_ADDR_S);
+ cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
+ HNS3_CFG_MEDIA_TP_M,
+ HNS3_CFG_MEDIA_TP_S);
+ cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
+ HNS3_CFG_RX_BUF_LEN_M,
+ HNS3_CFG_RX_BUF_LEN_S);
+ /* get mac address */
+ /* Low 32 bits come from param[2], high bits from param[3]. */
+ mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
+ mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
+ HNS3_CFG_MAC_ADDR_H_M,
+ HNS3_CFG_MAC_ADDR_H_S);
+
+ /*
+ * Equivalent to << 32; the split shift is presumably inherited from
+ * code where the operand was 32-bit (shift by full width is UB) --
+ * here the operand is uint64_t, so either form is safe.
+ */
+ mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
+
+ cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
+ HNS3_CFG_DEFAULT_SPEED_M,
+ HNS3_CFG_DEFAULT_SPEED_S);
+ cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
+ HNS3_CFG_RSS_SIZE_M,
+ HNS3_CFG_RSS_SIZE_S);
+
+ /* Emit MAC bytes least-significant first. */
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
+ cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
+
+ req = (struct hns3_cfg_param_cmd *)desc[1].data;
+ cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);
+
+ cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
+ HNS3_CFG_SPEED_ABILITY_M,
+ HNS3_CFG_SPEED_ABILITY_S);
+ cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
+ HNS3_CFG_UMV_TBL_SPACE_M,
+ HNS3_CFG_UMV_TBL_SPACE_S);
+ /* Zero means firmware did not provision UMV space: use the default. */
+ if (!cfg->umv_space)
+ cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;
+}
+
+/* hns3_get_board_cfg: query the static parameters from the NCL_config
+ * file in flash via HNS3_OPC_GET_CFG_PARAM.
+ * @hw: pointer to struct hns3_hw
+ * @hcfg: the config structure to be filled in
+ *
+ * Builds HNS3_PF_CFG_DESC_NUM chained read descriptors (each covering
+ * HNS3_CFG_RD_LEN_BYTES of config space), sends them in one batch, and
+ * parses the combined response. Returns 0 on success or the
+ * hns3_cmd_send() error.
+ */
+static int
+hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
+{
+ struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
+ struct hns3_cfg_param_cmd *req;
+ uint32_t offset;
+ uint32_t i;
+ int ret;
+
+ for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
+ offset = 0;
+ req = (struct hns3_cfg_param_cmd *)desc[i].data;
+ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
+ true);
+ /* Pack the byte offset of this chunk into the offset field. */
+ hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
+ i * HNS3_CFG_RD_LEN_BYTES);
+ /* Len should be divided by 4 when send to hardware */
+ hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
+ HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
+ req->offset = rte_cpu_to_le_32(offset);
+ }
+
+ ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "get config failed %d.", ret);
+ return ret;
+ }
+
+ hns3_parse_cfg(hcfg, desc);
+
+ return 0;
+}
+
+/*
+ * Translate a firmware speed code into an ethdev ETH_SPEED_NUM_* value.
+ *
+ * Writes the translated speed through @speed and returns 0, or returns
+ * -EINVAL (leaving @speed untouched) for an unknown code.
+ */
+static int
+hns3_parse_speed(int speed_cmd, uint32_t *speed)
+{
+	/* Firmware speed code -> ethdev speed number lookup table. */
+	static const struct {
+		int cfg_speed;
+		uint32_t eth_speed;
+	} speed_tbl[] = {
+		{ HNS3_CFG_SPEED_10M,  ETH_SPEED_NUM_10M },
+		{ HNS3_CFG_SPEED_100M, ETH_SPEED_NUM_100M },
+		{ HNS3_CFG_SPEED_1G,   ETH_SPEED_NUM_1G },
+		{ HNS3_CFG_SPEED_10G,  ETH_SPEED_NUM_10G },
+		{ HNS3_CFG_SPEED_25G,  ETH_SPEED_NUM_25G },
+		{ HNS3_CFG_SPEED_40G,  ETH_SPEED_NUM_40G },
+		{ HNS3_CFG_SPEED_50G,  ETH_SPEED_NUM_50G },
+		{ HNS3_CFG_SPEED_100G, ETH_SPEED_NUM_100G },
+		{ HNS3_CFG_SPEED_200G, ETH_SPEED_NUM_200G },
+	};
+	size_t idx;
+
+	for (idx = 0; idx < sizeof(speed_tbl) / sizeof(speed_tbl[0]); idx++) {
+		if (speed_tbl[idx].cfg_speed == speed_cmd) {
+			*speed = speed_tbl[idx].eth_speed;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Fill in compiled-in device specifications for hardware that cannot
+ * report them itself (pre-HIP09 revisions; see hns3_get_capability()
+ * for the revision check in this file).
+ */
+static void
+hns3_set_default_dev_specifications(struct hns3_hw *hw)
+{
+ hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
+ hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
+ hw->rss_key_size = HNS3_RSS_KEY_SIZE;
+ hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
+}
+
+/*
+ * Extract device specifications from the HNS3_OPC_QUERY_DEV_SPECS
+ * response (first descriptor only) into the hw structure.
+ */
+static void
+hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
+{
+ struct hns3_dev_specs_0_cmd *req0;
+
+ req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
+
+ /* No byte-swap: presumably a single-byte field -- TODO confirm. */
+ hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
+ hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
+ hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
+ hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
+}
+
+/*
+ * Query device specifications from firmware.
+ *
+ * Builds a chain of HNS3_QUERY_DEV_SPECS_BD_NUM read descriptors --
+ * every descriptor except the last carries HNS3_CMD_FLAG_NEXT to link
+ * the chain -- sends them in one batch, and parses the response.
+ * Returns 0 on success or the hns3_cmd_send() error.
+ */
+static int
+hns3_query_dev_specifications(struct hns3_hw *hw)
+{
+	struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
+	int ret;
+	int i;
+
+	for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM; i++) {
+		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
+					  true);
+		/* Chain all but the final descriptor together. */
+		if (i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1)
+			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+	}
+
+	ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
+	if (ret)
+		return ret;
+
+	hns3_parse_dev_specifications(hw, desc);
+
+	return 0;
+}
+
+/*
+ * Probe device capabilities from the PCI identity and revision.
+ *
+ * Sets the DCB capability bit for the RDMA-capable device variants,
+ * reads the PCI revision id to distinguish hardware generations, and
+ * configures interrupt mapping/coalescing modes and the minimum Tx
+ * packet length accordingly. For newer silicon the device
+ * specifications are queried from firmware; older silicon gets
+ * compiled-in defaults. Returns 0 on success, -EIO when the PCI config
+ * read fails, or the firmware query error.
+ */
+static int
+hns3_get_capability(struct hns3_hw *hw)
+{
+	struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->data->port_id];
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	uint8_t revision;
+	int ret;
+
+	/* Only the RDMA-capable variants support DCB. */
+	switch (pci_dev->id.device_id) {
+	case HNS3_DEV_ID_25GE_RDMA:
+	case HNS3_DEV_ID_50GE_RDMA:
+	case HNS3_DEV_ID_100G_RDMA_MACSEC:
+	case HNS3_DEV_ID_200G_RDMA:
+		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
+		break;
+	default:
+		break;
+	}
+
+	/* Get PCI revision id */
+	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
+				  HNS3_PCI_REVISION_ID);
+	if (ret != HNS3_PCI_REVISION_ID_LEN) {
+		PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
+			     ret);
+		return -EIO;
+	}
+	hw->revision = revision;
+
+	if (revision >= PCI_REVISION_ID_HIP09_A) {
+		/* Newer silicon reports its own specifications. */
+		ret = hns3_query_dev_specifications(hw);
+		if (ret) {
+			PMD_INIT_LOG(ERR,
+				     "failed to query dev specifications, ret = %d",
+				     ret);
+			return ret;
+		}
+
+		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
+		hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
+		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
+		hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
+
+		return 0;
+	}
+
+	/* Older silicon: fall back to compiled-in defaults. */
+	hns3_set_default_dev_specifications(hw);
+	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
+	hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
+	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
+	hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
+
+	return 0;
+}
+
+static int
+hns3_get_board_configuration(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_cfg cfg;
+ int ret;
+
+ ret = hns3_get_board_cfg(hw, &cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "get board config failed %d", ret);
+ return ret;
+ }
+
+ if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER &&
+ !hns3_dev_copper_supported(hw)) {
+ PMD_INIT_LOG(ERR, "media type is copper, not supported.");
+ return -EOPNOTSUPP;
+ }
+
+ hw->mac.media_type = cfg.media_type;
+ hw->rss_size_max = cfg.rss_size_max;
+ hw->rss_dis_flag = false;
+ memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);