+ hns3_atomic_clear_bit(reset_level, &hw->reset.request);
+}
+
+static enum hns3_reset_level
+hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+{
+ struct hns3_hw *hw = &hns->hw;
+ enum hns3_reset_level reset_level = HNS3_NONE_RESET;
+
+ /* Return the highest priority reset level amongst all */
+ if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
+ reset_level = HNS3_IMP_RESET;
+ else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
+ reset_level = HNS3_GLOBAL_RESET;
+ else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
+ reset_level = HNS3_FUNC_RESET;
+ else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
+ reset_level = HNS3_FLR_RESET;
+
+ if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
+ return HNS3_NONE_RESET;
+
+ return reset_level;
+}
+
+static void
+hns3_record_imp_error(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ uint32_t reg_val;
+
+ reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
+ if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
+ hns3_warn(hw, "Detected IMP RD poison!");
+ hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
+ hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
+ }
+
+ if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
+ hns3_warn(hw, "Detected IMP CMDQ error!");
+ hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
+ hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
+ }
+}
+
+static int
+hns3_prepare_reset(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ uint32_t reg_val;
+ int ret;
+
+ switch (hw->reset.level) {
+ case HNS3_FUNC_RESET:
+ ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
+ if (ret)
+ return ret;
+
+ /*
+ * After performing the PF reset, there is no need to handle
+ * mailbox events or send any command to the firmware, because
+ * mailbox handling and firmware commands are only valid after
+ * hns3_cmd_init is called again.
+ */
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ hw->reset.stats.request_cnt++;
+ break;
+ case HNS3_IMP_RESET:
+ hns3_record_imp_error(hns);
+ reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
+ hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
+ BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int
+hns3_set_rst_done(struct hns3_hw *hw)
+{
+ struct hns3_pf_rst_done_cmd *req;
+ struct hns3_cmd_desc desc;
+
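+ /* HNS3_OPC_PF_RST_DONE reports the PF reset-done flag to the firmware. */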
+ req = (struct hns3_pf_rst_done_cmd *)desc.data;
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
+ req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
+ return hns3_cmd_send(hw, &desc, 1);
+}
+
+static int
+hns3_stop_service(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_eth_dev *eth_dev;
+
+ eth_dev = &rte_eth_devices[hw->data->port_id];
+ hw->mac.link_status = ETH_LINK_DOWN;
+ if (hw->adapter_state == HNS3_NIC_STARTED) {
+ rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
+ hns3_update_linkstatus_and_event(hw, false);
+ }
+
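+ /*
+ * Switch the Rx/Tx burst functions to match the reset state; the write
+ * barrier makes the change visible before the datapath is stopped.
+ */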
+ hns3_set_rxtx_function(eth_dev);
+ rte_wmb();
+ /* Disable datapath on secondary process. */
+ hns3_mp_req_stop_rxtx(eth_dev);
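+ /* Prevent crashes when queues are still in use: wait about 1 ms per TQP. */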
+ rte_delay_ms(hw->tqps_num);
+
+ rte_spinlock_lock(&hw->lock);
+ if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
+ hw->adapter_state == HNS3_NIC_STOPPING) {
+ hns3_enable_all_queues(hw, false);
+ hns3_do_stop(hns);
+ hw->reset.mbuf_deferred_free = true;
+ } else
+ hw->reset.mbuf_deferred_free = false;
+
+ /*
+ * It is cumbersome for the hardware to pick-and-choose entries for
+ * deletion from its table space. Hence, for a function reset, software
+ * intervention is required to delete the entries.
+ */
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ hns3_configure_all_mc_mac_addr(hns, true);
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static int
+hns3_start_service(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct rte_eth_dev *eth_dev;
+
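+ /*
+ * For IMP and global resets, notify the firmware that the driver's
+ * reset handling is done before re-enabling the datapath.
+ */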
+ if (hw->reset.level == HNS3_IMP_RESET ||
+ hw->reset.level == HNS3_GLOBAL_RESET)
+ hns3_set_rst_done(hw);
+ eth_dev = &rte_eth_devices[hw->data->port_id];
+ hns3_set_rxtx_function(eth_dev);
+ hns3_mp_req_start_rxtx(eth_dev);
+ if (hw->adapter_state == HNS3_NIC_STARTED) {
+ /*
+ * The caller of this function already holds hns3_hw.lock. The
+ * hns3_service_handler may report lse; in a bonding application
+ * that calls back into the driver's ops, which may acquire
+ * hns3_hw.lock again and thus lead to a deadlock.
+ * Defer the hns3_service_handler call to avoid the deadlock.
+ */
+ rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
+ hns3_service_handler, eth_dev);
+
+ /* Enable the interrupt of all Rx queues before enabling the queues */
+ hns3_dev_all_rx_queue_intr_enable(hw, true);
+ /*
+ * The enable state of each rxq and txq needs to be recovered
+ * after reset, so restore it before enabling all tqps.
+ */
+ hns3_restore_tqp_enable_state(hw);
+ /*
+ * When initialization is finished, enable the queues to
+ * receive and transmit packets.
+ */
+ hns3_enable_all_queues(hw, true);
+ }
+
+ return 0;
+}
+
+static int
+hns3_restore_conf(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3_configure_all_mac_addr(hns, false);
+ if (ret)
+ return ret;
+
+ ret = hns3_configure_all_mc_mac_addr(hns, false);
+ if (ret)
+ goto err_mc_mac;
+
+ ret = hns3_dev_promisc_restore(hns);
+ if (ret)
+ goto err_promisc;
+
+ ret = hns3_restore_vlan_table(hns);
+ if (ret)
+ goto err_promisc;
+
+ ret = hns3_restore_vlan_conf(hns);
+ if (ret)
+ goto err_promisc;
+
+ ret = hns3_restore_all_fdir_filter(hns);
+ if (ret)
+ goto err_promisc;
+
+ ret = hns3_restore_ptp(hns);
+ if (ret)
+ goto err_promisc;
+
+ ret = hns3_restore_rx_interrupt(hw);
+ if (ret)
+ goto err_promisc;
+
+ ret = hns3_restore_gro_conf(hw);
+ if (ret)
+ goto err_promisc;
+
+ ret = hns3_restore_fec(hw);
+ if (ret)
+ goto err_promisc;
+
+ if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
+ ret = hns3_do_start(hns, false);
+ if (ret)
+ goto err_promisc;
+ hns3_info(hw, "hns3 dev restart successful!");
+ } else if (hw->adapter_state == HNS3_NIC_STOPPING)
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ return 0;
+
+err_promisc:
+ hns3_configure_all_mc_mac_addr(hns, true);
+err_mc_mac:
+ hns3_configure_all_mac_addr(hns, true);
+ return ret;
+}
+
+static void
+hns3_reset_service(void *param)
+{
+ struct hns3_adapter *hns = (struct hns3_adapter *)param;
+ struct hns3_hw *hw = &hns->hw;
+ enum hns3_reset_level reset_level;
+ struct timeval tv_delta;
+ struct timeval tv_start;
+ struct timeval tv;
+ uint64_t msec;
+ int ret;
+
+ /*
+ * If the interrupt was not triggered within the delay time, it
+ * may have been lost. It is then necessary to handle the
+ * interrupt here in order to recover from the error.
+ */
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_DEFERRED) {
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ __ATOMIC_RELAXED);
+ hns3_err(hw, "Handling interrupts in delayed tasks");
+ hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
+ reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
+ if (reset_level == HNS3_NONE_RESET) {
+ hns3_err(hw, "No reset level is set, try IMP reset");
+ hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
+ }
+ }
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+
+ /*
+ * Check whether there is any ongoing reset in the hardware; this status
+ * can be checked from hw->reset.pending. If there is, we need to wait
+ * for the hardware to complete the reset.
+ * a. If we can determine within a reasonable time that the hardware
+ * has fully reset, we proceed with the driver and client reset.
+ * b. Otherwise, we come back later to check this status, so we
+ * reschedule now.
+ */
+ reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
+ if (reset_level != HNS3_NONE_RESET) {
+ hns3_clock_gettime(&tv_start);
+ ret = hns3_reset_process(hns, reset_level);
+ hns3_clock_gettime(&tv);
+ timersub(&tv, &tv_start, &tv_delta);
+ msec = hns3_clock_calctime_ms(&tv_delta);
+ if (msec > HNS3_RESET_PROCESS_MS)
+ hns3_err(hw, "%d handle long time delta %" PRIu64
+ " ms time=%ld.%.6ld",
+ hw->reset.level, msec,
+ tv.tv_sec, tv.tv_usec);
+ if (ret == -EAGAIN)
+ return;
+ }
+
+ /* Check if we got any *new* reset requests to be honored */
+ reset_level = hns3_get_reset_level(hns, &hw->reset.request);
+ if (reset_level != HNS3_NONE_RESET)
+ hns3_msix_process(hns, reset_level);
+}
+
+static unsigned int
+hns3_get_speed_capa_num(uint16_t device_id)
+{
+ unsigned int num;
+
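+ /*
+ * 25G devices expose two speed/FEC entries (10G and 25G); 100G and
+ * 200G devices expose one.
+ */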
+ switch (device_id) {
+ case HNS3_DEV_ID_25GE:
+ case HNS3_DEV_ID_25GE_RDMA:
+ num = 2;
+ break;
+ case HNS3_DEV_ID_100G_RDMA_MACSEC:
+ case HNS3_DEV_ID_200G_RDMA:
+ num = 1;
+ break;
+ default:
+ num = 0;
+ break;
+ }
+
+ return num;
+}
+
+static int
+hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
+ uint16_t device_id)
+{
+ switch (device_id) {
+ case HNS3_DEV_ID_25GE:
+ /* fallthrough */
+ case HNS3_DEV_ID_25GE_RDMA:
+ speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
+ speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;
+
+ /* On HNS3 devices, the 25G NIC is also compatible with the 10G rate */
+ speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
+ speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
+ break;
+ case HNS3_DEV_ID_100G_RDMA_MACSEC:
+ speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
+ speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
+ break;
+ case HNS3_DEV_ID_200G_RDMA:
+ speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
+ speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+static int
+hns3_fec_get_capability(struct rte_eth_dev *dev,
+ struct rte_eth_fec_capa *speed_fec_capa,
+ unsigned int num)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint16_t device_id = pci_dev->id.device_id;
+ unsigned int capa_num;
+ int ret;
+
+ capa_num = hns3_get_speed_capa_num(device_id);
+ if (capa_num == 0) {
+ hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
+ device_id);
+ return -ENOTSUP;
+ }
+
+ if (speed_fec_capa == NULL || num < capa_num)
+ return capa_num;
+
+ ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
+ if (ret)
+ return -ENOTSUP;
+
+ return capa_num;
+}
+
+static int
+get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
+{
+ struct hns3_config_fec_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ /*
+ * Reading CMD(HNS3_OPC_CONFIG_FEC_MODE) is not supported on
+ * devices with a link speed below 10 Gbps.
+ */
+ if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
+ *state = 0;
+ return 0;
+ }
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
+ req = (struct hns3_config_fec_cmd *)desc.data;
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "get current fec auto state failed, ret = %d",
+ ret);
+ return ret;
+ }
+
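+ /* Extract the FEC AUTO enable bit from the returned configuration. */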
+ *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
+ return 0;
+}
+
+static int
+hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
+{
+ struct hns3_sfp_info_cmd *resp;
+ uint32_t tmp_fec_capa;
+ uint8_t auto_state;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ /*
+ * If the link is down and AUTO is enabled, AUTO is returned;
+ * otherwise, the configured FEC mode is returned.
+ * If the link is up, the current FEC mode is returned.
+ */
+ if (hw->mac.link_status == ETH_LINK_DOWN) {
+ ret = get_current_fec_auto_state(hw, &auto_state);
+ if (ret)
+ return ret;
+
+ if (auto_state == 0x1) {
+ *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
+ return 0;
+ }
+ }
+
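+ /* Otherwise query the FEC mode currently active on the port. */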
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
+ resp = (struct hns3_sfp_info_cmd *)desc.data;
+ resp->query_type = HNS3_ACTIVE_QUERY;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ hns3_err(hw, "IMP do not support get FEC, ret = %d", ret);
+ return ret;
+ } else if (ret) {
+ hns3_err(hw, "get FEC failed, ret = %d", ret);
+ return ret;
+ }
+
+ /*
+ * The FEC mode order defined in hns3 hardware is inconsistent with
+ * the one defined in the ethdev library, so the value needs to be
+ * converted.
+ */
+ switch (resp->active_fec) {
+ case HNS3_HW_FEC_MODE_NOFEC:
+ tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+ break;
+ case HNS3_HW_FEC_MODE_BASER:
+ tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+ break;
+ case HNS3_HW_FEC_MODE_RS:
+ tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+ break;
+ default:
+ tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+ break;
+ }
+
+ *fec_capa = tmp_fec_capa;
+ return 0;
+}
+
+static int
+hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ return hns3_fec_get_internal(hw, fec_capa);
+}
+
+static int
+hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
+{
+ struct hns3_config_fec_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);
+
+ req = (struct hns3_config_fec_cmd *)desc.data;
+ switch (mode) {
+ case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
+ hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
+ HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
+ break;
+ case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
+ hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
+ HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
+ break;
+ case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
+ hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
+ HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
+ break;
+ case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
+ hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
+ break;
+ default:
+ return 0;
+ }
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "set fec mode failed, ret = %d", ret);
+
+ return ret;
+}
+
+static uint32_t
+get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
+{
+ struct hns3_mac *mac = &hw->mac;
+ uint32_t cur_capa;
+
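+ /*
+ * fec_capa is laid out by hns3_get_speed_fec_capa(): index 0 holds the
+ * device's native speed and, on 25G devices, index 1 holds the 10G entry.
+ */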
+ switch (mac->link_speed) {
+ case ETH_SPEED_NUM_10G:
+ cur_capa = fec_capa[1].capa;
+ break;
+ case ETH_SPEED_NUM_25G:
+ case ETH_SPEED_NUM_100G:
+ case ETH_SPEED_NUM_200G:
+ cur_capa = fec_capa[0].capa;
+ break;
+ default:
+ cur_capa = 0;
+ break;
+ }
+
+ return cur_capa;
+}
+
+static bool
+is_fec_mode_one_bit_set(uint32_t mode)
+{
+ int cnt = 0;
+ uint8_t i;
+
+ /* Walk all bits of the 32-bit mode, not just sizeof(mode) bytes. */
+ for (i = 0; i < sizeof(mode) * 8; i++)
+ if (mode >> i & 0x1)
+ cnt++;
+
+ return cnt == 1;
+}
+
+static int
+hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
+{
+#define FEC_CAPA_NUM 2
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
+ struct hns3_pf *pf = &hns->pf;
+
+ struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
+ uint32_t cur_capa;
+ uint32_t num = FEC_CAPA_NUM;
+ int ret;
+
+ ret = hns3_fec_get_capability(dev, fec_capa, num);
+ if (ret < 0)
+ return ret;
+
+ /* The HNS3 PMD only supports modes with a single bit set, e.g. 0x1, 0x4 */
+ if (!is_fec_mode_one_bit_set(mode)) {
+ hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
+ "FEC mode should be only one bit set", mode);
+ return -EINVAL;
+ }
+
+ /*
+ * Check whether the configured mode is within the current FEC
+ * capability. If not, the configured mode is not supported.
+ */
+ cur_capa = get_current_speed_fec_cap(hw, fec_capa);
+ if (!(cur_capa & mode)) {
+ hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
+ return -EINVAL;
+ }
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_set_fec_hw(hw, mode);
+ if (ret) {
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
+ pf->fec_mode = mode;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static int
+hns3_restore_fec(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_pf *pf = &hns->pf;
+ uint32_t mode = pf->fec_mode;
+ int ret;
+
+ ret = hns3_set_fec_hw(hw, mode);
+ if (ret)
+ hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
+ mode, ret);
+
+ return ret;
+}
+
+static int
+hns3_query_dev_fec_info(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
+ int ret;
+
+ ret = hns3_fec_get_internal(hw, &pf->fec_mode);
+ if (ret)
+ hns3_err(hw, "query device FEC info failed, ret = %d", ret);
+
+ return ret;
+}
+
+static bool
+hns3_optical_module_existed(struct hns3_hw *hw)
+{
+ struct hns3_cmd_desc desc;
+ bool existed;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw,
+ "fail to get optical module exist state, ret = %d.\n",
+ ret);
+ return false;
+ }
+ existed = !!desc.data[0];
+
+ return existed;
+}
+
+static int
+hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
+ uint32_t len, uint8_t *data)
+{
+#define HNS3_SFP_INFO_CMD_NUM 6
+#define HNS3_SFP_INFO_MAX_LEN \
+ (HNS3_SFP_INFO_BD0_LEN + \
+ (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
+ struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
+ struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
+ uint16_t read_len;
+ uint16_t copy_len;
+ int ret;
+ int i;
+
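+ /* Chain the descriptors: every BD except the last carries the NEXT flag. */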
+ for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
+ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
+ true);
+ if (i < HNS3_SFP_INFO_CMD_NUM - 1)
+ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+ }
+
+ sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
+ sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
+ read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
+ sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);
+
+ ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
+ if (ret) {
+ hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ /* The data format in BD0 is different from that of the other BDs. */
+ copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN);
+ memcpy(data, sfp_info_bd0->data, copy_len);
+ read_len = copy_len;
+
+ for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) {
+ if (read_len >= len)
+ break;
+
+ copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN);
+ memcpy(data + read_len, desc[i].data, copy_len);
+ read_len += copy_len;
+ }
+
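+ /* Return the number of bytes actually read, which may be less than len. */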
+ return (int)read_len;