+/*
+ * Apply compile-time default device specifications; used when the
+ * firmware query path is unavailable or not supported.
+ */
+static void
+hns3vf_set_default_dev_specifications(struct hns3_hw *hw)
+{
+ hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
+ hw->rss_key_size = HNS3_RSS_KEY_SIZE;
+ hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
+ hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
+}
+
+/*
+ * Copy device specifications out of the firmware response descriptors
+ * into the hw state. Only the first descriptor's payload is consumed
+ * here; multi-byte fields arrive little-endian from firmware.
+ */
+static void
+hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
+{
+ struct hns3_dev_specs_0_cmd *req0 =
+ (struct hns3_dev_specs_0_cmd *)desc[0].data;
+
+ hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
+ hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
+ hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
+ hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
+}
+
+/*
+ * Validate the device specifications obtained from firmware (or the
+ * defaults). An RSS indirection table size of zero or above the driver
+ * maximum would later cause out-of-bounds table programming, so reject
+ * it here with -EINVAL.
+ */
+static int
+hns3vf_check_dev_specifications(struct hns3_hw *hw)
+{
+ if (hw->rss_ind_tbl_size == 0 ||
+ hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
+ /*
+ * Message covers both failure cases: the old text claimed
+ * only "exceeds the maximum" even when the size was zero.
+ */
+ hns3_warn(hw, "the size of hash lookup table configured (%u)"
+ " is invalid, must be in range (0, %u]",
+ hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Query device specifications from firmware via a chained
+ * multi-descriptor command, parse the response into hw, and validate
+ * the result. Returns 0 on success or a negative errno.
+ */
+static int
+hns3vf_query_dev_specifications(struct hns3_hw *hw)
+{
+ struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
+ int ret;
+ int i;
+
+ /* Every descriptor except the last carries the NEXT chain flag. */
+ for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM; i++) {
+ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
+ true);
+ if (i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1)
+ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+ }
+
+ ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
+ if (ret)
+ return ret;
+
+ hns3vf_parse_dev_specifications(hw, desc);
+
+ return hns3vf_check_dev_specifications(hw);
+}
+
+/*
+ * Latch whether the PF supports pushing link status change events to
+ * this VF. The capability is recorded at most once: the compare-and-
+ * exchange succeeds only while pf_push_lsc_cap is still UNKNOWN, so a
+ * later call cannot overwrite an already-decided value. The plain read
+ * in the 'if' is just a fast-path check; the CAS is what decides.
+ */
+void
+hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
+{
+ uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED :
+ HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
+ uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
+ struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
+
+ if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
+ __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+}
+
+/*
+ * Probe whether the PF can push link status changes to this VF: reset
+ * the capability to UNKNOWN, send a GET_LINK_STATUS mailbox request,
+ * then poll until the (asynchronous) response handler resolves the
+ * state or the timeout expires. If the PF never answers, the
+ * capability is settled as NOT_SUPPORTED and the LSC interrupt flag
+ * is cleared so applications do not wait for events that cannot come.
+ */
+static void
+hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
+{
+#define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS 500
+
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+ int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS;
+ uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
+ uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
+ struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
+
+ __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
+ __ATOMIC_RELEASE);
+
+ /* Best-effort request; the reply arrives via the mailbox handler. */
+ (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
+ NULL, 0);
+
+ /*
+ * NOTE(review): remain_ms is decremented once per poll iteration, so
+ * the 500 ms budget is only accurate if HNS3_POLL_RESPONE_MS == 1 —
+ * confirm if that constant ever changes.
+ */
+ while (remain_ms > 0) {
+ rte_delay_ms(HNS3_POLL_RESPONE_MS);
+ if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+ HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
+ break;
+ remain_ms--;
+ }
+
+ /*
+ * When exit above loop, the pf_push_lsc_cap could be one of the three
+ * state: unknown (means pf not ack), not_supported, supported.
+ * Here config it as 'not_supported' when it's 'unknown' state.
+ */
+ __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+
+ if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+ HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
+ hns3_info(hw, "detect PF support push link status change!");
+ } else {
+ /*
+ * Framework already set RTE_ETH_DEV_INTR_LSC bit because driver
+ * declared RTE_PCI_DRV_INTR_LSC in drv_flags. So here cleared
+ * the RTE_ETH_DEV_INTR_LSC capability.
+ */
+ dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+ }
+}
+