+/*
+ * Process a PF->VF mailbox response (HNS3_MBX_PF_VF_RESP) and publish its
+ * status/data into hw->mbx_resp for the waiting requester.
+ *
+ * Two matching schemes exist: if the PF echoed a non-zero match_id, the
+ * response is matched against the outstanding request's match_id; otherwise
+ * the legacy scheme (message-code based position update) is used.
+ */
+static void
+hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req)
+{
+ struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
+ uint32_t msg_data;
+
+ if (req->match_id != 0) {
+ /*
+ * If match_id is not zero, it means PF support copy request's
+ * match_id to its response. So VF could use the match_id
+ * to match the request.
+ */
+ if (resp->matching_scheme !=
+ HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID) {
+ resp->matching_scheme =
+ HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID;
+ hns3_info(hw, "detect mailbox support match id!");
+ }
+ if (req->match_id == resp->match_id) {
+ /* msg[3] carries the PF's status code; msg[4..] extra data. */
+ resp->resp_status = hns3_resp_to_errno(req->msg[3]);
+ memcpy(resp->additional_info, &req->msg[4],
+ HNS3_MBX_MAX_RESP_DATA_SIZE);
+ /*
+ * I/O write barrier: status and additional_info must be
+ * visible before the waiter observes received_match_resp.
+ */
+ rte_io_wmb();
+ resp->received_match_resp = true;
+ }
+ return;
+ }
+
+ /*
+ * If the below instructions can be executed, it means PF does not
+ * support copy request's match_id to its response. So VF follows the
+ * original scheme to process.
+ */
+ resp->resp_status = hns3_resp_to_errno(req->msg[3]);
+ memcpy(resp->additional_info, &req->msg[4],
+ HNS3_MBX_MAX_RESP_DATA_SIZE);
+ /* Legacy matching: combine msg[1]/msg[2] into the request identifier. */
+ msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
+ hns3_update_resp_position(hw, msg_data);
+}
+
+/*
+ * Log a human-readable warning for a link-failure code reported by the
+ * firmware/PF. HNS3_MBX_LF_NORMAL means no failure and is silently ignored.
+ */
+static void
+hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
+{
+	if (link_fail_code == HNS3_MBX_LF_NORMAL)
+		return;
+
+	if (link_fail_code == HNS3_MBX_LF_REF_CLOCK_LOST)
+		hns3_warn(hw, "Reference clock lost!");
+	else if (link_fail_code == HNS3_MBX_LF_XSFP_TX_DISABLE)
+		hns3_warn(hw, "SFP tx is disabled!");
+	else if (link_fail_code == HNS3_MBX_LF_XSFP_ABSENT)
+		hns3_warn(hw, "SFP is absent!");
+	else
+		hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
+}
+
+/*
+ * Handle a link-change mailbox event: when the reported status is down,
+ * decode and log the failure reason, then refresh the driver's link state.
+ */
+static void
+hns3pf_handle_link_change_event(struct hns3_hw *hw,
+				struct hns3_mbx_vf_to_pf_cmd *req)
+{
+#define LINK_STATUS_OFFSET     1
+#define LINK_FAIL_CODE_OFFSET  2
+	bool link_up = req->msg[LINK_STATUS_OFFSET] != 0;
+
+	if (!link_up)
+		hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);
+
+	hns3_update_linkstatus_and_event(hw, true);
+}
+
+/*
+ * Apply a PVID (port-based VLAN) state change pushed by the PF driver.
+ *
+ * The hns3 engine cannot offload more than two VLAN layers, so when the PF
+ * kernel driver (re)configures the VF's PVID after the VF is initialized,
+ * the VF must immediately mirror the new state so Tx/Rx VLAN processing
+ * stays correct. During the transition window, packet loss or packets with
+ * a wrong VLAN may still occur.
+ */
+static void
+hns3_update_port_base_vlan_info(struct hns3_hw *hw,
+				struct hns3_mbx_pf_to_vf_cmd *req)
+{
+#define PVID_STATE_OFFSET	1
+	uint16_t pvid_state;
+
+	pvid_state = req->msg[PVID_STATE_OFFSET] != 0 ?
+		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
+
+	/* Nothing to do when the reported state matches the current one. */
+	if (hw->port_base_vlan_cfg.state == pvid_state)
+		return;
+
+	hw->port_base_vlan_cfg.state = pvid_state;
+	hns3_update_all_queues_pvid_proc_en(hw);
+}
+
+/*
+ * Sync local promisc/allmulti flags when the host disables them.
+ * A zero promisc_en means the PF kernel driver closed promiscuous and
+ * all-multicast mode for this untrusted VF, so mirror that locally.
+ */
+static void
+hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
+{
+	if (promisc_en != 0)
+		return;
+
+	hns3_warn(hw, "Promisc mode will be closed by host for being "
+		      "untrusted.");
+	hw->data->promiscuous = 0;
+	hw->data->all_multicast = 0;
+}
+
+/*
+ * Scan the CRQ for mailbox responses outside the interrupt context (used
+ * when a task-context caller is polling for a PF response). Only
+ * HNS3_MBX_PF_VF_RESP descriptors are consumed here; their opcode is
+ * cleared so the interrupt thread will not process them again. The ring's
+ * next_to_use is deliberately NOT advanced: descriptor reclaim is left to
+ * the interrupt handler.
+ */
+static void
+hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw)
+{
+	struct hns3_cmq_ring *crq = &hw->cmq.crq;
+	struct hns3_mbx_pf_to_vf_cmd *req;
+	struct hns3_cmd_desc *desc;
+	uint32_t tail, next_to_use;
+	uint8_t opcode;
+	uint16_t flag;
+
+	tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);
+	next_to_use = crq->next_to_use;
+	while (next_to_use != tail) {
+		desc = &crq->desc[next_to_use];
+		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
+		opcode = req->msg[0] & 0xff;
+
+		/* Skip descriptors the hardware has not marked valid yet. */
+		flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag);
+		if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))
+			goto scan_next;
+
+		/* Already consumed (opcode cleared) by a previous scan. */
+		if (crq->desc[next_to_use].opcode == 0)
+			goto scan_next;
+
+		if (opcode == HNS3_MBX_PF_VF_RESP) {
+			hns3_handle_mbx_response(hw, req);
+			/*
+			 * Clear opcode to inform intr thread don't process
+			 * again.
+			 *
+			 * Fix: clear the descriptor being scanned
+			 * (next_to_use), not crq->next_to_use — the latter
+			 * stays at the scan start, so the processed response
+			 * was left marked valid and an unprocessed descriptor
+			 * could be wrongly cleared.
+			 */
+			crq->desc[next_to_use].opcode = 0;
+		}
+
+scan_next:
+		next_to_use = (next_to_use + 1) % crq->desc_num;
+	}
+}
+