#include <pthread.h>

#include "ice_generic_flow.h"
+
+#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL 100000 /* us */
+static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;
+
/* Context handed to the VSI-update service thread for one reset event.
 * Allocated by start_vsi_reset_thread(); ownership transfers to the
 * thread, which frees it when the event has been handled.
 */
struct ice_dcf_reset_event_param {
	struct ice_dcf_hw *dcf_hw;

	bool vfr; /* VF reset event */
	uint16_t vf_id; /* The reset VF ID */
};
+
/* Refresh the cached VSI context for one VF according to a vsi_map word
 * reported by the PF over virtchnl.
 *
 * @hw:         parent ice HW struct owning the vsi_ctx[] cache
 * @vsi_handle: VF index, used directly as the vsi_ctx[] slot
 * @vsi_map:    encoded mapping; VIRTCHNL_DCF_VF_VSI_VALID flags a live VSI,
 *              the VSI number is extracted via the _VSI_ID_M/_S mask/shift
 *
 * No return value; failures are logged and the cache entry is left as-is.
 */
static __rte_always_inline void
ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
		       uint16_t vsi_map)
{
	struct ice_vsi_ctx *vsi_ctx;
	bool first_update = false;
	uint16_t new_vsi_num;

	/* Bounds-check before indexing the fixed-size vsi_ctx[] array. */
	if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
		PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
		return;
	}

	vsi_ctx = hw->vsi_ctx[vsi_handle];

	if (vsi_map & VIRTCHNL_DCF_VF_VSI_VALID) {
		/* First sighting of this VF: allocate its context entry. */
		if (!vsi_ctx) {
			vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
			if (!vsi_ctx) {
				PMD_DRV_LOG(ERR, "No memory for vsi context %u",
					    vsi_handle);
				return;
			}
			hw->vsi_ctx[vsi_handle] = vsi_ctx;
			first_update = true;
		}

		new_vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
			VIRTCHNL_DCF_VF_VSI_ID_S;

		/* Redirect rules if vsi mapping table changes. */
		if (!first_update) {
			struct ice_flow_redirect rd;

			memset(&rd, 0, sizeof(struct ice_flow_redirect));
			rd.type = ICE_FLOW_REDIRECT_VSI;
			rd.vsi_handle = vsi_handle;
			rd.new_vsi_num = new_vsi_num;
			/* NOTE(review): vsi_ctx->vsi_num is not written on
			 * this path — presumably ice_flow_redirect() updates
			 * it while re-targeting the flow rules; confirm in
			 * its implementation.
			 */
			ice_flow_redirect((struct ice_adapter *)hw->back, &rd);
		} else {
			vsi_ctx->vsi_num = new_vsi_num;
		}

		PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
			    vsi_handle, vsi_ctx->vsi_num);
	} else {
		/* VSI no longer valid: clear the slot before freeing so no
		 * reader can observe a dangling pointer through the cache.
		 */
		hw->vsi_ctx[vsi_handle] = NULL;

		ice_free(hw, vsi_ctx);

		PMD_DRV_LOG(NOTICE, "VF%u is disabled", vsi_handle);
	}
}
+
+static void
+ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
+ uint16_t *vf_vsi_map)
+{
+ uint16_t vf_id;
+
+ for (vf_id = 0; vf_id < num_vfs; vf_id++)
+ ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
+}
+
+static void
+ice_dcf_update_pf_vsi_map(struct ice_hw *hw, uint16_t pf_vsi_idx,
+ uint16_t pf_vsi_num)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (unlikely(pf_vsi_idx >= ICE_MAX_VSI)) {
+ PMD_DRV_LOG(ERR, "Invalid vsi handle %u", pf_vsi_idx);
+ return;
+ }
+
+ vsi_ctx = hw->vsi_ctx[pf_vsi_idx];
+
+ if (!vsi_ctx)
+ vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
+
+ if (!vsi_ctx) {
+ PMD_DRV_LOG(ERR, "No memory for vsi context %u",
+ pf_vsi_idx);
+ return;
+ }
+
+ vsi_ctx->vsi_num = pf_vsi_num;
+ hw->vsi_ctx[pf_vsi_idx] = vsi_ctx;
+
+ PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
+ pf_vsi_idx, vsi_ctx->vsi_num);
+}
+
+static void*
+ice_dcf_vsi_update_service_handler(void *param)
+{
+ struct ice_dcf_reset_event_param *reset_param = param;
+ struct ice_dcf_hw *hw = reset_param->dcf_hw;
+ struct ice_dcf_adapter *adapter;
+
+ rte_delay_us(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
+
+ rte_spinlock_lock(&vsi_update_lock);
+
+ adapter = container_of(hw, struct ice_dcf_adapter, real_hw);
+
+ if (!ice_dcf_handle_vsi_update_event(hw))
+ ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
+ hw->num_vfs, hw->vf_vsi_map);
+
+ if (reset_param->vfr && adapter->repr_infos) {
+ struct rte_eth_dev *vf_rep_eth_dev =
+ adapter->repr_infos[reset_param->vf_id].vf_rep_eth_dev;
+ if (vf_rep_eth_dev && vf_rep_eth_dev->data->dev_started) {
+ PMD_DRV_LOG(DEBUG, "VF%u representor is resetting",
+ reset_param->vf_id);
+ ice_dcf_vf_repr_init_vlan(vf_rep_eth_dev);
+ }
+ }
+
+ rte_spinlock_unlock(&vsi_update_lock);
+
+ free(param);
+
+ return NULL;
+}
+
+static void
+start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id)
+{
+ struct ice_dcf_reset_event_param *param;
+ pthread_t thread;
+ int ret;
+
+ param = malloc(sizeof(*param));
+ if (!param) {
+ PMD_DRV_LOG(ERR, "Failed to allocate the memory for reset handling");
+ return;
+ }
+
+ param->dcf_hw = dcf_hw;
+ param->vfr = vfr;
+ param->vf_id = vf_id;
+
+ ret = pthread_create(&thread, NULL,
+ ice_dcf_vsi_update_service_handler, param);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to start the thread for reset handling");
+ free(param);
+ }
+}