#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL 100000 /* us */
static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;
+/* Context handed to the VSI-update service thread. Heap-allocated by the
+ * event dispatcher (start_vsi_reset_thread) and freed by the worker once
+ * the event has been handled.
+ */
+struct ice_dcf_reset_event_param {
+	struct ice_dcf_hw *dcf_hw;
+
+	bool vfr; /* VF reset event */
+	uint16_t vf_id; /* The reset VF ID */
+};
+
static __rte_always_inline void
ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
uint16_t vsi_map)
{
struct ice_vsi_ctx *vsi_ctx;
+ bool first_update = false;
+ uint16_t new_vsi_num;
if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
vsi_handle);
return;
}
+ hw->vsi_ctx[vsi_handle] = vsi_ctx;
+ first_update = true;
}
- vsi_ctx->vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
- VIRTCHNL_DCF_VF_VSI_ID_S;
- hw->vsi_ctx[vsi_handle] = vsi_ctx;
+ new_vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
+ VIRTCHNL_DCF_VF_VSI_ID_S;
+
+ /* Redirect rules if vsi mapping table changes. */
+ if (!first_update) {
+ struct ice_flow_redirect rd;
+
+ memset(&rd, 0, sizeof(struct ice_flow_redirect));
+ rd.type = ICE_FLOW_REDIRECT_VSI;
+ rd.vsi_handle = vsi_handle;
+ rd.new_vsi_num = new_vsi_num;
+ ice_flow_redirect((struct ice_adapter *)hw->back, &rd);
+ } else {
+ vsi_ctx->vsi_num = new_vsi_num;
+ }
PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
vsi_handle, vsi_ctx->vsi_num);
ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
}
+/* Record the PF's VSI number in the shadow vsi_ctx[] table.
+ *
+ * @hw:         parent ice_hw owning the vsi_ctx[] table
+ * @pf_vsi_idx: table index (VSI handle) reserved for the PF entry
+ * @pf_vsi_num: hardware VSI number to record for the PF
+ *
+ * The context entry is allocated on first use; on allocation failure an
+ * error is logged and the table is left unchanged.
+ */
+static void
+ice_dcf_update_pf_vsi_map(struct ice_hw *hw, uint16_t pf_vsi_idx,
+			uint16_t pf_vsi_num)
+{
+	struct ice_vsi_ctx *vsi_ctx;
+
+	if (unlikely(pf_vsi_idx >= ICE_MAX_VSI)) {
+		PMD_DRV_LOG(ERR, "Invalid vsi handle %u", pf_vsi_idx);
+		return;
+	}
+
+	vsi_ctx = hw->vsi_ctx[pf_vsi_idx];
+
+	/* First update for this handle: allocate a fresh context entry. */
+	if (!vsi_ctx)
+		vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
+
+	if (!vsi_ctx) {
+		PMD_DRV_LOG(ERR, "No memory for vsi context %u",
+			    pf_vsi_idx);
+		return;
+	}
+
+	vsi_ctx->vsi_num = pf_vsi_num;
+	hw->vsi_ctx[pf_vsi_idx] = vsi_ctx;
+
+	PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
+		    pf_vsi_idx, vsi_ctx->vsi_num);
+}
+
+/* One-shot worker spawned on a PF event: give the PF a short grace period
+ * to finish the reset, then refresh the cached VF->VSI map under
+ * vsi_update_lock and, for a VF reset, re-apply the representor's VLAN
+ * configuration. Frees its heap-allocated parameter block before exiting.
+ */
static void*
ice_dcf_vsi_update_service_handler(void *param)
{
-	struct ice_dcf_hw *hw = param;
+	struct ice_dcf_reset_event_param *reset_param = param;
+	struct ice_dcf_hw *hw = reset_param->dcf_hw;
+	struct ice_dcf_adapter *adapter;
	usleep(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
	rte_spinlock_lock(&vsi_update_lock);
-	if (!ice_dcf_handle_vsi_update_event(hw)) {
-		struct ice_dcf_adapter *dcf_ad =
-			container_of(hw, struct ice_dcf_adapter, real_hw);
+	adapter = container_of(hw, struct ice_dcf_adapter, real_hw);
-	ice_dcf_update_vf_vsi_map(&dcf_ad->parent.hw,
+	if (!ice_dcf_handle_vsi_update_event(hw))
+		ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
		hw->num_vfs, hw->vf_vsi_map);
+
+	/* On a VF reset, re-initialize VLAN on that VF's representor if the
+	 * representor port is started -- presumably to restore VLAN state
+	 * lost across the reset (TODO confirm against
+	 * ice_dcf_vf_repr_init_vlan()).
+	 */
+	if (reset_param->vfr && adapter->repr_infos) {
+		struct rte_eth_dev *vf_rep_eth_dev =
+			adapter->repr_infos[reset_param->vf_id].vf_rep_eth_dev;
+		if (vf_rep_eth_dev && vf_rep_eth_dev->data->dev_started) {
+			PMD_DRV_LOG(DEBUG, "VF%u representor is resetting",
+				    reset_param->vf_id);
+			ice_dcf_vf_repr_init_vlan(vf_rep_eth_dev);
+		}
	}
	rte_spinlock_unlock(&vsi_update_lock);
+	/* param was malloc'ed by start_vsi_reset_thread(); the worker owns
+	 * it and must release it.
+	 */
+	free(param);
+
	return NULL;
}
+/* Spawn a one-shot service thread to process a PF reset / VSI-map-update
+ * event.
+ *
+ * @dcf_hw: DCF hardware context handed to the worker
+ * @vfr:    true when the event is a VF reset
+ * @vf_id:  ID of the VF being reset (meaningful only when @vfr is true)
+ *
+ * The parameter block is heap-allocated here and freed by the worker; on
+ * thread-creation failure it is freed locally so nothing leaks.
+ */
+static void
+start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id)
+{
+	struct ice_dcf_reset_event_param *param;
+	pthread_t thread;
+	int ret;
+
+	param = malloc(sizeof(*param));
+	if (!param) {
+		PMD_DRV_LOG(ERR, "Failed to allocate the memory for reset handling");
+		return;
+	}
+
+	param->dcf_hw = dcf_hw;
+	param->vfr = vfr;
+	param->vf_id = vf_id;
+
+	ret = pthread_create(&thread, NULL,
+			     ice_dcf_vsi_update_service_handler, param);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to start the thread for reset handling");
+		free(param);
+		return;
+	}
+
+	/* Nobody ever joins the worker; detach it so that its resources are
+	 * reclaimed automatically when it exits instead of leaking a
+	 * joinable-thread record on every reset event.
+	 */
+	pthread_detach(thread);
+}
+
void
ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
uint8_t *msg, uint16_t msglen)
{
struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg;
- pthread_t thread;
if (msglen < sizeof(struct virtchnl_pf_event)) {
PMD_DRV_LOG(DEBUG, "Invalid event message length : %u", msglen);
switch (pf_msg->event) {
case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
- pthread_create(&thread, NULL,
- ice_dcf_vsi_update_service_handler, dcf_hw);
+ start_vsi_reset_thread(dcf_hw, false, 0);
break;
case VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
pf_msg->event_data.vf_vsi_map.vf_id,
pf_msg->event_data.vf_vsi_map.vsi_id);
- pthread_create(&thread, NULL,
- ice_dcf_vsi_update_service_handler, dcf_hw);
+ start_vsi_reset_thread(dcf_hw, true,
+ pf_msg->event_data.vf_vsi_map.vf_id);
break;
default:
PMD_DRV_LOG(ERR, "Unknown event received %u", pf_msg->event);
/* Initialize port_info struct with PHY capabilities */
status = ice_aq_get_phy_caps(hw->port_info, false,
- ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
+ ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
ice_free(hw, pcaps);
if (status)
goto err_unroll_alloc;
{
struct ice_dcf_adapter *dcf_adapter =
container_of(hw, struct ice_dcf_adapter, parent.hw);
+ struct virtchnl_pkg_info pkg_info;
+ struct dcf_virtchnl_cmd vc_cmd;
+ uint64_t dsn;
+
+ vc_cmd.v_op = VIRTCHNL_OP_DCF_GET_PKG_INFO;
+ vc_cmd.req_msglen = 0;
+ vc_cmd.req_msg = NULL;
+ vc_cmd.rsp_buflen = sizeof(pkg_info);
+ vc_cmd.rsp_msgbuf = (uint8_t *)&pkg_info;
+
+ if (ice_dcf_execute_virtchnl_cmd(&dcf_adapter->real_hw, &vc_cmd))
+ goto pkg_file_direct;
- /* TODO: check with DSN firstly by iAVF */
- PMD_INIT_LOG(DEBUG,
- "DCF VSI_ID = %u",
- dcf_adapter->real_hw.vsi_id);
+ rte_memcpy(&dsn, pkg_info.dsn, sizeof(dsn));
+ snprintf(pkg_name, ICE_MAX_PKG_FILENAME_SIZE,
+ ICE_PKG_FILE_SEARCH_PATH_UPDATES "ice-%016llx.pkg",
+ (unsigned long long)dsn);
+ if (!access(pkg_name, 0))
+ return 0;
+
+ snprintf(pkg_name, ICE_MAX_PKG_FILENAME_SIZE,
+ ICE_PKG_FILE_SEARCH_PATH_DEFAULT "ice-%016llx.pkg",
+ (unsigned long long)dsn);
+ if (!access(pkg_name, 0))
+ return 0;
+
+pkg_file_direct:
snprintf(pkg_name,
ICE_MAX_PKG_FILENAME_SIZE, "%s", ICE_PKG_FILE_UPDATES);
if (!access(pkg_name, 0))
parent_adapter->eth_dev = eth_dev;
parent_adapter->pf.adapter = parent_adapter;
parent_adapter->pf.dev_data = eth_dev->data;
+ /* create a dummy main_vsi */
+ parent_adapter->pf.main_vsi =
+ rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
+ if (!parent_adapter->pf.main_vsi)
+ return -ENOMEM;
+ parent_adapter->pf.main_vsi->adapter = parent_adapter;
+ parent_adapter->pf.adapter_stopped = 1;
+
parent_hw->back = parent_adapter;
parent_hw->mac_type = ICE_MAC_GENERIC;
parent_hw->vendor_id = ICE_INTEL_VENDOR_ID;
}
parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
+ parent_adapter->pf.main_vsi->idx = hw->num_vfs;
+ ice_dcf_update_pf_vsi_map(parent_hw,
+ parent_adapter->pf.main_vsi->idx, hw->pf_vsi_id);
+
+ ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
+
err = ice_flow_init(parent_adapter);
if (err) {
PMD_INIT_LOG(ERR, "Failed to initialize flow");
goto uninit_hw;
}
- ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
-
mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
if (rte_is_valid_assigned_ether_addr(mac))
rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);