return rte_eth_linkstatus_set(dev, &new_link);
}
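+/* An AdminQ command should be retried only when the device acts as a DCF
+ * and the DCF state of the associated PF is currently off.
+ */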
+bool
+ice_dcf_adminq_need_retry(struct ice_adapter *ad)
+{
+ return ad->hw.dcf_enabled &&
+ !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
+}
+
/* Add UDP tunneling port */
static int
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
+ struct ice_adapter *parent_adapter = &adapter->parent;
eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
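+ /* Record that the DCF state is off since the HW init failed. */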
+ __atomic_store_n(&parent_adapter->dcf_state_on, false,
+ __ATOMIC_RELAXED);
return -1;
}
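+ /* HW init succeeded: mark the DCF state of the associated PF as on. */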
+ __atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
+
if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
int ice_dcf_vf_repr_uninit(struct rte_eth_dev *vf_rep_eth_dev);
int ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev);
void ice_dcf_vf_repr_stop_all(struct ice_dcf_adapter *dcf_adapter);
+bool ice_dcf_adminq_need_retry(struct ice_adapter *ad);
#endif /* _ICE_DCF_ETHDEV_H_ */
{
struct ice_dcf_reset_event_param *reset_param = param;
struct ice_dcf_hw *hw = reset_param->dcf_hw;
- struct ice_dcf_adapter *adapter;
+ struct ice_dcf_adapter *adapter =
+ container_of(hw, struct ice_dcf_adapter, real_hw);
+ struct ice_adapter *parent_adapter = &adapter->parent;
pthread_detach(pthread_self());
rte_spinlock_lock(&vsi_update_lock);
- adapter = container_of(hw, struct ice_dcf_adapter, real_hw);
-
- if (!ice_dcf_handle_vsi_update_event(hw))
+ if (!ice_dcf_handle_vsi_update_event(hw)) {
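+ /* The VSI map update was handled successfully, so the DCF
+ * state can be turned back on.
+ */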
+ __atomic_store_n(&parent_adapter->dcf_state_on, true,
+ __ATOMIC_RELAXED);
ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
hw->num_vfs, hw->vf_vsi_map);
+ }
if (reset_param->vfr && adapter->repr_infos) {
struct rte_eth_dev *vf_rep_eth_dev =
uint8_t *msg, uint16_t msglen)
{
struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg;
+ struct ice_dcf_adapter *adapter =
+ container_of(dcf_hw, struct ice_dcf_adapter, real_hw);
+ struct ice_adapter *parent_adapter = &adapter->parent;
if (msglen < sizeof(struct virtchnl_pf_event)) {
PMD_DRV_LOG(DEBUG, "Invalid event message length : %u", msglen);
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
pf_msg->event_data.vf_vsi_map.vf_id,
pf_msg->event_data.vf_vsi_map.vsi_id);
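+ /* A VSI map update is pending: turn the DCF state off until
+ * the reset thread has handled the event.
+ */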
+ __atomic_store_n(&parent_adapter->dcf_state_on, false,
+ __ATOMIC_RELAXED);
start_vsi_reset_thread(dcf_hw, true,
pf_msg->event_data.vf_vsi_map.vf_id);
break;
uint64_t time_hw;
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
+ /* True if the DCF state of the associated PF is on */
+ bool dcf_state_on;
#ifdef RTE_ARCH_X86
bool rx_use_avx2;
bool rx_use_avx512;
ret = ice_flow_destroy(dev, p_flow, error);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to flush flows");
- return -EINVAL;
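+ /* Preserve -EAGAIN so that the flush can be retried later. */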
+ if (ret != -EAGAIN)
+ ret = -EINVAL;
+ return ret;
}
}
"lookup list should not be NULL");
goto error;
}
+
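+ /* The rule is added through the AdminQ; if the DCF is off,
+ * ask the caller to retry with EAGAIN instead of failing.
+ */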
+ if (ice_dcf_adminq_need_retry(ad)) {
+ rte_flow_error_set(error, EAGAIN,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "DCF is not on");
+ goto error;
+ }
+
ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
if (!ret) {
filter_conf_ptr = rte_zmalloc("ice_switch_filter",
flow->rule = filter_conf_ptr;
} else {
- rte_flow_error_set(error, EINVAL,
+ if (ice_dcf_adminq_need_retry(ad))
+ ret = -EAGAIN;
+ else
+ ret = -EINVAL;
+
+ rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"switch filter create flow fail");
goto error;
return -rte_errno;
}
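+ /* Do not try to remove the rule while the DCF is off,
+ * ask the caller to retry with EAGAIN instead.
+ */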
+ if (ice_dcf_adminq_need_retry(ad)) {
+ rte_flow_error_set(error, EAGAIN,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "DCF is not on");
+ return -rte_errno;
+ }
+
ret = ice_rem_adv_rule_by_id(hw, &filter_conf_ptr->sw_query_data);
if (ret) {
- rte_flow_error_set(error, EINVAL,
+ if (ice_dcf_adminq_need_retry(ad))
+ ret = -EAGAIN;
+ else
+ ret = -EINVAL;
+
+ rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"fail to destroy switch filter rule");
return -rte_errno;
}
rmv_rule:
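+ /* Removing the old rule goes through the AdminQ, so bail out
+ * with -EAGAIN while the DCF is off.
+ */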
+ if (ice_dcf_adminq_need_retry(ad)) {
+ PMD_DRV_LOG(WARNING, "DCF is not on");
+ ret = -EAGAIN;
+ goto out;
+ }
+
/* Remove the old rule */
ret = ice_rem_adv_rule(hw, lkups_ref, lkups_cnt, &rinfo);
if (ret) {
}
add_rule:
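+ /* Re-adding the rule also goes through the AdminQ, so bail out
+ * with -EAGAIN while the DCF is off.
+ */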
+ if (ice_dcf_adminq_need_retry(ad)) {
+ PMD_DRV_LOG(WARNING, "DCF is not on");
+ ret = -EAGAIN;
+ goto out;
+ }
+
/* Update VSI context */
hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
}
out:
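+ /* Map a generic -EINVAL to -EAGAIN when the failure was caused
+ * by the DCF being off, so the caller can retry later.
+ */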
+ if (ret == -EINVAL && ice_dcf_adminq_need_retry(ad))
+ ret = -EAGAIN;
+
ice_free(hw, lkups_dp);
return ret;
}