The DCF PMD needs to support rte_eth_dev_reset. The reason is that when
a DCF instance is killed, all of its flow rules still exist in hardware;
by the time the DCF reconnects, it has already lost the flow context, so
if the application tries to create new rules, the operation may fail
because the firmware reports that the rules already exist.
The rte_eth_dev_reset API provides a more elegant way for the
application to reset the DCF when a reconnect happens.
Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
+ if (hw->resetting)
+ return 0;
+
err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
NULL, 0);
if (err) {
err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
NULL, 0);
if (err) {
iavf_shutdown_adminq(&hw->avf);
rte_free(hw->arq_buf);
iavf_shutdown_adminq(&hw->avf);
rte_free(hw->arq_buf);
rte_free(hw->vf_vsi_map);
rte_free(hw->vf_vsi_map);
+ hw->vf_vsi_map = NULL;
+
rte_free(hw->qos_bw_cfg);
rte_free(hw->qos_bw_cfg);
+ hw->qos_bw_cfg = NULL;
+
+ rte_free(hw->ets_config);
+ hw->ets_config = NULL;
struct dcf_virtchnl_cmd args;
int err;
struct dcf_virtchnl_cmd args;
int err;
+ if (hw->resetting)
+ return 0;
+
memset(&queue_select, 0, sizeof(queue_select));
queue_select.vsi_id = hw->vsi_res->vsi_id;
memset(&queue_select, 0, sizeof(queue_select));
queue_select.vsi_id = hw->vsi_res->vsi_id;
struct dcf_virtchnl_cmd args;
int len, err = 0;
struct dcf_virtchnl_cmd args;
int len, err = 0;
+ if (hw->resetting) {
+ if (!add)
+ return 0;
+
+ PMD_DRV_LOG(ERR, "fail to add all MACs for VF resetting");
+ return -EIO;
+ }
+
len = sizeof(struct virtchnl_ether_addr_list);
addr = hw->eth_dev->data->mac_addrs;
len += sizeof(struct virtchnl_ether_addr);
len = sizeof(struct virtchnl_ether_addr_list);
addr = hw->eth_dev->data->mac_addrs;
len += sizeof(struct virtchnl_ether_addr);
/* Link status */
bool link_up;
uint32_t link_speed;
/* Link status */
bool link_up;
uint32_t link_speed;
};
int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
};
int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
+static int
+ice_dcf_dev_init(struct rte_eth_dev *eth_dev);
+
+static int
+ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);
+
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **bufs,
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **bufs,
struct ice_dcf_hw *hw = &dcf_ad->real_hw;
int ret;
struct ice_dcf_hw *hw = &dcf_ad->real_hw;
int ret;
+ if (hw->resetting) {
+ PMD_DRV_LOG(ERR,
+ "The DCF has been reset by PF, please reinit first");
+ return -EIO;
+ }
+
ad->pf.adapter_stopped = 0;
hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
ad->pf.adapter_stopped = 0;
hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
txq->tx_rel_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
txq->tx_rel_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ dev->data->tx_queues[i] = NULL;
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
rxq->rx_rel_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
rxq->rx_rel_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ dev->data->rx_queues[i] = NULL;
struct virtchnl_eth_stats pstats;
int ret;
struct virtchnl_eth_stats pstats;
int ret;
+ if (hw->resetting) {
+ PMD_DRV_LOG(ERR,
+ "The DCF has been reset by PF, please reinit first");
+ return -EIO;
+ }
+
ret = ice_dcf_query_stats(hw, &pstats);
if (ret == 0) {
ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
ret = ice_dcf_query_stats(hw, &pstats);
if (ret == 0) {
ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
struct virtchnl_eth_stats pstats;
int ret;
struct virtchnl_eth_stats pstats;
int ret;
+ if (hw->resetting)
+ return 0;
+
/* read stat values to clear hardware registers */
ret = ice_dcf_query_stats(hw, &pstats);
if (ret != 0)
/* read stat values to clear hardware registers */
ret = ice_dcf_query_stats(hw, &pstats);
if (ret != 0)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ (void)ice_dcf_dev_stop(dev);
+
ice_dcf_free_repr_info(adapter);
ice_dcf_uninit_parent_adapter(dev);
ice_dcf_uninit_hw(dev, &adapter->real_hw);
ice_dcf_free_repr_info(adapter);
ice_dcf_uninit_parent_adapter(dev);
ice_dcf_uninit_hw(dev, &adapter->real_hw);
+/*
+ * Reset the DCF device on behalf of rte_eth_dev_reset().
+ *
+ * Tears the device down (ice_dcf_dev_uninit) and then brings it back up
+ * (ice_dcf_dev_init), giving the application a clean device state after
+ * the PF has reset the DCF and the old flow context has been lost.
+ *
+ * Returns 0 on success, or the negative error code from uninit/init.
+ */
+static int
+ice_dcf_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = ice_dcf_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = ice_dcf_dev_init(dev);
+
+ return ret;
+}
+
static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
.dev_start = ice_dcf_dev_start,
.dev_stop = ice_dcf_dev_stop,
.dev_close = ice_dcf_dev_close,
static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
.dev_start = ice_dcf_dev_start,
.dev_stop = ice_dcf_dev_stop,
.dev_close = ice_dcf_dev_close,
+ .dev_reset = ice_dcf_dev_reset,
.dev_configure = ice_dcf_dev_configure,
.dev_infos_get = ice_dcf_dev_info_get,
.rx_queue_setup = ice_rx_queue_setup,
.dev_configure = ice_dcf_dev_configure,
.dev_infos_get = ice_dcf_dev_info_get,
.rx_queue_setup = ice_rx_queue_setup,
{
struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
{
struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
+ adapter->real_hw.resetting = false;
eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
start_vsi_reset_thread(dcf_hw, false, 0);
case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
start_vsi_reset_thread(dcf_hw, false, 0);
+ dcf_hw->resetting = true;
break;
case VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
break;
case VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
err_unroll_alloc:
ice_free(hw, hw->port_info);
hw->port_info = NULL;
err_unroll_alloc:
ice_free(hw, hw->port_info);
hw->port_info = NULL;
+ hw->switch_info = NULL;
ice_free(hw, hw->port_info);
hw->port_info = NULL;
ice_free(hw, hw->port_info);
hw->port_info = NULL;
+ hw->switch_info = NULL;
ice_clear_all_vsi_ctx(hw);
}
ice_clear_all_vsi_ctx(hw);
}
struct ice_hw *parent_hw = &parent_adapter->hw;
eth_dev->data->mac_addrs = NULL;
struct ice_hw *parent_hw = &parent_adapter->hw;
eth_dev->data->mac_addrs = NULL;
+ rte_free(parent_adapter->pf.main_vsi);
+ parent_adapter->pf.main_vsi = NULL;
ice_flow_uninit(parent_adapter);
ice_dcf_uninit_parent_hw(parent_hw);
ice_flow_uninit(parent_adapter);
ice_dcf_uninit_parent_hw(parent_hw);