diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index f73dc80bd9..b8a537cb85 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -4,6 +4,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 
@@ -32,6 +33,12 @@ static int
 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 				struct rte_eth_udp_tunnel *udp_tunnel);
 
+static int
+ice_dcf_dev_init(struct rte_eth_dev *eth_dev);
+
+static int
+ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);
+
 static uint16_t
 ice_dcf_recv_pkts(__rte_unused void *rx_queue,
 		  __rte_unused struct rte_mbuf **bufs,
@@ -59,14 +66,13 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
 	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 	rxq->rx_hdr_len = 0;
 	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
-	max_pkt_len = RTE_MIN((uint32_t)
-			      ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
-			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
+	max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+			      dev->data->mtu + ICE_ETH_OVERHEAD);
 
 	/* Check if the jumbo frame and maximum packet length are set
 	 * correctly.
 	 */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev_data->mtu > RTE_ETHER_MTU) {
 		if (max_pkt_len <= ICE_ETH_MAX_LEN ||
 		    max_pkt_len > ICE_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -172,10 +178,15 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
 			/* If WB_ON_ITR supports, enable it */
 			hw->msix_base = IAVF_RX_VEC_START;
+			/* Set the ITR for index zero, to 2us to make sure that
+			 * we leave time for aggregation to occur, but don't
+			 * increase latency dramatically.
+			 */
 			IAVF_WRITE_REG(&hw->avf,
 				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
-				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+				       (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
+				       (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
 		} else {
 			/* If no WB_ON_ITR offload flags, need to set
 			 * interrupt for descriptor write back.
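
The rewritten IAVF_WRITE_REG() call above replaces the all-ones ITR_INDX mask with an
explicit field encoding: ITR index 0, write-back on ITR enabled, and an interval value
of 2, which the in-code comment equates to roughly 2us. Below is a minimal standalone
sketch of how that 32-bit DYN_CTLN1 value decomposes; the shift and mask values are
assumptions modelled on the iavf register layout and should be checked against
drivers/common/iavf/iavf_register.h before being relied on:

    #include <stdint.h>
    #include <stdio.h>

    #define ITR_INDX_SHIFT  3           /* assumed IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT */
    #define INTERVAL_SHIFT  5           /* assumed IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT */
    #define WB_ON_ITR_MASK  (1u << 30)  /* assumed IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK */

    int main(void)
    {
        /* ITR index 0, write-back on ITR, interval field set to 2,
         * mirroring the three operands of the register write above.
         */
        uint32_t reg = (0u << ITR_INDX_SHIFT) |
                       WB_ON_ITR_MASK |
                       (2u << INTERVAL_SHIFT);

        printf("DYN_CTLN1 = 0x%08x\n", (unsigned int)reg);  /* prints 0x40000040 */
        return 0;
    }
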
@@ -511,6 +522,12 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
 	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
 	int ret;
 
+	if (hw->resetting) {
+		PMD_DRV_LOG(ERR,
+			    "The DCF has been reset by PF, please reinit first");
+		return -EIO;
+	}
+
 	ad->pf.adapter_stopped = 0;
 
 	hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
@@ -664,7 +681,6 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
 		DEV_RX_OFFLOAD_TCP_CKSUM |
 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
 		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
 		DEV_RX_OFFLOAD_VLAN_FILTER |
 		DEV_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
@@ -805,6 +821,12 @@ ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	struct virtchnl_eth_stats pstats;
 	int ret;
 
+	if (hw->resetting) {
+		PMD_DRV_LOG(ERR,
+			    "The DCF has been reset by PF, please reinit first");
+		return -EIO;
+	}
+
 	ret = ice_dcf_query_stats(hw, &pstats);
 	if (ret == 0) {
 		ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
@@ -831,6 +853,9 @@ ice_dcf_stats_reset(struct rte_eth_dev *dev)
 	struct virtchnl_eth_stats pstats;
 	int ret;
 
+	if (hw->resetting)
+		return 0;
+
 	/* read stat values to clear hardware registers */
 	ret = ice_dcf_query_stats(hw, &pstats);
 	if (ret != 0)
@@ -874,6 +899,10 @@ ice_dcf_dev_close(struct rte_eth_dev *dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
+	(void)ice_dcf_dev_stop(dev);
+
+	ice_free_queues(dev);
+
 	ice_dcf_free_repr_info(adapter);
 	ice_dcf_uninit_parent_adapter(dev);
 	ice_dcf_uninit_hw(dev, &adapter->real_hw);
@@ -881,11 +910,59 @@ ice_dcf_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
-static int
-ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
+int
+ice_dcf_link_update(struct rte_eth_dev *dev,
 		    __rte_unused int wait_to_complete)
 {
-	return 0;
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &ad->real_hw;
+	struct rte_eth_link new_link;
+
+	memset(&new_link, 0, sizeof(new_link));
+
+	/* Only read status info stored in VF, and the info is updated
+	 * when receive LINK_CHANGE event from PF by virtchnl.
+	 */
+	switch (hw->link_speed) {
+	case 10:
+		new_link.link_speed = ETH_SPEED_NUM_10M;
+		break;
+	case 100:
+		new_link.link_speed = ETH_SPEED_NUM_100M;
+		break;
+	case 1000:
+		new_link.link_speed = ETH_SPEED_NUM_1G;
+		break;
+	case 10000:
+		new_link.link_speed = ETH_SPEED_NUM_10G;
+		break;
+	case 20000:
+		new_link.link_speed = ETH_SPEED_NUM_20G;
+		break;
+	case 25000:
+		new_link.link_speed = ETH_SPEED_NUM_25G;
+		break;
+	case 40000:
+		new_link.link_speed = ETH_SPEED_NUM_40G;
+		break;
+	case 50000:
+		new_link.link_speed = ETH_SPEED_NUM_50G;
+		break;
+	case 100000:
+		new_link.link_speed = ETH_SPEED_NUM_100G;
+		break;
+	default:
+		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		break;
+	}
+
+	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = hw->link_up ? ETH_LINK_UP :
+					     ETH_LINK_DOWN;
+	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+				  ETH_LINK_SPEED_FIXED);
+
+	return rte_eth_linkstatus_set(dev, &new_link);
 }
 
 /* Add UDP tunneling port */
@@ -946,16 +1023,43 @@ ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 	return ret;
 }
 
+static int
+ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+		   void *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	*(const void **)arg = &ice_dcf_tm_ops;
+
+	return 0;
+}
+
+static int
+ice_dcf_dev_reset(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	ret = ice_dcf_dev_uninit(dev);
+	if (ret)
+		return ret;
+
+	ret = ice_dcf_dev_init(dev);
+
+	return ret;
+}
+
 static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
 	.dev_start = ice_dcf_dev_start,
 	.dev_stop = ice_dcf_dev_stop,
 	.dev_close = ice_dcf_dev_close,
+	.dev_reset = ice_dcf_dev_reset,
 	.dev_configure = ice_dcf_dev_configure,
 	.dev_infos_get = ice_dcf_dev_info_get,
 	.rx_queue_setup = ice_rx_queue_setup,
 	.tx_queue_setup = ice_tx_queue_setup,
-	.rx_queue_release = ice_rx_queue_release,
-	.tx_queue_release = ice_tx_queue_release,
+	.rx_queue_release = ice_dev_rx_queue_release,
+	.tx_queue_release = ice_dev_tx_queue_release,
 	.rx_queue_start = ice_dcf_rx_queue_start,
 	.tx_queue_start = ice_dcf_tx_queue_start,
 	.rx_queue_stop = ice_dcf_rx_queue_stop,
@@ -970,6 +1074,7 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
 	.flow_ops_get = ice_dcf_dev_flow_ops_get,
 	.udp_tunnel_port_add = ice_dcf_dev_udp_tunnel_port_add,
 	.udp_tunnel_port_del = ice_dcf_dev_udp_tunnel_port_del,
+	.tm_ops_get = ice_dcf_tm_ops_get,
 };
 
 static int
@@ -977,6 +1082,7 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
 
+	adapter->real_hw.resetting = false;
 	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
 	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
 	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
@@ -984,8 +1090,6 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
-
 	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
 	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
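
Taken together, the new resetting flag and the .dev_reset callback give applications a
recovery path: once the PF resets the DCF, the guarded ops above return -EIO, and the
port can then be rebuilt through the standard ethdev reset flow. Below is a minimal
sketch of that flow, assuming the application re-applies its own configuration
afterwards; recover_dcf_port() and its parameters are hypothetical, while the rte_eth_*
calls are the standard ethdev API:

    #include <rte_ethdev.h>

    /* Hypothetical helper: recover a DCF port whose ops report -EIO after a
     * PF-triggered reset.  rte_eth_dev_reset() ends up in ice_dcf_dev_reset(),
     * i.e. ice_dcf_dev_uninit() followed by ice_dcf_dev_init(), after which
     * the port must be configured from scratch.
     */
    static int
    recover_dcf_port(uint16_t port_id, const struct rte_eth_conf *conf,
                     uint16_t nb_rxq, uint16_t nb_txq)
    {
        int ret;

        ret = rte_eth_dev_reset(port_id);
        if (ret != 0)
            return ret;

        ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, conf);
        if (ret != 0)
            return ret;

        /* Rx/Tx queue setup omitted: the application re-creates its queues
         * here exactly as it did at initial setup, then restarts the port.
         */
        return rte_eth_dev_start(port_id);
    }
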