net/mlx5: support more tunnel types
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 69fe6e6..b8a537c 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -4,6 +4,7 @@
 
 #include <errno.h>
 #include <stdbool.h>
+#include <sys/queue.h>
 #include <sys/types.h>
 #include <unistd.h>
 
@@ -32,6 +33,12 @@ static int
 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel);
 
+static int
+ice_dcf_dev_init(struct rte_eth_dev *eth_dev);
+
+static int
+ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);
+
 static uint16_t
 ice_dcf_recv_pkts(__rte_unused void *rx_queue,
                  __rte_unused struct rte_mbuf **bufs,
@@ -59,14 +66,13 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
        buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
        rxq->rx_hdr_len = 0;
        rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
-       max_pkt_len = RTE_MIN((uint32_t)
-                             ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
-                             dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+                             dev->data->mtu + ICE_ETH_OVERHEAD);
 
-       /* Check if the jumbo frame and maximum packet length are set
+       /* Check if the MTU and maximum packet length are set
         * correctly.
         */
-       if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+       if (dev->data->mtu > RTE_ETHER_MTU) {
                if (max_pkt_len <= ICE_ETH_MAX_LEN ||
                    max_pkt_len > ICE_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
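
The replacement above sizes the Rx frame budget from the device MTU instead
of rxmode.max_rx_pkt_len. A standalone sketch of the calculation, with
assumed stand-ins for ICE_ETH_OVERHEAD (Ethernet header + CRC + two VLAN
tags) and for ICE_SUPPORT_CHAIN_NUM:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed stand-ins for the driver constants. */
    #define ETH_OVERHEAD       26u /* 14 (eth hdr) + 4 (CRC) + 2 * 4 (VLAN) */
    #define SUPPORT_CHAIN_NUM   5u /* assumed scatter chain depth */

    static uint32_t
    frame_budget(uint32_t mtu, uint32_t rx_buf_len)
    {
            uint32_t by_mtu   = mtu + ETH_OVERHEAD;
            uint32_t by_chain = SUPPORT_CHAIN_NUM * rx_buf_len;

            /* The frame must fit both the wire size implied by the MTU
             * and the total capacity of the chained Rx buffers.
             */
            return by_mtu < by_chain ? by_mtu : by_chain;
    }

    int
    main(void)
    {
            printf("%u\n", frame_budget(1500, 2048)); /* 1526 */
            printf("%u\n", frame_budget(9000, 2048)); /* 9026 */
            return 0;
    }

The result is then range-checked against ICE_ETH_MAX_LEN and
ICE_FRAME_SIZE_MAX, as the hunk above shows.
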
@@ -172,10 +178,15 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
                        /* If WB_ON_ITR supports, enable it */
                        hw->msix_base = IAVF_RX_VEC_START;
+                       /* Set the ITR for index zero to 2us, so that we
+                        * leave time for aggregation to occur without
+                        * increasing latency dramatically.
+                        */
                        IAVF_WRITE_REG(&hw->avf,
                                       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
-                                      IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-                                      IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+                                      (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+                                      IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
+                                      (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
                } else {
                        /* If no WB_ON_ITR offload flags, need to set
                         * interrupt for descriptor write back.
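
The register write above packs three fields into one DYN_CTLN1 value. A
self-contained sketch of that composition, assuming the usual iavf bit
layout (ITR index at bits 4:3, interval at bits 16:5, write-back-on-ITR
enable at bit 30):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed bit positions, mirroring the IAVF_VFINT_DYN_CTLN1_* macros. */
    #define ITR_INDX_SHIFT   3          /* ITR index field, bits 4:3 */
    #define INTERVAL_SHIFT   5          /* interval field, bits 16:5 */
    #define WB_ON_ITR_MASK   (1u << 30) /* write-back-on-ITR enable bit */

    int
    main(void)
    {
            uint32_t reg = (0u << ITR_INDX_SHIFT)  /* select ITR index 0 */
                         | WB_ON_ITR_MASK          /* defer write-back to ITR */
                         | (2u << INTERVAL_SHIFT); /* short interval */

            printf("0x%08x\n", reg); /* 0x40000040 */
            return 0;
    }

Selecting ITR index 0 with an explicit interval, rather than the previous
ITR_INDX_MASK ("no ITR") setting, is what bounds the write-back latency.
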
@@ -511,6 +522,12 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
        struct ice_dcf_hw *hw = &dcf_ad->real_hw;
        int ret;
 
+       if (hw->resetting) {
+               PMD_DRV_LOG(ERR,
+                           "The DCF has been reset by PF, please reinit first");
+               return -EIO;
+       }
+
        ad->pf.adapter_stopped = 0;
 
        hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
@@ -622,7 +639,6 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
        ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
        dev->data->dev_link.link_status = ETH_LINK_DOWN;
        ad->pf.adapter_stopped = 1;
-       dcf_ad->real_hw.tm_conf.committed = false;
 
        return 0;
 }
@@ -665,7 +681,6 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_SCATTER |
-               DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_VLAN_FILTER |
                DEV_RX_OFFLOAD_RSS_HASH;
        dev_info->tx_offload_capa =
@@ -806,6 +821,12 @@ ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        struct virtchnl_eth_stats pstats;
        int ret;
 
+       if (hw->resetting) {
+               PMD_DRV_LOG(ERR,
+                           "The DCF has been reset by PF, please reinit first");
+               return -EIO;
+       }
+
        ret = ice_dcf_query_stats(hw, &pstats);
        if (ret == 0) {
                ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
@@ -832,6 +853,9 @@ ice_dcf_stats_reset(struct rte_eth_dev *dev)
        struct virtchnl_eth_stats pstats;
        int ret;
 
+       if (hw->resetting)
+               return 0;
+
        /* read stat values to clear hardware registers */
        ret = ice_dcf_query_stats(hw, &pstats);
        if (ret != 0)
@@ -875,6 +899,11 @@ ice_dcf_dev_close(struct rte_eth_dev *dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       (void)ice_dcf_dev_stop(dev);
+
+       ice_free_queues(dev);
+
        ice_dcf_free_repr_info(adapter);
        ice_dcf_uninit_parent_adapter(dev);
        ice_dcf_uninit_hw(dev, &adapter->real_hw);
@@ -1007,16 +1035,31 @@ ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
        return 0;
 }
 
+static int
+ice_dcf_dev_reset(struct rte_eth_dev *dev)
+{
+       int ret;
+
+       ret = ice_dcf_dev_uninit(dev);
+       if (ret)
+               return ret;
+
+       ret = ice_dcf_dev_init(dev);
+
+       return ret;
+}
+
 static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
        .dev_start               = ice_dcf_dev_start,
        .dev_stop                = ice_dcf_dev_stop,
        .dev_close               = ice_dcf_dev_close,
+       .dev_reset               = ice_dcf_dev_reset,
        .dev_configure           = ice_dcf_dev_configure,
        .dev_infos_get           = ice_dcf_dev_info_get,
        .rx_queue_setup          = ice_rx_queue_setup,
        .tx_queue_setup          = ice_tx_queue_setup,
-       .rx_queue_release        = ice_rx_queue_release,
-       .tx_queue_release        = ice_tx_queue_release,
+       .rx_queue_release        = ice_dev_rx_queue_release,
+       .tx_queue_release        = ice_dev_tx_queue_release,
        .rx_queue_start          = ice_dcf_rx_queue_start,
        .tx_queue_start          = ice_dcf_tx_queue_start,
        .rx_queue_stop           = ice_dcf_rx_queue_stop,
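
Together with the hw->resetting guards added in ice_dcf_dev_start() and
ice_dcf_stats_get(), the new .dev_reset callback gives applications a
recovery path. A hypothetical application-side sketch (the event wiring
and names below are assumptions, not part of this patch): on a reset
notification such as RTE_ETH_EVENT_INTR_RESET, call rte_eth_dev_reset(),
which funnels into ice_dcf_dev_reset() above.

    #include <rte_ethdev.h>

    /* Hypothetical callback: recover a port after the PF reset the DCF. */
    static int
    dcf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type type,
                       void *cb_arg, void *ret_param)
    {
            (void)cb_arg;
            (void)ret_param;

            if (type != RTE_ETH_EVENT_INTR_RESET)
                    return 0;

            /* dev_reset is uninit followed by init; the application still
             * has to reconfigure queues and restart the port afterwards.
             */
            return rte_eth_dev_reset(port_id);
    }

    /* Registration, e.g. during setup:
     *
     *      rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
     *                                    dcf_reset_event_cb, NULL);
     */

Many applications only record the event in the callback and perform the
actual rte_eth_dev_reset() call from a control thread, keeping heavyweight
control-path work out of the interrupt thread.
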
@@ -1039,6 +1082,8 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
 {
        struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
 
+       adapter->real_hw.resetting = false;
        eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
        eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
        eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
@@ -1046,8 +1090,6 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
-
        adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
        if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF hardware");