net: add macro for VLAN header length
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index b8a537c..28f7f7f 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -69,34 +69,19 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
        max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
                              dev->data->mtu + ICE_ETH_OVERHEAD);
 
-       /* Check if the jumbo frame and maximum packet length are set
-        * correctly.
-        */
-       if (dev_data->mtu > RTE_ETHER_MTU) {
-               if (max_pkt_len <= ICE_ETH_MAX_LEN ||
-                   max_pkt_len > ICE_FRAME_SIZE_MAX) {
-                       PMD_DRV_LOG(ERR, "maximum packet length must be "
-                                   "larger than %u and smaller than %u, "
-                                   "as jumbo frame is enabled",
-                                   (uint32_t)ICE_ETH_MAX_LEN,
-                                   (uint32_t)ICE_FRAME_SIZE_MAX);
-                       return -EINVAL;
-               }
-       } else {
-               if (max_pkt_len < RTE_ETHER_MIN_LEN ||
-                   max_pkt_len > ICE_ETH_MAX_LEN) {
-                       PMD_DRV_LOG(ERR, "maximum packet length must be "
-                                   "larger than %u and smaller than %u, "
-                                   "as jumbo frame is disabled",
-                                   (uint32_t)RTE_ETHER_MIN_LEN,
-                                   (uint32_t)ICE_ETH_MAX_LEN);
-                       return -EINVAL;
-               }
+       /* Check if the maximum packet length is set correctly. */
+       if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
+           max_pkt_len > ICE_FRAME_SIZE_MAX) {
+               PMD_DRV_LOG(ERR, "maximum packet length must be "
+                           "larger than %u and smaller than %u",
+                           (uint32_t)RTE_ETHER_MIN_LEN,
+                           (uint32_t)ICE_FRAME_SIZE_MAX);
+               return -EINVAL;
        }
 
        rxq->max_pkt_len = max_pkt_len;
-       if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
-           (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
+       if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
+           (rxq->max_pkt_len + 2 * RTE_VLAN_HLEN) > buf_size) {
                dev_data->scattered_rx = 1;
        }
        rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
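
For context, the hunk above replaces the driver-private ICE_VLAN_TAG_SIZE with the generic RTE_VLAN_HLEN macro (the 4-byte 802.1Q header length from rte_ether.h) that this commit introduces. A minimal sketch of the same scatter decision, assuming buf_size is the mbuf data room left after the headroom (hypothetical helper name, not the driver's code):

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_ether.h>  /* RTE_VLAN_HLEN: 4-byte 802.1Q VLAN header */

/* Sketch only: scattered Rx is needed when the application asks for it or
 * when the largest possible frame, including two VLAN tags (QinQ), does
 * not fit into a single Rx buffer.
 */
static inline int
needs_scattered_rx(uint64_t rx_offloads, uint32_t max_pkt_len,
                   uint32_t buf_size)
{
        return (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
               (max_pkt_len + 2 * RTE_VLAN_HLEN) > buf_size;
}
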
@@ -159,11 +144,9 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                        return -1;
        }
 
-       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
-               intr_handle->intr_vec =
-                       rte_zmalloc("intr_vec",
-                                   dev->data->nb_rx_queues * sizeof(int), 0);
-               if (!intr_handle->intr_vec) {
+       if (rte_intr_dp_is_en(intr_handle)) {
+               if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+                                                  dev->data->nb_rx_queues)) {
                        PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
                                    dev->data->nb_rx_queues);
                        return -1;
@@ -213,7 +196,8 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                        hw->msix_base = IAVF_MISC_VEC_ID;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                hw->rxq_map[hw->msix_base] |= 1 << i;
-                               intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
+                               rte_intr_vec_list_index_set(intr_handle,
+                                                       i, IAVF_MISC_VEC_ID);
                        }
                        PMD_DRV_LOG(DEBUG,
                                    "vector %u are mapping to all Rx queues",
@@ -223,12 +207,13 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                         * multi interrupts, then the vec is from 1
                         */
                        hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
-                                             intr_handle->nb_efd);
+                                     rte_intr_nb_efd_get(intr_handle));
                        hw->msix_base = IAVF_MISC_VEC_ID;
                        vec = IAVF_MISC_VEC_ID;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                hw->rxq_map[vec] |= 1 << i;
-                               intr_handle->intr_vec[i] = vec++;
+                               rte_intr_vec_list_index_set(intr_handle,
+                                                                  i, vec++);
                                if (vec >= hw->nb_msix)
                                        vec = IAVF_RX_VEC_START;
                        }
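
For context, the three hunks above replace direct access to the now-opaque struct rte_intr_handle (intr_handle->intr_vec, intr_handle->nb_efd) with the accessor functions introduced when the interrupt handle was hidden in DPDK 21.11. A minimal sketch of that pattern, with a hypothetical helper name and a simplified wrap-around rule:

#include <rte_interrupts.h>

/* Sketch only: allocate the per-queue vector list and map every Rx queue
 * to an MSI-X vector, wrapping around at nb_msix.
 */
static int
map_rxq_vectors(struct rte_intr_handle *intr_handle, uint16_t nb_rx_queues,
                int first_vec, int nb_msix)
{
        int vec = first_vec;
        uint16_t i;

        if (rte_intr_dp_is_en(intr_handle) &&
            rte_intr_vec_list_alloc(intr_handle, "intr_vec", nb_rx_queues))
                return -1;  /* vector list allocation failed */

        for (i = 0; i < nb_rx_queues; i++) {
                rte_intr_vec_list_index_set(intr_handle, i, vec++);
                if (vec >= nb_msix)
                        vec = first_vec;
        }

        return 0;
}

The matching teardown further down in this diff is a single rte_intr_vec_list_free() call instead of the old rte_free() of intr_vec.
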
@@ -528,6 +513,12 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
                return -EIO;
        }
 
+       if (hw->tm_conf.root && !hw->tm_conf.committed) {
+               PMD_DRV_LOG(ERR,
+                       "please call hierarchy_commit() before starting the port");
+               return -EIO;
+       }
+
        ad->pf.adapter_stopped = 0;
 
        hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
@@ -576,7 +567,7 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
                return ret;
        }
 
-       dev->data->dev_link.link_status = ETH_LINK_UP;
+       dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
        return 0;
 }
@@ -619,6 +610,7 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct rte_intr_handle *intr_handle = dev->intr_handle;
        struct ice_adapter *ad = &dcf_ad->parent;
+       struct ice_dcf_hw *hw = &dcf_ad->real_hw;
 
        if (ad->pf.adapter_stopped == 1) {
                PMD_DRV_LOG(DEBUG, "Port is already stopped");
@@ -631,14 +623,12 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
        ice_dcf_stop_queues(dev);
 
        rte_intr_efd_disable(intr_handle);
-       if (intr_handle->intr_vec) {
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
-       }
+       rte_intr_vec_list_free(intr_handle);
 
        ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
-       dev->data->dev_link.link_status = ETH_LINK_DOWN;
+       dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
        ad->pf.adapter_stopped = 1;
+       hw->tm_conf.committed = false;
 
        return 0;
 }
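
For context, the dev_start/dev_stop hunks above add the DCF traffic-manager constraint: start fails when a TM hierarchy root exists but has not been committed, and stop clears hw->tm_conf.committed, so the commit has to be repeated before the next start. A minimal application-side sketch (hypothetical helper name, trimmed error handling):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_tm.h>

/* Commit the previously configured TM hierarchy, then start the port.
 * Because dev_stop() clears the committed flag, call this again after
 * every rte_eth_dev_stop().
 */
static int
dcf_commit_and_start(uint16_t port_id)
{
        struct rte_tm_error tm_err = { 0 };

        if (rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */,
                                    &tm_err) != 0) {
                printf("TM hierarchy commit failed: %s\n",
                       tm_err.message ? tm_err.message : "unknown");
                return -1;
        }

        return rte_eth_dev_start(port_id);
}
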
@@ -652,8 +642,8 @@ ice_dcf_dev_configure(struct rte_eth_dev *dev)
        ad->rx_bulk_alloc_allowed = true;
        ad->tx_simple_allowed = true;
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        return 0;
 }
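
For context, the dev_configure() hunk above means that requesting RSS multi-queue mode implicitly enables the RSS hash offload, so received mbufs carry the computed hash in mbuf->hash.rss. A minimal application-side configuration sketch (generic rss_hf values, not DCF-specific):

#include <rte_ethdev.h>

/* Sketch only: request RSS distribution across the configured Rx queues. */
static const struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode = RTE_ETH_MQ_RX_RSS,
        },
        .rx_adv_conf = {
                .rss_conf = {
                        .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
                },
        },
};
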
@@ -673,29 +663,30 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
        dev_info->hash_key_size = hw->vf_res->rss_key_size;
        dev_info->reta_size = hw->vf_res->rss_lut_size;
        dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        dev_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_UDP_CKSUM |
-               DEV_RX_OFFLOAD_TCP_CKSUM |
-               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_SCATTER |
-               DEV_RX_OFFLOAD_VLAN_FILTER |
-               DEV_RX_OFFLOAD_RSS_HASH;
+               RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+               RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+               RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+               RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+               RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+               RTE_ETH_RX_OFFLOAD_SCATTER |
+               RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+               RTE_ETH_RX_OFFLOAD_RSS_HASH;
        dev_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_UDP_CKSUM |
-               DEV_TX_OFFLOAD_TCP_CKSUM |
-               DEV_TX_OFFLOAD_SCTP_CKSUM |
-               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_TCP_TSO |
-               DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-               DEV_TX_OFFLOAD_GRE_TNL_TSO |
-               DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-               DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-               DEV_TX_OFFLOAD_MULTI_SEGS;
+               RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+               RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+               RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+               RTE_ETH_TX_OFFLOAD_TCP_TSO |
+               RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+               RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+               RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+               RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+               RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -925,42 +916,42 @@ ice_dcf_link_update(struct rte_eth_dev *dev,
         */
        switch (hw->link_speed) {
        case 10:
-               new_link.link_speed = ETH_SPEED_NUM_10M;
+               new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
                break;
        case 100:
-               new_link.link_speed = ETH_SPEED_NUM_100M;
+               new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
        case 1000:
-               new_link.link_speed = ETH_SPEED_NUM_1G;
+               new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
                break;
        case 10000:
-               new_link.link_speed = ETH_SPEED_NUM_10G;
+               new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
                break;
        case 20000:
-               new_link.link_speed = ETH_SPEED_NUM_20G;
+               new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
                break;
        case 25000:
-               new_link.link_speed = ETH_SPEED_NUM_25G;
+               new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
                break;
        case 40000:
-               new_link.link_speed = ETH_SPEED_NUM_40G;
+               new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
                break;
        case 50000:
-               new_link.link_speed = ETH_SPEED_NUM_50G;
+               new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
                break;
        case 100000:
-               new_link.link_speed = ETH_SPEED_NUM_100G;
+               new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
                break;
        default:
-               new_link.link_speed = ETH_SPEED_NUM_NONE;
+               new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
                break;
        }
 
-       new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-       new_link.link_status = hw->link_up ? ETH_LINK_UP :
-                                            ETH_LINK_DOWN;
+       new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+       new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
+                                            RTE_ETH_LINK_DOWN;
        new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-                               ETH_LINK_SPEED_FIXED);
+                               RTE_ETH_LINK_SPEED_FIXED);
 
        return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -979,11 +970,11 @@ ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
                                        udp_tunnel->udp_port);
                break;
-       case RTE_TUNNEL_TYPE_ECPRI:
+       case RTE_ETH_TUNNEL_TYPE_ECPRI:
                ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
                                        udp_tunnel->udp_port);
                break;
@@ -1010,8 +1001,8 @@ ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
-       case RTE_TUNNEL_TYPE_ECPRI:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_ECPRI:
                ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
                break;
        default:
@@ -1035,11 +1026,44 @@ ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
        return 0;
 }
 
+static inline void
+ice_dcf_reset_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
+{
+       ice_dcf_uninit_hw(eth_dev, hw);
+       ice_dcf_init_hw(eth_dev, hw);
+}
+
+/* Check if reset has been triggered by PF */
+static inline bool
+ice_dcf_is_reset(struct rte_eth_dev *dev)
+{
+       struct ice_dcf_adapter *ad = dev->data->dev_private;
+       struct iavf_hw *hw = &ad->real_hw.avf;
+
+       return !(IAVF_READ_REG(hw, IAVF_VF_ARQLEN1) &
+                IAVF_VF_ARQLEN1_ARQENABLE_MASK);
+}
+
 static int
 ice_dcf_dev_reset(struct rte_eth_dev *dev)
 {
+       struct ice_dcf_adapter *ad = dev->data->dev_private;
+       struct ice_dcf_hw *hw = &ad->real_hw;
        int ret;
 
+       if (ice_dcf_is_reset(dev)) {
+               if (!ad->real_hw.resetting)
+                       ad->real_hw.resetting = true;
+               PMD_DRV_LOG(ERR, "The DCF has been reset by PF");
+
+               /*
+        * Simply reset the hw to trigger an additional DCF enable/disable
+        * cycle, which helps to work around the issue that the kernel
+        * driver may not clean up resources during the previous reset.
+                */
+               ice_dcf_reset_hw(dev, hw);
+       }
+
        ret = ice_dcf_dev_uninit(dev);
        if (ret)
                return ret;
@@ -1082,7 +1106,6 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
 {
        struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
 
-       adapter->real_hw.resetting = false;
        eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
        eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
        eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;