diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index 0003fd5..34e3a03 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -160,8 +160,8 @@ nfp_net_configure(struct rte_eth_dev *dev)
        rxmode = &dev_conf->rxmode;
        txmode = &dev_conf->txmode;
 
-       if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-               rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        /* Checking TX mode */
        if (txmode->mq_mode) {
@@ -170,12 +170,19 @@ nfp_net_configure(struct rte_eth_dev *dev)
        }
 
        /* Checking RX mode */
-       if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+       if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS &&
            !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
                PMD_INIT_LOG(INFO, "RSS not supported");
                return -EINVAL;
        }
 
+       /* Checking MTU set */
+       if (rxmode->mtu > hw->flbufsz) {
+               PMD_INIT_LOG(INFO, "MTU (%u) larger than current mbufsize (%u) not supported",
+                                   rxmode->mtu, hw->flbufsz);
+               return -ERANGE;
+       }
+
        return 0;
 }
 
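The new check ties the configured MTU to the free-list buffer size (hw->flbufsz) programmed at RX queue setup. A minimal caller-side sketch, assuming a hypothetical configure_port() helper and a valid port_id; with the check in place, an MTU that does not fit the RX buffers now fails rte_eth_dev_configure() up front with -ERANGE instead of misbehaving later:

    #include <rte_ethdev.h>

    /* Hypothetical helper: configure one RX/TX queue pair with an
     * explicit MTU. nfp_net_configure() runs inside this call and
     * rejects the MTU if it exceeds hw->flbufsz. */
    static int
    configure_port(uint16_t port_id, uint32_t mtu)
    {
            struct rte_eth_conf conf = {
                    .rxmode = {
                            .mq_mode = RTE_ETH_MQ_RX_RSS,
                            .mtu = mtu,
                    },
            };

            return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }
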
@@ -280,10 +287,6 @@ nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
                return -EBUSY;
        }
 
-       if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
-           !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
-               return -EBUSY;
-
        /* Writing new MAC to the specific port BAR address */
        nfp_net_write_mac(hw, (uint8_t *)mac_addr);
 
@@ -307,24 +310,21 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
        struct nfp_net_hw *hw;
        int i;
 
-       if (!intr_handle->intr_vec) {
-               intr_handle->intr_vec =
-                       rte_zmalloc("intr_vec",
-                                   dev->data->nb_rx_queues * sizeof(int), 0);
-               if (!intr_handle->intr_vec) {
-                       PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-                                    " intr_vec", dev->data->nb_rx_queues);
-                       return -ENOMEM;
-               }
+       if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+                                   dev->data->nb_rx_queues)) {
+               PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+                            " intr_vec", dev->data->nb_rx_queues);
+               return -ENOMEM;
        }
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
+       if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
                /* UIO just supports one queue and no LSC*/
                nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
-               intr_handle->intr_vec[0] = 0;
+               if (rte_intr_vec_list_index_set(intr_handle, 0, 0))
+                       return -1;
        } else {
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -333,9 +333,12 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
                         * efd interrupts
                        */
                        nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
-                       intr_handle->intr_vec[i] = i + 1;
+                       if (rte_intr_vec_list_index_set(intr_handle, i,
+                                                              i + 1))
+                               return -1;
                        PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
-                                           intr_handle->intr_vec[i]);
+                               rte_intr_vec_list_index_get(intr_handle,
+                                                                  i));
                }
        }
 
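These hunks follow the DPDK 21.11 rework that made struct rte_intr_handle opaque: drivers can no longer dereference intr_vec and must go through the accessor API. A condensed sketch of the alloc/set/free pattern, assuming a hypothetical map_rx_vectors() helper (the rte_intr_vec_list_* calls are driver-internal APIs):

    #include <errno.h>
    #include <rte_interrupts.h>

    /* Map each RX queue to MSI-X vector queue_id + 1; vector 0 stays
     * reserved for non-queue (e.g. link state) interrupts. */
    static int
    map_rx_vectors(struct rte_intr_handle *ih, uint16_t nb_rx_queues)
    {
            uint16_t i;

            if (rte_intr_vec_list_alloc(ih, "intr_vec", nb_rx_queues) != 0)
                    return -ENOMEM;

            for (i = 0; i < nb_rx_queues; i++) {
                    if (rte_intr_vec_list_index_set(ih, i, i + 1) != 0) {
                            rte_intr_vec_list_free(ih);
                            return -1;
                    }
            }

            return 0;
    }
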
@@ -359,19 +362,19 @@ nfp_check_offloads(struct rte_eth_dev *dev)
        rxmode = &dev_conf->rxmode;
        txmode = &dev_conf->txmode;
 
-       if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+       if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
                if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
                        ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
        }
 
-       if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+       if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
                        ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
        }
 
        hw->mtu = dev->data->mtu;
 
-       if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+       if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
                ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 
        /* L2 broadcast */
@@ -383,13 +386,13 @@ nfp_check_offloads(struct rte_eth_dev *dev)
                ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
        /* TX checksum offload */
-       if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-           txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-           txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+       if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+           txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+           txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
                ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 
        /* LSO offload */
-       if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+       if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
                if (hw->cap & NFP_NET_CFG_CTRL_LSO)
                        ctrl |= NFP_NET_CFG_CTRL_LSO;
                else
@@ -397,7 +400,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
        }
 
        /* RX gather */
-       if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+       if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
                ctrl |= NFP_NET_CFG_CTRL_GATHER;
 
        return ctrl;
@@ -485,14 +488,14 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
        int ret;
 
        static const uint32_t ls_to_ethtool[] = {
-               [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
-               [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
-               [NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
-               [NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
-               [NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
-               [NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
-               [NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
-               [NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
+               [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
+               [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
+               [NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
+               [NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
+               [NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
+               [NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
+               [NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
+               [NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
        };
 
        PMD_DRV_LOG(DEBUG, "Link update");
@@ -504,15 +507,15 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
        memset(&link, 0, sizeof(struct rte_eth_link));
 
        if (nn_link_status & NFP_NET_CFG_STS_LINK)
-               link.link_status = ETH_LINK_UP;
+               link.link_status = RTE_ETH_LINK_UP;
 
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
                         NFP_NET_CFG_STS_LINK_RATE_MASK;
 
        if (nn_link_status >= RTE_DIM(ls_to_ethtool))
-               link.link_speed = ETH_SPEED_NUM_NONE;
+               link.link_speed = RTE_ETH_SPEED_NUM_NONE;
        else
                link.link_speed = ls_to_ethtool[nn_link_status];
 
@@ -696,31 +699,41 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
-       dev_info->max_rx_pktlen = hw->max_mtu;
+       /*
+        * The maximum rx packet length (max_rx_pktlen) is set to the
+        * maximum supported frame size that the NFP can handle. This
+        * includes layer 2 headers, CRC and other metadata that can
+        * optionally be used.
+        * The maximum layer 3 MTU (max_mtu) is read from hardware,
+        * which was set by the firmware loaded onto the card.
+        */
+       dev_info->max_rx_pktlen = NFP_FRAME_SIZE_MAX;
+       dev_info->max_mtu = hw->max_mtu;
+       dev_info->min_mtu = RTE_ETHER_MIN_MTU;
        /* Next should change when PF support is implemented */
        dev_info->max_mac_addrs = 1;
 
        if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
-               dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+               dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
        if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
-               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-                                            DEV_RX_OFFLOAD_UDP_CKSUM |
-                                            DEV_RX_OFFLOAD_TCP_CKSUM;
+               dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+                                            RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+                                            RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
        if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
-               dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+               dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
        if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
-               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-                                            DEV_TX_OFFLOAD_UDP_CKSUM |
-                                            DEV_TX_OFFLOAD_TCP_CKSUM;
+               dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+                                            RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+                                            RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
        if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
-               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+               dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
        if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
-               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+               dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -757,22 +770,22 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        };
 
        if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
-               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+               dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-               dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-                                                  ETH_RSS_NONFRAG_IPV4_TCP |
-                                                  ETH_RSS_NONFRAG_IPV4_UDP |
-                                                  ETH_RSS_IPV6 |
-                                                  ETH_RSS_NONFRAG_IPV6_TCP |
-                                                  ETH_RSS_NONFRAG_IPV6_UDP;
+               dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+                                                  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+                                                  RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+                                                  RTE_ETH_RSS_IPV6 |
+                                                  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+                                                  RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
                dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
                dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
        }
 
-       dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-                              ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-                              ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+                              RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+                              RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 
        return 0;
 }
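With the split between max_mtu (layer 3 limit read from firmware) and max_rx_pktlen (full frame size, NFP_FRAME_SIZE_MAX), applications see both limits through the usual query path. A short usage sketch, assuming a valid port_id:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_mtu_limits(uint16_t port_id)
    {
            struct rte_eth_dev_info info;

            if (rte_eth_dev_info_get(port_id, &info) != 0)
                    return;

            printf("MTU range [%u, %u], max frame length %u\n",
                   info.min_mtu, info.max_mtu, info.max_rx_pktlen);
    }
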
@@ -804,7 +817,8 @@ nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
-       if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
+       if (rte_intr_type_get(pci_dev->intr_handle) !=
+                                                       RTE_INTR_HANDLE_UIO)
                base = 1;
 
        /* Make sure all updates are written before un-masking */
@@ -824,7 +838,8 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
-       if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
+       if (rte_intr_type_get(pci_dev->intr_handle) !=
+                                                       RTE_INTR_HANDLE_UIO)
                base = 1;
 
        /* Make sure all updates are written before un-masking */
@@ -843,7 +858,7 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
        if (link.link_status)
                PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                            dev->data->port_id, link.link_speed,
-                           link.link_duplex == ETH_LINK_FULL_DUPLEX
+                           link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
                            ? "full-duplex" : "half-duplex");
        else
                PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -874,7 +889,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
        if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
                /* If MSI-X auto-masking is used, clear the entry */
                rte_wmb();
-               rte_intr_ack(&pci_dev->intr_handle);
+               rte_intr_ack(pci_dev->intr_handle);
        } else {
                /* Make sure all updates are written before un-masking */
                rte_wmb();
@@ -954,6 +969,13 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EBUSY;
        }
 
+       /* MTU larger than current mbufsize not supported */
+       if (mtu > hw->flbufsz) {
+               PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported",
+                           mtu, hw->flbufsz);
+               return -ERANGE;
+       }
+
        /* writing to configuration space */
        nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu);
 
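The same flbufsz guard now also covers runtime MTU changes. A caller-side fragment, assuming a valid port_id:

    /* Fails with -ERANGE if 9000 bytes exceed the mbuf data size the
     * RX queues were set up with, instead of programming a frame size
     * the buffers cannot hold. */
    int ret = rte_eth_dev_set_mtu(port_id, 9000);
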
@@ -967,22 +989,25 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        uint32_t new_ctrl, update;
        struct nfp_net_hw *hw;
+       struct rte_eth_conf *dev_conf;
        int ret;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       new_ctrl = 0;
-
-       /* Enable vlan strip if it is not configured yet */
-       if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
-           !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
-               new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
+       dev_conf = &dev->data->dev_conf;
+       new_ctrl = hw->ctrl;
 
-       /* Disable vlan strip just if it is configured */
-       if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
-           (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
-               new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
+       /*
+        * VLAN stripping setting
+        * Enable or disable VLAN stripping
+        */
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+               if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+                       new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+               else
+                       new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
+       }
 
-       if (new_ctrl == 0)
+       if (new_ctrl == hw->ctrl)
                return 0;
 
        update = NFP_NET_CFG_UPDATE_GEN;
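The rewrite keys the decision off dev_conf->rxmode.offloads rather than toggling relative to the current hw->ctrl, which matches ethdev semantics: rte_eth_dev_set_vlan_offload() first updates rxmode.offloads, then calls into the PMD with the changed bits. A caller-side sketch, assuming a valid port_id:

    /* Request VLAN stripping; ethdev sets RTE_ETH_RX_OFFLOAD_VLAN_STRIP
     * in rxmode.offloads and invokes the PMD with RTE_ETH_VLAN_STRIP_MASK
     * set, which the code above consumes. Note the mask argument
     * describes the full desired VLAN offload state. */
    int ret = rte_eth_dev_set_vlan_offload(port_id,
                                           RTE_ETH_VLAN_STRIP_OFFLOAD);
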
@@ -1018,8 +1043,8 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
         */
        for (i = 0; i < reta_size; i += 4) {
                /* Handling 4 RSS entries per loop */
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
                if (!mask)
@@ -1099,8 +1124,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
         */
        for (i = 0; i < reta_size; i += 4) {
                /* Handling 4 RSS entries per loop */
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
                if (!mask)
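For context on the idx/shift arithmetic: the RETA is exchanged in rte_eth_rss_reta_entry64 groups of RTE_ETH_RETA_GROUP_SIZE (64) entries, so entry i lives at group i / 64, bit i % 64 of that group's mask. A sketch under stated assumptions, using a hypothetical spread_reta() helper:

    #include <stdint.h>
    #include <string.h>
    #include <rte_ethdev.h>

    /* Hypothetical helper: alternate all RETA entries between queues 0
     * and 1. Assumes reta_size is a multiple of RTE_ETH_RETA_GROUP_SIZE
     * (it is 128 for NFP) and that at least two RX queues exist. */
    static int
    spread_reta(uint16_t port_id, uint16_t reta_size)
    {
            struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_ETH_RETA_GROUP_SIZE];
            uint16_t i;

            memset(reta, 0, sizeof(reta));
            for (i = 0; i < reta_size; i++) {
                    uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;   /* 64-entry group */
                    uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE; /* bit inside group */

                    reta[idx].mask |= UINT64_C(1) << shift;
                    reta[idx].reta[shift] = i % 2;
            }

            return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
    }
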
@@ -1138,22 +1163,22 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 
        rss_hf = rss_conf->rss_hf;
 
-       if (rss_hf & ETH_RSS_IPV4)
+       if (rss_hf & RTE_ETH_RSS_IPV4)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
 
-       if (rss_hf & ETH_RSS_IPV6)
+       if (rss_hf & RTE_ETH_RSS_IPV6)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
 
        cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
@@ -1223,22 +1248,22 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
        cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
 
        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
-               rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
+               rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
-               rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+               rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
-               rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+               rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
-               rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+               rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
-               rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+               rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
-               rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
+               rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
        /* Propagate current RSS hash functions to caller */
        rss_conf->rss_hf = rss_hf;
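
This getter mirrors the write-side table in nfp_net_rss_hash_write() back into rss_hf bits. Selecting hash types from an application goes through rte_eth_dev_rss_hash_update(); a minimal fragment, assuming a valid port_id:

    /* Hash plain IPv4 and TCP/IPv4 only; a NULL key keeps the
     * currently programmed RSS key. */
    struct rte_eth_rss_conf rss = {
            .rss_key = NULL,
            .rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
    };
    int ret = rte_eth_dev_rss_hash_update(port_id, &rss);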