X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ftxgbe%2Ftxgbe_ethdev.c;h=7b42285b9edda7b6677c79c90f908727600563ad;hb=2e1ecb460db92bc9dcd2350ed48dfe0b504f380f;hp=b267da462bcb3538283f0a1d4dac133d38547a85;hpb=8c9f976f05bb73e2353741137ff4def526b47cb2;p=dpdk.git

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index b267da462b..7b42285b9e 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -109,7 +109,8 @@ static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
-static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
+		struct rte_intr_handle *handle);
 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
 		struct rte_intr_handle *handle);
 static void txgbe_dev_interrupt_handler(void *param);
@@ -494,7 +495,7 @@ txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
 	struct rte_kvargs *kvlist;
 	u16 auto_neg = 1;
 	u16 poll = 0;
-	u16 present = 1;
+	u16 present = 0;
 	u16 sgmii = 0;
 	u16 ffe_set = 0;
 	u16 ffe_main = 27;
@@ -547,7 +548,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	const struct rte_memzone *mz;
 	uint32_t ctrl_ext;
 	uint16_t csum;
@@ -820,10 +821,8 @@ static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
 	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
 	struct txgbe_fdir_filter *fdir_filter;
 
-	if (fdir_info->hash_map)
-		rte_free(fdir_info->hash_map);
-	if (fdir_info->hash_handle)
-		rte_hash_free(fdir_info->hash_handle);
+	rte_free(fdir_info->hash_map);
+	rte_hash_free(fdir_info->hash_handle);
 
 	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
 		TAILQ_REMOVE(&fdir_info->fdir_list,
@@ -840,10 +839,8 @@ static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
 	struct txgbe_l2_tn_filter *l2_tn_filter;
 
-	if (l2_tn_info->hash_map)
-		rte_free(l2_tn_info->hash_map);
-	if (l2_tn_info->hash_handle)
-		rte_hash_free(l2_tn_info->hash_handle);
+	rte_free(l2_tn_info->hash_map);
+	rte_hash_free(l2_tn_info->hash_handle);
 
 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
 		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
@@ -997,7 +994,7 @@ txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 		rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
 		rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
 		rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			restart = (rxcfg & TXGBE_RXCFG_ENA) &&
 				!(rxcfg & TXGBE_RXCFG_VLAN);
 			rxcfg |= TXGBE_RXCFG_VLAN;
@@ -1032,7 +1029,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
 	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (vlan_ext) {
 			wr32m(hw, TXGBE_VLANCTL,
 				TXGBE_VLANCTL_TPID_MASK,
@@ -1052,7 +1049,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				TXGBE_TAGTPID_LSB(tpid));
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (vlan_ext) {
 			/* Only the high 16-bits is valid */
 			wr32m(hw, TXGBE_EXTAG,
@@ -1136,11 +1133,11 @@ txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 	rxq = dev->data->rx_queues[queue];
 
 	if (on) {
-		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
-		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -1239,7 +1236,7 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			txgbe_vlan_strip_queue_set(dev, i, 1);
 		else
 			txgbe_vlan_strip_queue_set(dev, i, 0);
@@ -1253,17 +1250,17 @@ txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct txgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -1274,25 +1271,25 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		txgbe_vlan_hw_strip_config(dev);
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			txgbe_vlan_hw_filter_enable(dev);
 		else
 			txgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			txgbe_vlan_hw_extend_enable(dev);
 		else
 			txgbe_vlan_hw_extend_disable(dev);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			txgbe_qinq_hw_strip_enable(dev);
 		else
 			txgbe_qinq_hw_strip_disable(dev);
@@ -1330,10 +1327,10 @@ txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -1356,18 +1353,18 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -1377,13 +1374,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode =
-				ETH_MQ_RX_VMDQ_ONLY;
+				RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB*/
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
@@ -1392,13 +1389,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
 			dev->data->dev_conf.txmode.mq_mode =
-				ETH_MQ_TX_VMDQ_ONLY;
+				RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -1413,13 +1410,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					" not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdb+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1428,15 +1425,15 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			      conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			      conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1445,39 +1442,39 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			      conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			      conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			      conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			      conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			      conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			      conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
@@ -1494,8 +1491,8 @@ txgbe_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret = txgbe_check_mq_mode(dev);
@@ -1619,7 +1616,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
 	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	uint32_t intr_vector = 0;
 	int err;
 	bool link_up = false, negotiate = 0;
@@ -1633,17 +1630,6 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	/* TXGBE devices don't support:
-	 * - half duplex (checked afterwards for valid speeds)
-	 * - fixed speed: TODO implement
-	 */
-	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
-		PMD_INIT_LOG(ERR,
-		"Invalid link_speeds for port %u, fix speed not supported",
-				dev->data->port_id);
-		return -EINVAL;
-	}
-
 	/* Stop the link setup handler before resetting the HW.
 	 */
 	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
@@ -1680,18 +1666,15 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 		return -1;
 	}
 
-	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
-		intr_handle->intr_vec =
-			rte_zmalloc("intr_vec",
-				    dev->data->nb_rx_queues * sizeof(int), 0);
-		if (intr_handle->intr_vec == NULL) {
+	if (rte_intr_dp_is_en(intr_handle)) {
+		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+					    dev->data->nb_rx_queues)) {
 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
 				     " intr_vec", dev->data->nb_rx_queues);
 			return -ENOMEM;
 		}
 	}
-
-	/* confiugre msix for sleep until rx interrupt */
+	/* configure msix for sleep until rx interrupt */
 	txgbe_configure_msix(dev);
 
 	/* initialize transmission unit */
@@ -1704,15 +1687,15 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		txgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1773,30 +1756,30 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	if (err)
 		goto error;
 
-	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-		ETH_LINK_SPEED_10G;
+	allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+		RTE_ETH_LINK_SPEED_10G;
 	link_speeds = &dev->data->dev_conf.link_speeds;
-	if (*link_speeds & ~allowed_speeds) {
+	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
 		PMD_INIT_LOG(ERR, "Invalid link setting");
 		goto error;
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = (TXGBE_LINK_SPEED_100M_FULL |
 			 TXGBE_LINK_SPEED_1GB_FULL |
 			 TXGBE_LINK_SPEED_10GB_FULL);
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= TXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= TXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= TXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= TXGBE_LINK_SPEED_100M_FULL;
 	}
@@ -1871,7 +1854,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	int vf;
 	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
@@ -1921,10 +1904,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
 	/* Clean datapath event and queue/vec mapping */
 	rte_intr_efd_disable(intr_handle);
-	if (intr_handle->intr_vec != NULL) {
-		rte_free(intr_handle->intr_vec);
-		intr_handle->intr_vec = NULL;
-	}
+	rte_intr_vec_list_free(intr_handle);
 
 	/* reset hierarchy commit */
 	tm_conf->committed = false;
@@ -1953,6 +1933,7 @@ txgbe_dev_set_link_up(struct rte_eth_dev *dev)
 	} else {
 		/* Turn on the laser */
 		hw->mac.enable_tx_laser(hw);
+		hw->dev_start = true;
 		txgbe_dev_link_update(dev, 0);
 	}
@@ -1973,6 +1954,7 @@ txgbe_dev_set_link_down(struct rte_eth_dev *dev)
 	} else {
 		/* Turn off the laser */
 		hw->mac.disable_tx_laser(hw);
+		hw->dev_start = false;
 		txgbe_dev_link_update(dev, 0);
 	}
@@ -1987,7 +1969,7 @@ txgbe_dev_close(struct rte_eth_dev *dev)
 {
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	int retries = 0;
 	int ret;
@@ -2611,8 +2593,9 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
@@ -2644,11 +2627,11 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_desc_lim = tx_desc_lim;
 
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -2705,10 +2688,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	hw->mac.get_link_status = true;
@@ -2722,8 +2706,8 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
@@ -2742,34 +2726,34 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	}
 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case TXGBE_LINK_SPEED_UNKNOWN:
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case TXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
@@ -2938,12 +2922,17 @@ txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
  *  - On failure, a negative value.
  */
 static int
-txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
+				struct rte_intr_handle *intr_handle)
 {
 	uint32_t eicr;
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
 
+	if (rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_UIO &&
+			rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX)
+		wr32(hw, TXGBE_PX_INTA, 1);
+
 	/* clear all cause mask */
 	txgbe_disable_intr(hw);
@@ -2994,7 +2983,7 @@ txgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3107,7 +3096,7 @@ txgbe_dev_interrupt_delayed_handler(void *param)
 {
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	uint32_t eicr;
@@ -3165,7 +3154,7 @@ txgbe_dev_interrupt_handler(void *param)
 {
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
-	txgbe_dev_interrupt_get_status(dev);
+	txgbe_dev_interrupt_get_status(dev, dev->intr_handle);
 	txgbe_dev_interrupt_action(dev, dev->intr_handle);
 }
@@ -3175,7 +3164,7 @@ txgbe_dev_led_on(struct rte_eth_dev *dev)
 	struct txgbe_hw *hw;
 
 	hw = TXGBE_DEV_HW(dev);
-	return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
+	return txgbe_led_on(hw, TXGBE_LEDCTL_ACTIVE) == 0 ? 0 : -ENOTSUP;
 }
 
 static int
@@ -3184,7 +3173,7 @@ txgbe_dev_led_off(struct rte_eth_dev *dev)
 	struct txgbe_hw *hw;
 
 	hw = TXGBE_DEV_HW(dev);
-	return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
+	return txgbe_led_off(hw, TXGBE_LEDCTL_ACTIVE) == 0 ? 0 : -ENOTSUP;
 }
 
 static int
@@ -3225,13 +3214,13 @@ txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3363,16 +3352,16 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 		return -ENOTSUP;
 	}
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
@@ -3404,16 +3393,16 @@ txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
@@ -3463,32 +3452,19 @@ static int
 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
-	struct rte_eth_dev_info dev_info;
 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 	struct rte_eth_dev_data *dev_data = dev->data;
-	int ret;
-
-	ret = txgbe_dev_info_get(dev, &dev_info);
-	if (ret != 0)
-		return ret;
-
-	/* check that mtu is within the allowed range */
-	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
-		return -EINVAL;
 
 	/* If device is started, refuse mtu that requires the support of
 	 * scattered packets when this feature has not been enabled before.
 	 */
 	if (dev_data->dev_started && !dev_data->scattered_rx &&
-	    (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
+	    (frame_size + 2 * RTE_VLAN_HLEN >
 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
 		PMD_INIT_LOG(ERR, "Stop port first.");
 		return -EINVAL;
 	}
 
-	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
 	if (hw->mode)
 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
 			TXGBE_FRAME_SIZE_MAX);
@@ -3593,12 +3569,12 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
 		}
@@ -3622,15 +3598,15 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= TXGBE_POOLETHCTL_UTA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= TXGBE_POOLETHCTL_MCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= TXGBE_POOLETHCTL_UCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= TXGBE_POOLETHCTL_BCA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= TXGBE_POOLETHCTL_MCP;
 
 	return new_val;
@@ -3640,7 +3616,7 @@ static int
 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	uint32_t mask;
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
@@ -3704,7 +3680,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
 		wr32(hw, TXGBE_IVARMISC, tmp);
 	} else {
 		/* rx or tx causes */
-		/* Workround for ICR lost */
+		/* Workaround for ICR lost */
 		idx = ((16 * (queue & 1)) + (8 * direction));
 		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
 		tmp &= ~(0xFF << idx);
@@ -3722,7 +3698,7 @@ static void
 txgbe_configure_msix(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
 	uint32_t vec = TXGBE_MISC_VEC_ID;
@@ -3756,8 +3732,10 @@ txgbe_configure_msix(struct rte_eth_dev *dev)
 		     queue_id++) {
 			/* by default, 1:1 mapping */
 			txgbe_set_ivar_map(hw, 0, queue_id, vec);
-			intr_handle->intr_vec[queue_id] = vec;
-			if (vec < base + intr_handle->nb_efd - 1)
+			rte_intr_vec_list_index_set(intr_handle,
+				queue_id, vec);
+			if (vec < base + rte_intr_nb_efd_get(intr_handle)
+			    - 1)
 				vec++;
 		}
@@ -4281,15 +4259,15 @@ txgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = TXGBE_INCVAL_100;
 		shift = TXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = TXGBE_INCVAL_1GB;
 		shift = TXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = TXGBE_INCVAL_10GB;
 		shift = TXGBE_INCVAL_SHIFT_10GB;
@@ -4407,7 +4385,7 @@ txgbe_timesync_disable(struct rte_eth_dev *dev)
 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
 	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
 
-	/* Stop incrementating the System Time registers. */
+	/* Stop incrementing the System Time registers. */
 	wr32(hw, TXGBE_TSTIMEINC, 0);
 
 	return 0;
@@ -4645,7 +4623,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -4656,7 +4634,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -4680,9 +4658,9 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled */
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4695,7 +4673,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4925,7 +4903,7 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -4956,7 +4934,7 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -4996,7 +4974,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5004,7 +4982,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5012,7 +4990,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5020,7 +4998,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5052,7 +5030,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5062,7 +5040,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5072,7 +5050,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5082,7 +5060,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",